/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
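 *
 * Each page exists in a "default" and a "changeable" copy; the changeable
 * copy acts as a bit mask telling MODE SENSE which fields a later
 * MODE SELECT is allowed to modify.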
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_PER,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/SMS_VER_PER,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC,
	/*mrie*/SIEP_MRIE_NO,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
	    SIEP_FLAGS_LOGERR,
	/*mrie*/0x0f,
	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
	/*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default =
{{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/SLBPP_SITUA,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
#ifdef CTL_TIME_IO
static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
    &ctl_time_io_secs, 0, "Log requests taking more seconds");
#endif

/*
 * Maximum number of LUNs we support.  MUST be a power of 2.
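 * This is configurable only through the kern.cam.ctl.max_luns loader
 * tunable; the matching sysctl below is read-only at run time (CTLFLAG_RDTUN).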
 */
#define	CTL_DEFAULT_MAX_LUNS	1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define	CTL_DEFAULT_MAX_PORTS	1024
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");

/*
 * Maximum number of initiators we support.
 */
#define	CTL_MAX_INITIATORS	(CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define	SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			       struct ctl_ooa *ooa_hdr,
			       struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static
int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_seq_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, const uint8_t *serialize_row,
    union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry,
    struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union ctl_io *io);
static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
			     ctl_ua_type ua_type);
static int ctl_lun_reset(union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
			       ctl_ua_type ua_type);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);
static int ctl_ha_init(void);
static int ctl_ha_shutdown(void);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.
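 * ctl_ser_table.c defines, for each pair of commands, how the newer
 * command is serialized against those already on a LUN's OOA (order of
 * arrival) queue: block behind them, check for overlap, or simply pass.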
 * This isn't very pretty, but is probably the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
	.init = ctl_ha_init,
	.shutdown = ctl_ha_shutdown,
};

static int
ctl_ha_init(void)
{
	struct ctl_softc *softc = control_softc;

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	    &softc->othersc_pool) != 0)
		return (ENOMEM);
	if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
	    != CTL_HA_STATUS_SUCCESS) {
		ctl_ha_msg_destroy(softc);
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	return (0);
};

static int
ctl_ha_shutdown(void)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	ctl_ha_msg_shutdown(softc);
	if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	ctl_pool_free(softc->othersc_pool);
	while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
		ctl_port_deregister(port);
		free(port->port_name, M_CTL);
		free(port, M_CTL);
	}
	return (0);
};

static void
ctl_ha_datamove(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_sg_entry *sgl;
	union ctl_ha_msg msg;
	uint32_t sg_entries_sent;
	int do_sg_copy, i, j;

	memset(&msg.dt, 0, sizeof(msg.dt));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
	msg.hdr.original_sc = io->io_hdr.remote_io;
	msg.hdr.serializing_sc = io;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.dt.flags = io->io_hdr.flags;

	/*
	 * We convert everything into a S/G list here.  We can't
	 * pass by reference, only by value between controllers.
	 * So we can't pass a pointer to the S/G list, only as many
	 * S/G entries as we can fit in here.  If it's possible for
	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
	 * then we need to break this up into multiple transfers.
	 */
	if (io->scsiio.kern_sg_entries == 0) {
		msg.dt.kern_sg_entries = 1;
#if 0
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
		} else {
			/* XXX KDM use busdma here!
 */
			msg.dt.sg_list[0].addr =
			    (void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			ctl_datamove_done(io, true);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand the request over to the peer.
	 * If failover has just happened, then we must return an error.
	 * If failover happens just after this, then it is not our problem.
	 */
	if (lun)
		mtx_lock(&lun->lun_lock);
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		io->io_hdr.port_status = 31342;
		ctl_datamove_done(io, true);
		return;
	}
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	if (lun)
		mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
	union ctl_ha_msg msg;

	if (io->io_hdr.io_type == CTL_IO_SCSI) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		    io->scsiio.sense_len);
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
	}
	ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	    msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what?
 */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->pr_generation;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->pr_res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += port->lun_map_size * sizeof(uint32_t);
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	u_int i, l;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;

	/* Don't try to replicate pages not present on this device. */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	l = sizeof(msg->mode) + lun->mode_pages.index[i].page_len;
	msg = malloc(l, M_CTL, M_WAITOK | M_ZERO);
	msg->hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg->hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg->hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->mode.page_code = page;
	msg->mode.subpage = subpage;
	msg->mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg->mode.data, lun->mode_pages.index[i].page_data,
	    msg->mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->mode, l, M_WAITOK);
	free(msg, M_CTL);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/*
	 * Announce this node's parameters to the peer for validation.
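	 * The peer aborts the HA channel if the version, ha_mode or the
	 * limits below do not match its own (see ctl_isc_login()).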
	 */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = ctl_max_luns;
	msg.login.max_ports = ctl_max_ports;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid;

	if (len < sizeof(msg->ua)) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, sizeof(msg->ua));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	iid = ctl_get_initindex(&msg->hdr.nexus);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	if (len < offsetof(struct ctl_ha_msg_lun, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_lun, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->lun.lun_devid_len +
	    msg->lun.pr_key_count * sizeof(pr_key);
	if (len < offsetof(struct ctl_ha_msg_lun, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_lun, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	     memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	if (len < offsetof(struct ctl_ha_msg_port, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_port, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->port.name_len + msg->port.lun_map_len +
	    msg->port.port_devid_len + msg->port.target_devid_len +
	    msg->port.init_devid_len;
	if (len < offsetof(struct ctl_ha_msg_port, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_port, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int i, iid;

	if (len < offsetof(struct ctl_ha_msg_iid, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_iid, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->iid.name_len;
	if (len < offsetof(struct ctl_ha_msg_iid, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_iid, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	if (port->wwpn_iid[iid].in_use != 0 &&
	    msg->iid.in_use == 0)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (len < sizeof(msg->login)) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, sizeof(msg->login));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
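	/*
	 * Both sides must be built/tuned with identical limits, since HA
	 * messages carry raw LUN, port and initiator indexes that each
	 * side uses to size and index its own tables.
	 */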
	if (msg->login.max_luns != ctl_max_luns ||
	    msg->login.max_ports != ctl_max_ports ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	u_int i;
	uint32_t initidx, targ_lun;

	if (len < offsetof(struct ctl_ha_msg_mode, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_mode, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->mode.page_len;
	if (len < offsetof(struct ctl_ha_msg_mode, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_mode, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    min(lun->mode_pages.index[i].page_len, msg->mode.page_len));
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc = control_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d len %d\n",
		    msg->hdr.msg_type, param));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.remote_io = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
			io->scsiio.priority = msg->scsi.priority;
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			    CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
				    entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
				getbinuptime(&io->io_hdr.dma_start_bt);
#endif
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				CTL_RSGL(io) = sgl;
				CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
				    msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
				    msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
				    msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
				    msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
				    io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				    __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
1636 */ 1637 io = msg->hdr.serializing_sc; 1638 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1639 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1640 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1641 io->io_hdr.port_status = msg->scsi.port_status; 1642 io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; 1643 if (msg->hdr.status != CTL_STATUS_NONE) { 1644 io->io_hdr.status = msg->hdr.status; 1645 io->scsiio.scsi_status = msg->scsi.scsi_status; 1646 io->scsiio.sense_len = msg->scsi.sense_len; 1647 memcpy(&io->scsiio.sense_data, 1648 &msg->scsi.sense_data, 1649 msg->scsi.sense_len); 1650 if (msg->hdr.status == CTL_SUCCESS) 1651 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1652 } 1653 ctl_enqueue_isc(io); 1654 break; 1655 } 1656 1657 /* Preformed on Originating SC, SER_ONLY mode */ 1658 case CTL_MSG_R2R: 1659 io = msg->hdr.original_sc; 1660 if (io == NULL) { 1661 printf("%s: original_sc == NULL!\n", 1662 __func__); 1663 break; 1664 } 1665 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1666 io->io_hdr.msg_type = CTL_MSG_R2R; 1667 io->io_hdr.remote_io = msg->hdr.serializing_sc; 1668 ctl_enqueue_isc(io); 1669 break; 1670 1671 /* 1672 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1673 * mode. 1674 * Performed on the Originating (i.e. secondary) SC in XFER 1675 * mode 1676 */ 1677 case CTL_MSG_FINISH_IO: 1678 if (softc->ha_mode == CTL_HA_MODE_XFER) 1679 ctl_isc_handler_finish_xfer(softc, msg); 1680 else 1681 ctl_isc_handler_finish_ser_only(softc, msg); 1682 break; 1683 1684 /* Preformed on Originating SC */ 1685 case CTL_MSG_BAD_JUJU: 1686 io = msg->hdr.original_sc; 1687 if (io == NULL) { 1688 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1689 __func__); 1690 break; 1691 } 1692 ctl_copy_sense_data(msg, io); 1693 /* 1694 * IO should have already been cleaned up on other 1695 * SC so clear this flag so we won't send a message 1696 * back to finish the IO there. 
1697 */ 1698 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1699 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1700 1701 /* io = msg->hdr.serializing_sc; */ 1702 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1703 ctl_enqueue_isc(io); 1704 break; 1705 1706 /* Handle resets sent from the other side */ 1707 case CTL_MSG_MANAGE_TASKS: { 1708 struct ctl_taskio *taskio; 1709 taskio = (struct ctl_taskio *)ctl_alloc_io( 1710 softc->othersc_pool); 1711 ctl_zero_io((union ctl_io *)taskio); 1712 taskio->io_hdr.io_type = CTL_IO_TASK; 1713 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1714 taskio->io_hdr.nexus = msg->hdr.nexus; 1715 taskio->task_action = msg->task.task_action; 1716 taskio->tag_num = msg->task.tag_num; 1717 taskio->tag_type = msg->task.tag_type; 1718 #ifdef CTL_TIME_IO 1719 taskio->io_hdr.start_time = time_uptime; 1720 getbinuptime(&taskio->io_hdr.start_bt); 1721 #endif /* CTL_TIME_IO */ 1722 ctl_run_task((union ctl_io *)taskio); 1723 break; 1724 } 1725 /* Persistent Reserve action which needs attention */ 1726 case CTL_MSG_PERS_ACTION: 1727 presio = (struct ctl_prio *)ctl_alloc_io( 1728 softc->othersc_pool); 1729 ctl_zero_io((union ctl_io *)presio); 1730 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1731 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1732 presio->io_hdr.nexus = msg->hdr.nexus; 1733 presio->pr_msg = msg->pr; 1734 ctl_enqueue_isc((union ctl_io *)presio); 1735 break; 1736 case CTL_MSG_UA: 1737 ctl_isc_ua(softc, msg, param); 1738 break; 1739 case CTL_MSG_PORT_SYNC: 1740 ctl_isc_port_sync(softc, msg, param); 1741 break; 1742 case CTL_MSG_LUN_SYNC: 1743 ctl_isc_lun_sync(softc, msg, param); 1744 break; 1745 case CTL_MSG_IID_SYNC: 1746 ctl_isc_iid_sync(softc, msg, param); 1747 break; 1748 case CTL_MSG_LOGIN: 1749 ctl_isc_login(softc, msg, param); 1750 break; 1751 case CTL_MSG_MODE_SYNC: 1752 ctl_isc_mode_sync(softc, msg, param); 1753 break; 1754 default: 1755 printf("Received HA message of unknown type %d\n", 1756 msg->hdr.msg_type); 1757 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1758 break; 1759 } 1760 if (msg != &msgbuf) 1761 free(msg, M_CTL); 1762 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1763 printf("CTL: HA link status changed from %d to %d\n", 1764 softc->ha_link, param); 1765 if (param == softc->ha_link) 1766 return; 1767 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1768 softc->ha_link = param; 1769 ctl_isc_ha_link_down(softc); 1770 } else { 1771 softc->ha_link = param; 1772 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1773 ctl_isc_ha_link_up(softc); 1774 } 1775 return; 1776 } else { 1777 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1778 return; 1779 } 1780 } 1781 1782 static void 1783 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1784 { 1785 1786 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1787 src->scsi.sense_len); 1788 dest->scsiio.scsi_status = src->scsi.scsi_status; 1789 dest->scsiio.sense_len = src->scsi.sense_len; 1790 dest->io_hdr.status = src->hdr.status; 1791 } 1792 1793 static void 1794 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1795 { 1796 1797 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1798 src->scsiio.sense_len); 1799 dest->scsi.scsi_status = src->scsiio.scsi_status; 1800 dest->scsi.sense_len = src->scsiio.sense_len; 1801 dest->hdr.status = src->io_hdr.status; 1802 } 1803 1804 void 1805 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1806 { 1807 struct ctl_softc *softc = lun->ctl_softc; 1808 ctl_ua_type *pu; 1809 1810 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1811 return; 1812 mtx_assert(&lun->lun_lock, MA_OWNED); 1813 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1814 if (pu == NULL) 1815 return; 1816 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1817 } 1818 1819 void 1820 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1821 { 1822 int i; 1823 1824 mtx_assert(&lun->lun_lock, MA_OWNED); 1825 if (lun->pending_ua[port] == NULL) 1826 return; 1827 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1828 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1829 continue; 1830 lun->pending_ua[port][i] |= ua; 1831 } 1832 } 1833 1834 void 1835 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1836 { 1837 struct ctl_softc *softc = lun->ctl_softc; 1838 int i; 1839 1840 mtx_assert(&lun->lun_lock, MA_OWNED); 1841 for (i = softc->port_min; i < softc->port_max; i++) 1842 ctl_est_ua_port(lun, i, except, ua); 1843 } 1844 1845 void 1846 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1847 { 1848 struct ctl_softc *softc = lun->ctl_softc; 1849 ctl_ua_type *pu; 1850 1851 if (initidx < softc->init_min || initidx >= softc->init_max) 1852 return; 1853 mtx_assert(&lun->lun_lock, MA_OWNED); 1854 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1855 if (pu == NULL) 1856 return; 1857 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1858 } 1859 1860 void 1861 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1862 { 1863 struct ctl_softc *softc = lun->ctl_softc; 1864 int i, j; 1865 1866 mtx_assert(&lun->lun_lock, MA_OWNED); 1867 for (i = softc->port_min; i < softc->port_max; i++) { 1868 if (lun->pending_ua[i] == NULL) 1869 continue; 1870 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1871 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1872 continue; 1873 lun->pending_ua[i][j] &= ~ua; 1874 } 1875 } 1876 } 1877 1878 void 1879 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1880 ctl_ua_type ua_type) 1881 { 1882 struct ctl_lun *lun; 1883 1884 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1885 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1886 mtx_lock(&lun->lun_lock); 1887 ctl_clr_ua(lun, initidx, ua_type); 1888 mtx_unlock(&lun->lun_lock); 1889 } 1890 } 1891 1892 static int 1893 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1894 { 1895 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1896 struct ctl_lun *lun; 1897 struct ctl_lun_req ireq; 1898 int error, value; 1899 1900 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1901 error = sysctl_handle_int(oidp, &value, 0, req); 1902 if ((error != 0) || (req->newptr == NULL)) 1903 return (error); 1904 1905 mtx_lock(&softc->ctl_lock); 1906 if (value == 0) 1907 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1908 else 1909 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1910 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1911 mtx_unlock(&softc->ctl_lock); 1912 bzero(&ireq, sizeof(ireq)); 1913 ireq.reqtype = CTL_LUNREQ_MODIFY; 1914 ireq.reqdata.modify.lun_id = lun->lun; 1915 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1916 curthread); 1917 if (ireq.status != CTL_LUN_OK) { 1918 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1919 __func__, ireq.status, ireq.error_str); 1920 } 1921 mtx_lock(&softc->ctl_lock); 1922 } 1923 mtx_unlock(&softc->ctl_lock); 1924 return (0); 1925 } 1926 1927 static int 1928 ctl_init(void) 1929 { 1930 struct make_dev_args args; 1931 struct ctl_softc *softc; 1932 int i, error; 1933 1934 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1935 M_WAITOK | M_ZERO); 1936 1937 make_dev_args_init(&args); 1938 args.mda_devsw = &ctl_cdevsw; 1939 args.mda_uid = UID_ROOT; 1940 args.mda_gid = GID_OPERATOR; 1941 args.mda_mode = 0600; 1942 args.mda_si_drv1 = softc; 1943 args.mda_si_drv2 = NULL; 1944 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1945 if (error != 0) { 1946 free(softc, M_DEVBUF); 1947 control_softc = NULL; 1948 return (error); 1949 } 1950 1951 sysctl_ctx_init(&softc->sysctl_ctx); 1952 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1953 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1954 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); 1955 1956 if (softc->sysctl_tree == NULL) { 1957 printf("%s: unable to allocate sysctl tree\n", __func__); 1958 destroy_dev(softc->dev); 1959 free(softc, M_DEVBUF); 1960 control_softc = NULL; 1961 return (ENOMEM); 1962 } 1963 1964 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1965 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1966 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1967 softc->flags = 0; 1968 1969 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1970 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1971 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1972 1973 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1974 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1975 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1976 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1977 } 1978 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1979 M_DEVBUF, M_WAITOK | M_ZERO); 1980 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1981 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1982 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1983 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1984 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1985 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1986 } 1987 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1988 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1989 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1990 M_DEVBUF, M_WAITOK | M_ZERO); 1991 1992 /* 1993 * In Copan's HA scheme, the "master" and "slave" roles are 1994 * figured out through the slot the controller is in. Although it 1995 * is an active/active system, someone has to be in charge. 
1996 */ 1997 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1998 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1999 "HA head ID (0 - no HA)"); 2000 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 2001 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 2002 softc->is_single = 1; 2003 softc->port_cnt = ctl_max_ports; 2004 softc->port_min = 0; 2005 } else { 2006 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 2007 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 2008 } 2009 softc->port_max = softc->port_min + softc->port_cnt; 2010 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 2011 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 2012 2013 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 2014 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 2015 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 2016 2017 STAILQ_INIT(&softc->lun_list); 2018 STAILQ_INIT(&softc->fe_list); 2019 STAILQ_INIT(&softc->port_list); 2020 STAILQ_INIT(&softc->be_list); 2021 ctl_tpc_init(softc); 2022 2023 if (worker_threads <= 0) 2024 worker_threads = max(1, mp_ncpus / 4); 2025 if (worker_threads > CTL_MAX_THREADS) 2026 worker_threads = CTL_MAX_THREADS; 2027 2028 for (i = 0; i < worker_threads; i++) { 2029 struct ctl_thread *thr = &softc->threads[i]; 2030 2031 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 2032 thr->ctl_softc = softc; 2033 STAILQ_INIT(&thr->incoming_queue); 2034 STAILQ_INIT(&thr->rtr_queue); 2035 STAILQ_INIT(&thr->done_queue); 2036 STAILQ_INIT(&thr->isc_queue); 2037 2038 error = kproc_kthread_add(ctl_work_thread, thr, 2039 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 2040 if (error != 0) { 2041 printf("error creating CTL work thread!\n"); 2042 return (error); 2043 } 2044 } 2045 error = kproc_kthread_add(ctl_thresh_thread, softc, 2046 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 2047 if (error != 0) { 2048 printf("error creating CTL threshold thread!\n"); 2049 return (error); 2050 } 2051 2052 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 2053 OID_AUTO, "ha_role", 2054 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 2055 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 2056 2057 if (softc->is_single == 0) { 2058 if (ctl_frontend_register(&ha_frontend) != 0) 2059 softc->is_single = 1; 2060 } 2061 return (0); 2062 } 2063 2064 static int 2065 ctl_shutdown(void) 2066 { 2067 struct ctl_softc *softc = control_softc; 2068 int i; 2069 2070 if (softc->is_single == 0) 2071 ctl_frontend_deregister(&ha_frontend); 2072 2073 destroy_dev(softc->dev); 2074 2075 /* Shutdown CTL threads. 
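Set the shutdown flag, wake each worker and wait (via pause()) for its thread pointer to clear, then wait for the threshold thread the same way.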
*/ 2076 softc->shutdown = 1; 2077 for (i = 0; i < worker_threads; i++) { 2078 struct ctl_thread *thr = &softc->threads[i]; 2079 while (thr->thread != NULL) { 2080 wakeup(thr); 2081 if (thr->thread != NULL) 2082 pause("CTL thr shutdown", 1); 2083 } 2084 mtx_destroy(&thr->queue_lock); 2085 } 2086 while (softc->thresh_thread != NULL) { 2087 wakeup(softc->thresh_thread); 2088 if (softc->thresh_thread != NULL) 2089 pause("CTL thr shutdown", 1); 2090 } 2091 2092 ctl_tpc_shutdown(softc); 2093 uma_zdestroy(softc->io_zone); 2094 mtx_destroy(&softc->ctl_lock); 2095 2096 free(softc->ctl_luns, M_DEVBUF); 2097 free(softc->ctl_lun_mask, M_DEVBUF); 2098 free(softc->ctl_port_mask, M_DEVBUF); 2099 free(softc->ctl_ports, M_DEVBUF); 2100 2101 sysctl_ctx_free(&softc->sysctl_ctx); 2102 2103 free(softc, M_DEVBUF); 2104 control_softc = NULL; 2105 return (0); 2106 } 2107 2108 static int 2109 ctl_module_event_handler(module_t mod, int what, void *arg) 2110 { 2111 2112 switch (what) { 2113 case MOD_LOAD: 2114 return (ctl_init()); 2115 case MOD_UNLOAD: 2116 return (ctl_shutdown()); 2117 default: 2118 return (EOPNOTSUPP); 2119 } 2120 } 2121 2122 /* 2123 * XXX KDM should we do some access checks here? Bump a reference count to 2124 * prevent a CTL module from being unloaded while someone has it open? 2125 */ 2126 static int 2127 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2128 { 2129 return (0); 2130 } 2131 2132 static int 2133 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2134 { 2135 return (0); 2136 } 2137 2138 /* 2139 * Remove an initiator by port number and initiator ID. 2140 * Returns 0 for success, -1 for failure. 2141 */ 2142 int 2143 ctl_remove_initiator(struct ctl_port *port, int iid) 2144 { 2145 struct ctl_softc *softc = port->ctl_softc; 2146 int last; 2147 2148 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2149 2150 if (iid > CTL_MAX_INIT_PER_PORT) { 2151 printf("%s: initiator ID %u > maximum %u!\n", 2152 __func__, iid, CTL_MAX_INIT_PER_PORT); 2153 return (-1); 2154 } 2155 2156 mtx_lock(&softc->ctl_lock); 2157 last = (--port->wwpn_iid[iid].in_use == 0); 2158 port->wwpn_iid[iid].last_use = time_uptime; 2159 mtx_unlock(&softc->ctl_lock); 2160 if (last) 2161 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2162 ctl_isc_announce_iid(port, iid); 2163 2164 return (0); 2165 } 2166 2167 /* 2168 * Add an initiator to the initiator map. 2169 * Returns iid for success, < 0 for failure.
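* The slot is chosen by first matching the supplied WWPN or name against an existing entry, then taking any completely unused slot, and finally recycling the least recently used idle slot.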
2170 */ 2171 int 2172 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2173 { 2174 struct ctl_softc *softc = port->ctl_softc; 2175 time_t best_time; 2176 int i, best; 2177 2178 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2179 2180 if (iid >= CTL_MAX_INIT_PER_PORT) { 2181 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2182 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2183 free(name, M_CTL); 2184 return (-1); 2185 } 2186 2187 mtx_lock(&softc->ctl_lock); 2188 2189 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2190 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2191 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2192 iid = i; 2193 break; 2194 } 2195 if (name != NULL && port->wwpn_iid[i].name != NULL && 2196 strcmp(name, port->wwpn_iid[i].name) == 0) { 2197 iid = i; 2198 break; 2199 } 2200 } 2201 } 2202 2203 if (iid < 0) { 2204 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2205 if (port->wwpn_iid[i].in_use == 0 && 2206 port->wwpn_iid[i].wwpn == 0 && 2207 port->wwpn_iid[i].name == NULL) { 2208 iid = i; 2209 break; 2210 } 2211 } 2212 } 2213 2214 if (iid < 0) { 2215 best = -1; 2216 best_time = INT32_MAX; 2217 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2218 if (port->wwpn_iid[i].in_use == 0) { 2219 if (port->wwpn_iid[i].last_use < best_time) { 2220 best = i; 2221 best_time = port->wwpn_iid[i].last_use; 2222 } 2223 } 2224 } 2225 iid = best; 2226 } 2227 2228 if (iid < 0) { 2229 mtx_unlock(&softc->ctl_lock); 2230 free(name, M_CTL); 2231 return (-2); 2232 } 2233 2234 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2235 /* 2236 * This is not an error yet. 2237 */ 2238 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2239 #if 0 2240 printf("%s: port %d iid %u WWPN %#jx arrived" 2241 " again\n", __func__, port->targ_port, 2242 iid, (uintmax_t)wwpn); 2243 #endif 2244 goto take; 2245 } 2246 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2247 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2248 #if 0 2249 printf("%s: port %d iid %u name '%s' arrived" 2250 " again\n", __func__, port->targ_port, 2251 iid, name); 2252 #endif 2253 goto take; 2254 } 2255 2256 /* 2257 * This is an error, but what do we do about it? The 2258 * driver is telling us we have a new WWPN for this 2259 * initiator ID, so we pretty much need to use it. 
2260 */ 2261 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2262 " but WWPN %#jx '%s' is still at that address\n", 2263 __func__, port->targ_port, iid, wwpn, name, 2264 (uintmax_t)port->wwpn_iid[iid].wwpn, 2265 port->wwpn_iid[iid].name); 2266 } 2267 take: 2268 free(port->wwpn_iid[iid].name, M_CTL); 2269 port->wwpn_iid[iid].name = name; 2270 port->wwpn_iid[iid].wwpn = wwpn; 2271 port->wwpn_iid[iid].in_use++; 2272 mtx_unlock(&softc->ctl_lock); 2273 ctl_isc_announce_iid(port, iid); 2274 2275 return (iid); 2276 } 2277 2278 static int 2279 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2280 { 2281 int len; 2282 2283 switch (port->port_type) { 2284 case CTL_PORT_FC: 2285 { 2286 struct scsi_transportid_fcp *id = 2287 (struct scsi_transportid_fcp *)buf; 2288 if (port->wwpn_iid[iid].wwpn == 0) 2289 return (0); 2290 memset(id, 0, sizeof(*id)); 2291 id->format_protocol = SCSI_PROTO_FC; 2292 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2293 return (sizeof(*id)); 2294 } 2295 case CTL_PORT_ISCSI: 2296 { 2297 struct scsi_transportid_iscsi_port *id = 2298 (struct scsi_transportid_iscsi_port *)buf; 2299 if (port->wwpn_iid[iid].name == NULL) 2300 return (0); 2301 memset(id, 0, 256); 2302 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2303 SCSI_PROTO_ISCSI; 2304 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2305 len = roundup2(min(len, 252), 4); 2306 scsi_ulto2b(len, id->additional_length); 2307 return (sizeof(*id) + len); 2308 } 2309 case CTL_PORT_SAS: 2310 { 2311 struct scsi_transportid_sas *id = 2312 (struct scsi_transportid_sas *)buf; 2313 if (port->wwpn_iid[iid].wwpn == 0) 2314 return (0); 2315 memset(id, 0, sizeof(*id)); 2316 id->format_protocol = SCSI_PROTO_SAS; 2317 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2318 return (sizeof(*id)); 2319 } 2320 default: 2321 { 2322 struct scsi_transportid_spi *id = 2323 (struct scsi_transportid_spi *)buf; 2324 memset(id, 0, sizeof(*id)); 2325 id->format_protocol = SCSI_PROTO_SPI; 2326 scsi_ulto2b(iid, id->scsi_addr); 2327 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2328 return (sizeof(*id)); 2329 } 2330 } 2331 } 2332 2333 /* 2334 * Serialize a command that went down the "wrong" side, and so was sent to 2335 * this controller for execution. The logic is a little different than the 2336 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2337 * sent back to the other side, but in the success case, we execute the 2338 * command on this side (XFER mode) or tell the other side to execute it 2339 * (SER_ONLY mode). 2340 */ 2341 static void 2342 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2343 { 2344 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2345 struct ctl_port *port = CTL_PORT(ctsio); 2346 union ctl_ha_msg msg_info; 2347 struct ctl_lun *lun; 2348 const struct ctl_cmd_entry *entry; 2349 union ctl_io *bio; 2350 uint32_t targ_lun; 2351 2352 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2353 2354 /* Make sure that we know about this port. */ 2355 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2356 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2357 /*retry_count*/ 1); 2358 goto badjuju; 2359 } 2360 2361 /* Make sure that we know about this LUN. */ 2362 mtx_lock(&softc->ctl_lock); 2363 if (targ_lun >= ctl_max_luns || 2364 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2365 mtx_unlock(&softc->ctl_lock); 2366 2367 /* 2368 * The other node would not send this request to us unless 2369 * received announce that we are primary node for this LUN. 
2370 * If this LUN does not exist now, it is probably the result of 2371 * a race, so respond to the initiator in the most opaque way. 2372 */ 2373 ctl_set_busy(ctsio); 2374 goto badjuju; 2375 } 2376 mtx_lock(&lun->lun_lock); 2377 mtx_unlock(&softc->ctl_lock); 2378 2379 /* 2380 * If the LUN is invalid, pretend that it doesn't exist. 2381 * It will go away as soon as all pending I/Os have completed. 2382 */ 2383 if (lun->flags & CTL_LUN_DISABLED) { 2384 mtx_unlock(&lun->lun_lock); 2385 ctl_set_busy(ctsio); 2386 goto badjuju; 2387 } 2388 2389 entry = ctl_get_cmd_entry(ctsio, NULL); 2390 ctsio->seridx = entry->seridx; 2391 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2392 mtx_unlock(&lun->lun_lock); 2393 goto badjuju; 2394 } 2395 2396 CTL_LUN(ctsio) = lun; 2397 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2398 2399 /* 2400 * Every I/O goes into the OOA queue for a 2401 * particular LUN, and stays there until completion. 2402 */ 2403 #ifdef CTL_TIME_IO 2404 if (LIST_EMPTY(&lun->ooa_queue)) 2405 lun->idle_time += getsbinuptime() - lun->last_busy; 2406 #endif 2407 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2408 2409 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); 2410 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 2411 case CTL_ACTION_PASS: 2412 case CTL_ACTION_SKIP: 2413 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2414 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2415 ctl_enqueue_rtr((union ctl_io *)ctsio); 2416 mtx_unlock(&lun->lun_lock); 2417 } else { 2418 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2419 mtx_unlock(&lun->lun_lock); 2420 2421 /* send msg back to other side */ 2422 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2423 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2424 msg_info.hdr.msg_type = CTL_MSG_R2R; 2425 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2426 sizeof(msg_info.hdr), M_WAITOK); 2427 } 2428 break; 2429 case CTL_ACTION_BLOCK: 2430 ctsio->io_hdr.blocker = bio; 2431 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 2432 blocked_links); 2433 mtx_unlock(&lun->lun_lock); 2434 break; 2435 case CTL_ACTION_OVERLAP: 2436 LIST_REMOVE(&ctsio->io_hdr, ooa_links); 2437 mtx_unlock(&lun->lun_lock); 2438 ctl_set_overlapped_cmd(ctsio); 2439 goto badjuju; 2440 case CTL_ACTION_OVERLAP_TAG: 2441 LIST_REMOVE(&ctsio->io_hdr, ooa_links); 2442 mtx_unlock(&lun->lun_lock); 2443 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 2444 badjuju: 2445 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2446 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2447 msg_info.hdr.serializing_sc = NULL; 2448 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2449 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2450 sizeof(msg_info.scsi), M_WAITOK); 2451 ctl_free_io((union ctl_io *)ctsio); 2452 break; 2453 default: 2454 __assert_unreachable(); 2455 } 2456 } 2457 2458 /* 2459 * Fill the caller's kern_entries array with the commands currently on this LUN's OOA (Order Of Arrival) queue; commands that do not fit are only counted.
2460 */ 2461 static void 2462 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2463 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2464 { 2465 struct ctl_io_hdr *ioh; 2466 2467 mtx_lock(&lun->lun_lock); 2468 ioh = LIST_FIRST(&lun->ooa_queue); 2469 if (ioh == NULL) { 2470 mtx_unlock(&lun->lun_lock); 2471 return; 2472 } 2473 while (LIST_NEXT(ioh, ooa_links) != NULL) 2474 ioh = LIST_NEXT(ioh, ooa_links); 2475 for ( ; ioh; ioh = LIST_PREV(ioh, &lun->ooa_queue, ctl_io_hdr, ooa_links)) { 2476 union ctl_io *io = (union ctl_io *)ioh; 2477 struct ctl_ooa_entry *entry; 2478 2479 /* 2480 * If we've got more than we can fit, just count the 2481 * remaining entries. 2482 */ 2483 if (*cur_fill_num >= ooa_hdr->alloc_num) { 2484 (*cur_fill_num)++; 2485 continue; 2486 } 2487 2488 entry = &kern_entries[*cur_fill_num]; 2489 2490 entry->tag_num = io->scsiio.tag_num; 2491 entry->tag_type = io->scsiio.tag_type; 2492 entry->lun_num = lun->lun; 2493 #ifdef CTL_TIME_IO 2494 entry->start_bt = io->io_hdr.start_bt; 2495 #endif 2496 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2497 entry->cdb_len = io->scsiio.cdb_len; 2498 if (io->io_hdr.blocker != NULL) 2499 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2500 2501 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2502 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2503 2504 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2505 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2506 2507 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2508 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2509 2510 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2511 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2512 2513 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) 2514 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; 2515 2516 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) 2517 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; 2518 (*cur_fill_num)++; 2519 } 2520 mtx_unlock(&lun->lun_lock); 2521 } 2522 2523 /* 2524 * Escape characters that are illegal or not recommended in XML. 
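* For example, a serial number such as A<B&C is emitted as A&lt;B&amp;C; only '&', '<' and '>' are rewritten, all other bytes are copied through unchanged.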
2525 */ 2526 int 2527 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2528 { 2529 char *end = str + size; 2530 int retval; 2531 2532 retval = 0; 2533 2534 for (; *str && str < end; str++) { 2535 switch (*str) { 2536 case '&': 2537 retval = sbuf_cat(sb, "&amp;"); 2538 break; 2539 case '>': 2540 retval = sbuf_cat(sb, "&gt;"); 2541 break; 2542 case '<': 2543 retval = sbuf_cat(sb, "&lt;"); 2544 break; 2545 default: 2546 retval = sbuf_putc(sb, *str); 2547 break; 2548 } 2549 2550 if (retval != 0) 2551 break; 2552 } 2553 2554 return (retval); 2555 } 2556 2557 static void 2558 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2559 { 2560 struct scsi_vpd_id_descriptor *desc; 2561 int i; 2562 2563 if (id == NULL || id->len < 4) 2564 return; 2565 desc = (struct scsi_vpd_id_descriptor *)id->data; 2566 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2567 case SVPD_ID_TYPE_T10: 2568 sbuf_cat(sb, "t10."); 2569 break; 2570 case SVPD_ID_TYPE_EUI64: 2571 sbuf_cat(sb, "eui."); 2572 break; 2573 case SVPD_ID_TYPE_NAA: 2574 sbuf_cat(sb, "naa."); 2575 break; 2576 case SVPD_ID_TYPE_SCSI_NAME: 2577 break; 2578 } 2579 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2580 case SVPD_ID_CODESET_BINARY: 2581 for (i = 0; i < desc->length; i++) 2582 sbuf_printf(sb, "%02x", desc->identifier[i]); 2583 break; 2584 case SVPD_ID_CODESET_ASCII: 2585 sbuf_printf(sb, "%.*s", (int)desc->length, 2586 (char *)desc->identifier); 2587 break; 2588 case SVPD_ID_CODESET_UTF8: 2589 sbuf_cat(sb, (char *)desc->identifier); 2590 break; 2591 } 2592 } 2593 2594 static int 2595 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2596 struct thread *td) 2597 { 2598 struct ctl_softc *softc = dev->si_drv1; 2599 struct ctl_port *port; 2600 struct ctl_lun *lun; 2601 int retval; 2602 2603 retval = 0; 2604 2605 switch (cmd) { 2606 case CTL_IO: 2607 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2608 break; 2609 case CTL_ENABLE_PORT: 2610 case CTL_DISABLE_PORT: 2611 case CTL_SET_PORT_WWNS: { 2612 struct ctl_port *port; 2613 struct ctl_port_entry *entry; 2614 2615 entry = (struct ctl_port_entry *)addr; 2616 2617 mtx_lock(&softc->ctl_lock); 2618 STAILQ_FOREACH(port, &softc->port_list, links) { 2619 int action, done; 2620 2621 if (port->targ_port < softc->port_min || 2622 port->targ_port >= softc->port_max) 2623 continue; 2624 2625 action = 0; 2626 done = 0; 2627 if ((entry->port_type == CTL_PORT_NONE) 2628 && (entry->targ_port == port->targ_port)) { 2629 /* 2630 * If the user only wants to enable or 2631 * disable or set WWNs on a specific port, 2632 * do the operation and we're done. 2633 */ 2634 action = 1; 2635 done = 1; 2636 } else if (entry->port_type & port->port_type) { 2637 /* 2638 * Compare the user's type mask with the 2639 * particular frontend type to see if we 2640 * have a match. 2641 */ 2642 action = 1; 2643 done = 0; 2644 2645 /* 2646 * Make sure the user isn't trying to set 2647 * WWNs on multiple ports at the same time. 2648 */ 2649 if (cmd == CTL_SET_PORT_WWNS) { 2650 printf("%s: Can't set WWNs on " 2651 "multiple ports\n", __func__); 2652 retval = EINVAL; 2653 break; 2654 } 2655 } 2656 if (action == 0) 2657 continue; 2658 2659 /* 2660 * XXX KDM we have to drop the lock here, because 2661 * the online/offline operations can potentially 2662 * block.
We need to reference count the frontends 2663 * so they can't go away, 2664 */ 2665 if (cmd == CTL_ENABLE_PORT) { 2666 mtx_unlock(&softc->ctl_lock); 2667 ctl_port_online(port); 2668 mtx_lock(&softc->ctl_lock); 2669 } else if (cmd == CTL_DISABLE_PORT) { 2670 mtx_unlock(&softc->ctl_lock); 2671 ctl_port_offline(port); 2672 mtx_lock(&softc->ctl_lock); 2673 } else if (cmd == CTL_SET_PORT_WWNS) { 2674 ctl_port_set_wwns(port, 2675 (entry->flags & CTL_PORT_WWNN_VALID) ? 2676 1 : 0, entry->wwnn, 2677 (entry->flags & CTL_PORT_WWPN_VALID) ? 2678 1 : 0, entry->wwpn); 2679 } 2680 if (done != 0) 2681 break; 2682 } 2683 mtx_unlock(&softc->ctl_lock); 2684 break; 2685 } 2686 case CTL_GET_OOA: { 2687 struct ctl_ooa *ooa_hdr; 2688 struct ctl_ooa_entry *entries; 2689 uint32_t cur_fill_num; 2690 2691 ooa_hdr = (struct ctl_ooa *)addr; 2692 2693 if ((ooa_hdr->alloc_len == 0) 2694 || (ooa_hdr->alloc_num == 0)) { 2695 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2696 "must be non-zero\n", __func__, 2697 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2698 retval = EINVAL; 2699 break; 2700 } 2701 2702 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2703 sizeof(struct ctl_ooa_entry))) { 2704 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2705 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2706 __func__, ooa_hdr->alloc_len, 2707 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2708 retval = EINVAL; 2709 break; 2710 } 2711 2712 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2713 if (entries == NULL) { 2714 printf("%s: could not allocate %d bytes for OOA " 2715 "dump\n", __func__, ooa_hdr->alloc_len); 2716 retval = ENOMEM; 2717 break; 2718 } 2719 2720 mtx_lock(&softc->ctl_lock); 2721 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2722 (ooa_hdr->lun_num >= ctl_max_luns || 2723 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2724 mtx_unlock(&softc->ctl_lock); 2725 free(entries, M_CTL); 2726 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2727 __func__, (uintmax_t)ooa_hdr->lun_num); 2728 retval = EINVAL; 2729 break; 2730 } 2731 2732 cur_fill_num = 0; 2733 2734 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2735 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2736 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2737 ooa_hdr, entries); 2738 } 2739 } else { 2740 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2741 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2742 entries); 2743 } 2744 mtx_unlock(&softc->ctl_lock); 2745 2746 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2747 ooa_hdr->fill_len = ooa_hdr->fill_num * 2748 sizeof(struct ctl_ooa_entry); 2749 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2750 if (retval != 0) { 2751 printf("%s: error copying out %d bytes for OOA dump\n", 2752 __func__, ooa_hdr->fill_len); 2753 } 2754 2755 getbinuptime(&ooa_hdr->cur_bt); 2756 2757 if (cur_fill_num > ooa_hdr->alloc_num) { 2758 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2759 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2760 } else { 2761 ooa_hdr->dropped_num = 0; 2762 ooa_hdr->status = CTL_OOA_OK; 2763 } 2764 2765 free(entries, M_CTL); 2766 break; 2767 } 2768 case CTL_DELAY_IO: { 2769 struct ctl_io_delay_info *delay_info; 2770 2771 delay_info = (struct ctl_io_delay_info *)addr; 2772 2773 #ifdef CTL_IO_DELAY 2774 mtx_lock(&softc->ctl_lock); 2775 if (delay_info->lun_id >= ctl_max_luns || 2776 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2777 mtx_unlock(&softc->ctl_lock); 2778 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2779 break; 2780 } 2781 
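/* Hand off from the global CTL lock to the per-LUN lock: taking lun_lock before dropping ctl_lock keeps the LUN from going away while we update its delay settings. */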
mtx_lock(&lun->lun_lock); 2782 mtx_unlock(&softc->ctl_lock); 2783 delay_info->status = CTL_DELAY_STATUS_OK; 2784 switch (delay_info->delay_type) { 2785 case CTL_DELAY_TYPE_CONT: 2786 case CTL_DELAY_TYPE_ONESHOT: 2787 break; 2788 default: 2789 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2790 break; 2791 } 2792 switch (delay_info->delay_loc) { 2793 case CTL_DELAY_LOC_DATAMOVE: 2794 lun->delay_info.datamove_type = delay_info->delay_type; 2795 lun->delay_info.datamove_delay = delay_info->delay_secs; 2796 break; 2797 case CTL_DELAY_LOC_DONE: 2798 lun->delay_info.done_type = delay_info->delay_type; 2799 lun->delay_info.done_delay = delay_info->delay_secs; 2800 break; 2801 default: 2802 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2803 break; 2804 } 2805 mtx_unlock(&lun->lun_lock); 2806 #else 2807 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2808 #endif /* CTL_IO_DELAY */ 2809 break; 2810 } 2811 case CTL_ERROR_INJECT: { 2812 struct ctl_error_desc *err_desc, *new_err_desc; 2813 2814 err_desc = (struct ctl_error_desc *)addr; 2815 2816 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2817 M_WAITOK | M_ZERO); 2818 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2819 2820 mtx_lock(&softc->ctl_lock); 2821 if (err_desc->lun_id >= ctl_max_luns || 2822 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2823 mtx_unlock(&softc->ctl_lock); 2824 free(new_err_desc, M_CTL); 2825 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2826 __func__, (uintmax_t)err_desc->lun_id); 2827 retval = EINVAL; 2828 break; 2829 } 2830 mtx_lock(&lun->lun_lock); 2831 mtx_unlock(&softc->ctl_lock); 2832 2833 /* 2834 * We could do some checking here to verify the validity 2835 * of the request, but given the complexity of error 2836 * injection requests, the checking logic would be fairly 2837 * complex. 2838 * 2839 * For now, if the request is invalid, it just won't get 2840 * executed and might get deleted. 2841 */ 2842 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2843 2844 /* 2845 * XXX KDM check to make sure the serial number is unique, 2846 * in case we somehow manage to wrap. That shouldn't 2847 * happen for a very long time, but it's the right thing to 2848 * do. 
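* (Serials come from a per-LUN counter that is bumped under the LUN lock, so they remain unique unless error_serial ever wraps.)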
2849 */ 2850 new_err_desc->serial = lun->error_serial; 2851 err_desc->serial = lun->error_serial; 2852 lun->error_serial++; 2853 2854 mtx_unlock(&lun->lun_lock); 2855 break; 2856 } 2857 case CTL_ERROR_INJECT_DELETE: { 2858 struct ctl_error_desc *delete_desc, *desc, *desc2; 2859 int delete_done; 2860 2861 delete_desc = (struct ctl_error_desc *)addr; 2862 delete_done = 0; 2863 2864 mtx_lock(&softc->ctl_lock); 2865 if (delete_desc->lun_id >= ctl_max_luns || 2866 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2867 mtx_unlock(&softc->ctl_lock); 2868 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2869 __func__, (uintmax_t)delete_desc->lun_id); 2870 retval = EINVAL; 2871 break; 2872 } 2873 mtx_lock(&lun->lun_lock); 2874 mtx_unlock(&softc->ctl_lock); 2875 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2876 if (desc->serial != delete_desc->serial) 2877 continue; 2878 2879 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2880 links); 2881 free(desc, M_CTL); 2882 delete_done = 1; 2883 } 2884 mtx_unlock(&lun->lun_lock); 2885 if (delete_done == 0) { 2886 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2887 "error serial %ju on LUN %u\n", __func__, 2888 delete_desc->serial, delete_desc->lun_id); 2889 retval = EINVAL; 2890 break; 2891 } 2892 break; 2893 } 2894 case CTL_DUMP_STRUCTS: { 2895 int j, k; 2896 struct ctl_port *port; 2897 struct ctl_frontend *fe; 2898 2899 mtx_lock(&softc->ctl_lock); 2900 printf("CTL Persistent Reservation information start:\n"); 2901 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2902 mtx_lock(&lun->lun_lock); 2903 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2904 mtx_unlock(&lun->lun_lock); 2905 continue; 2906 } 2907 2908 for (j = 0; j < ctl_max_ports; j++) { 2909 if (lun->pr_keys[j] == NULL) 2910 continue; 2911 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2912 if (lun->pr_keys[j][k] == 0) 2913 continue; 2914 printf(" LUN %ju port %d iid %d key " 2915 "%#jx\n", lun->lun, j, k, 2916 (uintmax_t)lun->pr_keys[j][k]); 2917 } 2918 } 2919 mtx_unlock(&lun->lun_lock); 2920 } 2921 printf("CTL Persistent Reservation information end\n"); 2922 printf("CTL Ports:\n"); 2923 STAILQ_FOREACH(port, &softc->port_list, links) { 2924 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2925 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2926 port->frontend->name, port->port_type, 2927 port->physical_port, port->virtual_port, 2928 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2929 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2930 if (port->wwpn_iid[j].in_use == 0 && 2931 port->wwpn_iid[j].wwpn == 0 && 2932 port->wwpn_iid[j].name == NULL) 2933 continue; 2934 2935 printf(" iid %u use %d WWPN %#jx '%s'\n", 2936 j, port->wwpn_iid[j].in_use, 2937 (uintmax_t)port->wwpn_iid[j].wwpn, 2938 port->wwpn_iid[j].name); 2939 } 2940 } 2941 printf("CTL Port information end\n"); 2942 mtx_unlock(&softc->ctl_lock); 2943 /* 2944 * XXX KDM calling this without a lock. We'd likely want 2945 * to drop the lock before calling the frontend's dump 2946 * routine anyway. 
2947 */ 2948 printf("CTL Frontends:\n"); 2949 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2950 printf(" Frontend '%s'\n", fe->name); 2951 if (fe->fe_dump != NULL) 2952 fe->fe_dump(); 2953 } 2954 printf("CTL Frontend information end\n"); 2955 break; 2956 } 2957 case CTL_LUN_REQ: { 2958 struct ctl_lun_req *lun_req; 2959 struct ctl_backend_driver *backend; 2960 void *packed; 2961 nvlist_t *tmp_args_nvl; 2962 size_t packed_len; 2963 2964 lun_req = (struct ctl_lun_req *)addr; 2965 tmp_args_nvl = lun_req->args_nvl; 2966 2967 backend = ctl_backend_find(lun_req->backend); 2968 if (backend == NULL) { 2969 lun_req->status = CTL_LUN_ERROR; 2970 snprintf(lun_req->error_str, 2971 sizeof(lun_req->error_str), 2972 "Backend \"%s\" not found.", 2973 lun_req->backend); 2974 break; 2975 } 2976 2977 if (lun_req->args != NULL) { 2978 if (lun_req->args_len > CTL_MAX_ARGS_LEN) { 2979 lun_req->status = CTL_LUN_ERROR; 2980 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2981 "Too big args."); 2982 break; 2983 } 2984 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); 2985 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { 2986 free(packed, M_CTL); 2987 lun_req->status = CTL_LUN_ERROR; 2988 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2989 "Cannot copyin args."); 2990 break; 2991 } 2992 lun_req->args_nvl = nvlist_unpack(packed, 2993 lun_req->args_len, 0); 2994 free(packed, M_CTL); 2995 2996 if (lun_req->args_nvl == NULL) { 2997 lun_req->status = CTL_LUN_ERROR; 2998 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2999 "Cannot unpack args nvlist."); 3000 break; 3001 } 3002 } else 3003 lun_req->args_nvl = nvlist_create(0); 3004 3005 lun_req->result_nvl = NULL; 3006 retval = backend->ioctl(dev, cmd, addr, flag, td); 3007 nvlist_destroy(lun_req->args_nvl); 3008 lun_req->args_nvl = tmp_args_nvl; 3009 3010 if (lun_req->result_nvl != NULL) { 3011 if (lun_req->result != NULL) { 3012 packed = nvlist_pack(lun_req->result_nvl, 3013 &packed_len); 3014 if (packed == NULL) { 3015 lun_req->status = CTL_LUN_ERROR; 3016 snprintf(lun_req->error_str, 3017 sizeof(lun_req->error_str), 3018 "Cannot pack result nvlist."); 3019 break; 3020 } 3021 3022 if (packed_len > lun_req->result_len) { 3023 lun_req->status = CTL_LUN_ERROR; 3024 snprintf(lun_req->error_str, 3025 sizeof(lun_req->error_str), 3026 "Result nvlist too large."); 3027 free(packed, M_NVLIST); 3028 break; 3029 } 3030 3031 if (copyout(packed, lun_req->result, packed_len)) { 3032 lun_req->status = CTL_LUN_ERROR; 3033 snprintf(lun_req->error_str, 3034 sizeof(lun_req->error_str), 3035 "Cannot copyout() the result."); 3036 free(packed, M_NVLIST); 3037 break; 3038 } 3039 3040 lun_req->result_len = packed_len; 3041 free(packed, M_NVLIST); 3042 } 3043 3044 nvlist_destroy(lun_req->result_nvl); 3045 } 3046 break; 3047 } 3048 case CTL_LUN_LIST: { 3049 struct sbuf *sb; 3050 struct ctl_lun_list *list; 3051 const char *name, *value; 3052 void *cookie; 3053 int type; 3054 3055 list = (struct ctl_lun_list *)addr; 3056 3057 /* 3058 * Allocate a fixed length sbuf here, based on the length 3059 * of the user's buffer. We could allocate an auto-extending 3060 * buffer, and then tell the user how much larger our 3061 * amount of data is than his buffer, but that presents 3062 * some problems: 3063 * 3064 * 1. The sbuf(9) routines use a blocking malloc, and so 3065 * we can't hold a lock while calling them with an 3066 * auto-extending buffer. 3067 * 3068 * 2. 
There is not currently a LUN reference counting 3069 * mechanism, outside of outstanding transactions on 3070 * the LUN's OOA queue. So a LUN could go away on us 3071 * while we're getting the LUN number, backend-specific 3072 * information, etc. Thus, given the way things 3073 * currently work, we need to hold the CTL lock while 3074 * grabbing LUN information. 3075 * 3076 * So, from the user's standpoint, the best thing to do is 3077 * allocate what he thinks is a reasonable buffer length, 3078 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3079 * double the buffer length and try again. (And repeat 3080 * that until he succeeds.) 3081 */ 3082 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3083 if (sb == NULL) { 3084 list->status = CTL_LUN_LIST_ERROR; 3085 snprintf(list->error_str, sizeof(list->error_str), 3086 "Unable to allocate %d bytes for LUN list", 3087 list->alloc_len); 3088 break; 3089 } 3090 3091 sbuf_cat(sb, "<ctllunlist>\n"); 3092 3093 mtx_lock(&softc->ctl_lock); 3094 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3095 mtx_lock(&lun->lun_lock); 3096 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3097 (uintmax_t)lun->lun); 3098 3099 /* 3100 * Bail out as soon as we see that we've overfilled 3101 * the buffer. 3102 */ 3103 if (retval != 0) 3104 break; 3105 3106 retval = sbuf_printf(sb, "\t<backend_type>%s" 3107 "</backend_type>\n", 3108 (lun->backend == NULL) ? "none" : 3109 lun->backend->name); 3110 3111 if (retval != 0) 3112 break; 3113 3114 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3115 lun->be_lun->lun_type); 3116 3117 if (retval != 0) 3118 break; 3119 3120 if (lun->backend == NULL) { 3121 retval = sbuf_cat(sb, "</lun>\n"); 3122 if (retval != 0) 3123 break; 3124 continue; 3125 } 3126 3127 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3128 (lun->be_lun->maxlba > 0) ? 
3129 lun->be_lun->maxlba + 1 : 0); 3130 3131 if (retval != 0) 3132 break; 3133 3134 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3135 lun->be_lun->blocksize); 3136 3137 if (retval != 0) 3138 break; 3139 3140 retval = sbuf_cat(sb, "\t<serial_number>"); 3141 3142 if (retval != 0) 3143 break; 3144 3145 retval = ctl_sbuf_printf_esc(sb, 3146 lun->be_lun->serial_num, 3147 sizeof(lun->be_lun->serial_num)); 3148 3149 if (retval != 0) 3150 break; 3151 3152 retval = sbuf_cat(sb, "</serial_number>\n"); 3153 3154 if (retval != 0) 3155 break; 3156 3157 retval = sbuf_cat(sb, "\t<device_id>"); 3158 3159 if (retval != 0) 3160 break; 3161 3162 retval = ctl_sbuf_printf_esc(sb, 3163 lun->be_lun->device_id, 3164 sizeof(lun->be_lun->device_id)); 3165 3166 if (retval != 0) 3167 break; 3168 3169 retval = sbuf_cat(sb, "</device_id>\n"); 3170 3171 if (retval != 0) 3172 break; 3173 3174 if (lun->backend->lun_info != NULL) { 3175 retval = lun->backend->lun_info(lun->be_lun, sb); 3176 if (retval != 0) 3177 break; 3178 } 3179 3180 cookie = NULL; 3181 while ((name = nvlist_next(lun->be_lun->options, &type, 3182 &cookie)) != NULL) { 3183 sbuf_printf(sb, "\t<%s>", name); 3184 3185 if (type == NV_TYPE_STRING) { 3186 value = dnvlist_get_string( 3187 lun->be_lun->options, name, NULL); 3188 if (value != NULL) 3189 sbuf_cat(sb, value); 3190 } 3191 3192 sbuf_printf(sb, "</%s>\n", name); 3193 } 3194 3195 retval = sbuf_cat(sb, "</lun>\n"); 3196 3197 if (retval != 0) 3198 break; 3199 mtx_unlock(&lun->lun_lock); 3200 } 3201 if (lun != NULL) 3202 mtx_unlock(&lun->lun_lock); 3203 mtx_unlock(&softc->ctl_lock); 3204 3205 if ((retval != 0) 3206 || ((retval = sbuf_cat(sb, "</ctllunlist>\n")) != 0)) { 3207 retval = 0; 3208 sbuf_delete(sb); 3209 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3210 snprintf(list->error_str, sizeof(list->error_str), 3211 "Out of space, %d bytes is too small", 3212 list->alloc_len); 3213 break; 3214 } 3215 3216 sbuf_finish(sb); 3217 3218 retval = copyout(sbuf_data(sb), list->lun_xml, 3219 sbuf_len(sb) + 1); 3220 3221 list->fill_len = sbuf_len(sb) + 1; 3222 list->status = CTL_LUN_LIST_OK; 3223 sbuf_delete(sb); 3224 break; 3225 } 3226 case CTL_ISCSI: { 3227 struct ctl_iscsi *ci; 3228 struct ctl_frontend *fe; 3229 3230 ci = (struct ctl_iscsi *)addr; 3231 3232 fe = ctl_frontend_find("iscsi"); 3233 if (fe == NULL) { 3234 ci->status = CTL_ISCSI_ERROR; 3235 snprintf(ci->error_str, sizeof(ci->error_str), 3236 "Frontend \"iscsi\" not found."); 3237 break; 3238 } 3239 3240 retval = fe->ioctl(dev, cmd, addr, flag, td); 3241 break; 3242 } 3243 case CTL_PORT_REQ: { 3244 struct ctl_req *req; 3245 struct ctl_frontend *fe; 3246 void *packed; 3247 nvlist_t *tmp_args_nvl; 3248 size_t packed_len; 3249 3250 req = (struct ctl_req *)addr; 3251 tmp_args_nvl = req->args_nvl; 3252 3253 fe = ctl_frontend_find(req->driver); 3254 if (fe == NULL) { 3255 req->status = CTL_LUN_ERROR; 3256 snprintf(req->error_str, sizeof(req->error_str), 3257 "Frontend \"%s\" not found.", req->driver); 3258 break; 3259 } 3260 3261 if (req->args != NULL) { 3262 if (req->args_len > CTL_MAX_ARGS_LEN) { 3263 req->status = CTL_LUN_ERROR; 3264 snprintf(req->error_str, sizeof(req->error_str), 3265 "Too big args."); 3266 break; 3267 } 3268 packed = malloc(req->args_len, M_CTL, M_WAITOK); 3269 if (copyin(req->args, packed, req->args_len) != 0) { 3270 free(packed, M_CTL); 3271 req->status = CTL_LUN_ERROR; 3272 snprintf(req->error_str, sizeof(req->error_str), 3273 "Cannot copyin args."); 3274 break; 3275 } 3276 req->args_nvl = nvlist_unpack(packed, 3277 
req->args_len, 0); 3278 free(packed, M_CTL); 3279 3280 if (req->args_nvl == NULL) { 3281 req->status = CTL_LUN_ERROR; 3282 snprintf(req->error_str, sizeof(req->error_str), 3283 "Cannot unpack args nvlist."); 3284 break; 3285 } 3286 } else 3287 req->args_nvl = nvlist_create(0); 3288 3289 req->result_nvl = NULL; 3290 if (fe->ioctl) 3291 retval = fe->ioctl(dev, cmd, addr, flag, td); 3292 else 3293 retval = ENODEV; 3294 3295 nvlist_destroy(req->args_nvl); 3296 req->args_nvl = tmp_args_nvl; 3297 3298 if (req->result_nvl != NULL) { 3299 if (req->result != NULL) { 3300 packed = nvlist_pack(req->result_nvl, 3301 &packed_len); 3302 if (packed == NULL) { 3303 req->status = CTL_LUN_ERROR; 3304 snprintf(req->error_str, 3305 sizeof(req->error_str), 3306 "Cannot pack result nvlist."); 3307 break; 3308 } 3309 3310 if (packed_len > req->result_len) { 3311 req->status = CTL_LUN_ERROR; 3312 snprintf(req->error_str, 3313 sizeof(req->error_str), 3314 "Result nvlist too large."); 3315 free(packed, M_NVLIST); 3316 break; 3317 } 3318 3319 if (copyout(packed, req->result, packed_len)) { 3320 req->status = CTL_LUN_ERROR; 3321 snprintf(req->error_str, 3322 sizeof(req->error_str), 3323 "Cannot copyout() the result."); 3324 free(packed, M_NVLIST); 3325 break; 3326 } 3327 3328 req->result_len = packed_len; 3329 free(packed, M_NVLIST); 3330 } 3331 3332 nvlist_destroy(req->result_nvl); 3333 } 3334 break; 3335 } 3336 case CTL_PORT_LIST: { 3337 struct sbuf *sb; 3338 struct ctl_port *port; 3339 struct ctl_lun_list *list; 3340 const char *name, *value; 3341 void *cookie; 3342 int j, type; 3343 uint32_t plun; 3344 3345 list = (struct ctl_lun_list *)addr; 3346 3347 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3348 if (sb == NULL) { 3349 list->status = CTL_LUN_LIST_ERROR; 3350 snprintf(list->error_str, sizeof(list->error_str), 3351 "Unable to allocate %d bytes for LUN list", 3352 list->alloc_len); 3353 break; 3354 } 3355 3356 sbuf_cat(sb, "<ctlportlist>\n"); 3357 3358 mtx_lock(&softc->ctl_lock); 3359 STAILQ_FOREACH(port, &softc->port_list, links) { 3360 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3361 (uintmax_t)port->targ_port); 3362 3363 /* 3364 * Bail out as soon as we see that we've overfilled 3365 * the buffer. 3366 */ 3367 if (retval != 0) 3368 break; 3369 3370 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3371 "</frontend_type>\n", port->frontend->name); 3372 if (retval != 0) 3373 break; 3374 3375 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3376 port->port_type); 3377 if (retval != 0) 3378 break; 3379 3380 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3381 (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); 3382 if (retval != 0) 3383 break; 3384 3385 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3386 port->port_name); 3387 if (retval != 0) 3388 break; 3389 3390 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3391 port->physical_port); 3392 if (retval != 0) 3393 break; 3394 3395 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3396 port->virtual_port); 3397 if (retval != 0) 3398 break; 3399 3400 if (port->target_devid != NULL) { 3401 sbuf_cat(sb, "\t<target>"); 3402 ctl_id_sbuf(port->target_devid, sb); 3403 sbuf_cat(sb, "</target>\n"); 3404 } 3405 3406 if (port->port_devid != NULL) { 3407 sbuf_cat(sb, "\t<port>"); 3408 ctl_id_sbuf(port->port_devid, sb); 3409 sbuf_cat(sb, "</port>\n"); 3410 } 3411 3412 if (port->port_info != NULL) { 3413 retval = port->port_info(port->onoff_arg, sb); 3414 if (retval != 0) 3415 break; 3416 } 3417 3418 cookie = NULL; 3419 while ((name = nvlist_next(port->options, &type, 3420 &cookie)) != NULL) { 3421 sbuf_printf(sb, "\t<%s>", name); 3422 3423 if (type == NV_TYPE_STRING) { 3424 value = dnvlist_get_string(port->options, 3425 name, NULL); 3426 if (value != NULL) 3427 sbuf_printf(sb, "%s", value); 3428 } 3429 3430 sbuf_printf(sb, "</%s>\n", name); 3431 } 3432 3433 if (port->lun_map != NULL) { 3434 sbuf_cat(sb, "\t<lun_map>on</lun_map>\n"); 3435 for (j = 0; j < port->lun_map_size; j++) { 3436 plun = ctl_lun_map_from_port(port, j); 3437 if (plun == UINT32_MAX) 3438 continue; 3439 sbuf_printf(sb, 3440 "\t<lun id=\"%u\">%u</lun>\n", 3441 j, plun); 3442 } 3443 } 3444 3445 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3446 if (port->wwpn_iid[j].in_use == 0 || 3447 (port->wwpn_iid[j].wwpn == 0 && 3448 port->wwpn_iid[j].name == NULL)) 3449 continue; 3450 3451 if (port->wwpn_iid[j].name != NULL) 3452 retval = sbuf_printf(sb, 3453 "\t<initiator id=\"%u\">%s</initiator>\n", 3454 j, port->wwpn_iid[j].name); 3455 else 3456 retval = sbuf_printf(sb, 3457 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3458 j, port->wwpn_iid[j].wwpn); 3459 if (retval != 0) 3460 break; 3461 } 3462 if (retval != 0) 3463 break; 3464 3465 retval = sbuf_cat(sb, "</targ_port>\n"); 3466 if (retval != 0) 3467 break; 3468 } 3469 mtx_unlock(&softc->ctl_lock); 3470 3471 if ((retval != 0) 3472 || ((retval = sbuf_cat(sb, "</ctlportlist>\n")) != 0)) { 3473 retval = 0; 3474 sbuf_delete(sb); 3475 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3476 snprintf(list->error_str, sizeof(list->error_str), 3477 "Out of space, %d bytes is too small", 3478 list->alloc_len); 3479 break; 3480 } 3481 3482 sbuf_finish(sb); 3483 3484 retval = copyout(sbuf_data(sb), list->lun_xml, 3485 sbuf_len(sb) + 1); 3486 3487 list->fill_len = sbuf_len(sb) + 1; 3488 list->status = CTL_LUN_LIST_OK; 3489 sbuf_delete(sb); 3490 break; 3491 } 3492 case CTL_LUN_MAP: { 3493 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3494 struct ctl_port *port; 3495 3496 mtx_lock(&softc->ctl_lock); 3497 if (lm->port < softc->port_min || 3498 lm->port >= softc->port_max || 3499 (port = softc->ctl_ports[lm->port]) == NULL) { 3500 mtx_unlock(&softc->ctl_lock); 3501 return (ENXIO); 3502 } 3503 if (port->status & CTL_PORT_STATUS_ONLINE) { 3504 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3505 if (ctl_lun_map_to_port(port, lun->lun) == 3506 UINT32_MAX) 3507 continue; 3508 mtx_lock(&lun->lun_lock); 3509 ctl_est_ua_port(lun, lm->port, -1, 3510 CTL_UA_LUN_CHANGE); 3511 mtx_unlock(&lun->lun_lock); 3512 } 3513 } 3514 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3515 if (lm->plun != UINT32_MAX) { 
3516 if (lm->lun == UINT32_MAX) 3517 retval = ctl_lun_map_unset(port, lm->plun); 3518 else if (lm->lun < ctl_max_luns && 3519 softc->ctl_luns[lm->lun] != NULL) 3520 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3521 else 3522 return (ENXIO); 3523 } else { 3524 if (lm->lun == UINT32_MAX) 3525 retval = ctl_lun_map_deinit(port); 3526 else 3527 retval = ctl_lun_map_init(port); 3528 } 3529 if (port->status & CTL_PORT_STATUS_ONLINE) 3530 ctl_isc_announce_port(port); 3531 break; 3532 } 3533 case CTL_GET_LUN_STATS: { 3534 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3535 int i; 3536 3537 /* 3538 * XXX KDM no locking here. If the LUN list changes, 3539 * things can blow up. 3540 */ 3541 i = 0; 3542 stats->status = CTL_SS_OK; 3543 stats->fill_len = 0; 3544 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3545 if (lun->lun < stats->first_item) 3546 continue; 3547 if (stats->fill_len + sizeof(lun->stats) > 3548 stats->alloc_len) { 3549 stats->status = CTL_SS_NEED_MORE_SPACE; 3550 break; 3551 } 3552 retval = copyout(&lun->stats, &stats->stats[i++], 3553 sizeof(lun->stats)); 3554 if (retval != 0) 3555 break; 3556 stats->fill_len += sizeof(lun->stats); 3557 } 3558 stats->num_items = softc->num_luns; 3559 stats->flags = CTL_STATS_FLAG_NONE; 3560 #ifdef CTL_TIME_IO 3561 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3562 #endif 3563 getnanouptime(&stats->timestamp); 3564 break; 3565 } 3566 case CTL_GET_PORT_STATS: { 3567 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3568 int i; 3569 3570 /* 3571 * XXX KDM no locking here. If the LUN list changes, 3572 * things can blow up. 3573 */ 3574 i = 0; 3575 stats->status = CTL_SS_OK; 3576 stats->fill_len = 0; 3577 STAILQ_FOREACH(port, &softc->port_list, links) { 3578 if (port->targ_port < stats->first_item) 3579 continue; 3580 if (stats->fill_len + sizeof(port->stats) > 3581 stats->alloc_len) { 3582 stats->status = CTL_SS_NEED_MORE_SPACE; 3583 break; 3584 } 3585 retval = copyout(&port->stats, &stats->stats[i++], 3586 sizeof(port->stats)); 3587 if (retval != 0) 3588 break; 3589 stats->fill_len += sizeof(port->stats); 3590 } 3591 stats->num_items = softc->num_ports; 3592 stats->flags = CTL_STATS_FLAG_NONE; 3593 #ifdef CTL_TIME_IO 3594 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3595 #endif 3596 getnanouptime(&stats->timestamp); 3597 break; 3598 } 3599 default: { 3600 /* XXX KDM should we fix this? */ 3601 #if 0 3602 struct ctl_backend_driver *backend; 3603 unsigned int type; 3604 int found; 3605 3606 found = 0; 3607 3608 /* 3609 * We encode the backend type as the ioctl type for backend 3610 * ioctls. So parse it out here, and then search for a 3611 * backend of this type. 
3612 */ 3613 type = _IOC_TYPE(cmd); 3614 3615 STAILQ_FOREACH(backend, &softc->be_list, links) { 3616 if (backend->type == type) { 3617 found = 1; 3618 break; 3619 } 3620 } 3621 if (found == 0) { 3622 printf("ctl: unknown ioctl command %#lx or backend " 3623 "%d\n", cmd, type); 3624 retval = EINVAL; 3625 break; 3626 } 3627 retval = backend->ioctl(dev, cmd, addr, flag, td); 3628 #endif 3629 retval = ENOTTY; 3630 break; 3631 } 3632 } 3633 return (retval); 3634 } 3635 3636 uint32_t 3637 ctl_get_initindex(struct ctl_nexus *nexus) 3638 { 3639 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3640 } 3641 3642 int 3643 ctl_lun_map_init(struct ctl_port *port) 3644 { 3645 struct ctl_softc *softc = port->ctl_softc; 3646 struct ctl_lun *lun; 3647 int size = ctl_lun_map_size; 3648 uint32_t i; 3649 3650 if (port->lun_map == NULL || port->lun_map_size < size) { 3651 port->lun_map_size = 0; 3652 free(port->lun_map, M_CTL); 3653 port->lun_map = malloc(size * sizeof(uint32_t), 3654 M_CTL, M_NOWAIT); 3655 } 3656 if (port->lun_map == NULL) 3657 return (ENOMEM); 3658 for (i = 0; i < size; i++) 3659 port->lun_map[i] = UINT32_MAX; 3660 port->lun_map_size = size; 3661 if (port->status & CTL_PORT_STATUS_ONLINE) { 3662 if (port->lun_disable != NULL) { 3663 STAILQ_FOREACH(lun, &softc->lun_list, links) 3664 port->lun_disable(port->targ_lun_arg, lun->lun); 3665 } 3666 ctl_isc_announce_port(port); 3667 } 3668 return (0); 3669 } 3670 3671 int 3672 ctl_lun_map_deinit(struct ctl_port *port) 3673 { 3674 struct ctl_softc *softc = port->ctl_softc; 3675 struct ctl_lun *lun; 3676 3677 if (port->lun_map == NULL) 3678 return (0); 3679 port->lun_map_size = 0; 3680 free(port->lun_map, M_CTL); 3681 port->lun_map = NULL; 3682 if (port->status & CTL_PORT_STATUS_ONLINE) { 3683 if (port->lun_enable != NULL) { 3684 STAILQ_FOREACH(lun, &softc->lun_list, links) 3685 port->lun_enable(port->targ_lun_arg, lun->lun); 3686 } 3687 ctl_isc_announce_port(port); 3688 } 3689 return (0); 3690 } 3691 3692 int 3693 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3694 { 3695 int status; 3696 uint32_t old; 3697 3698 if (port->lun_map == NULL) { 3699 status = ctl_lun_map_init(port); 3700 if (status != 0) 3701 return (status); 3702 } 3703 if (plun >= port->lun_map_size) 3704 return (EINVAL); 3705 old = port->lun_map[plun]; 3706 port->lun_map[plun] = glun; 3707 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3708 if (port->lun_enable != NULL) 3709 port->lun_enable(port->targ_lun_arg, plun); 3710 ctl_isc_announce_port(port); 3711 } 3712 return (0); 3713 } 3714 3715 int 3716 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3717 { 3718 uint32_t old; 3719 3720 if (port->lun_map == NULL || plun >= port->lun_map_size) 3721 return (0); 3722 old = port->lun_map[plun]; 3723 port->lun_map[plun] = UINT32_MAX; 3724 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3725 if (port->lun_disable != NULL) 3726 port->lun_disable(port->targ_lun_arg, plun); 3727 ctl_isc_announce_port(port); 3728 } 3729 return (0); 3730 } 3731 3732 uint32_t 3733 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3734 { 3735 3736 if (port == NULL) 3737 return (UINT32_MAX); 3738 if (port->lun_map == NULL) 3739 return (lun_id); 3740 if (lun_id > port->lun_map_size) 3741 return (UINT32_MAX); 3742 return (port->lun_map[lun_id]); 3743 } 3744 3745 uint32_t 3746 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3747 { 3748 uint32_t i; 3749 3750 if (port == NULL) 3751 return (UINT32_MAX); 3752 if 
(port->lun_map == NULL) 3753 return (lun_id); 3754 for (i = 0; i < port->lun_map_size; i++) { 3755 if (port->lun_map[i] == lun_id) 3756 return (i); 3757 } 3758 return (UINT32_MAX); 3759 } 3760 3761 uint32_t 3762 ctl_decode_lun(uint64_t encoded) 3763 { 3764 uint8_t lun[8]; 3765 uint32_t result = 0xffffffff; 3766 3767 be64enc(lun, encoded); 3768 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3769 case RPL_LUNDATA_ATYP_PERIPH: 3770 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3771 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3772 result = lun[1]; 3773 break; 3774 case RPL_LUNDATA_ATYP_FLAT: 3775 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3776 lun[6] == 0 && lun[7] == 0) 3777 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3778 break; 3779 case RPL_LUNDATA_ATYP_EXTLUN: 3780 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3781 case 0x02: 3782 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3783 case 0x00: 3784 result = lun[1]; 3785 break; 3786 case 0x10: 3787 result = (lun[1] << 16) + (lun[2] << 8) + 3788 lun[3]; 3789 break; 3790 case 0x20: 3791 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3792 result = (lun[2] << 24) + 3793 (lun[3] << 16) + (lun[4] << 8) + 3794 lun[5]; 3795 break; 3796 } 3797 break; 3798 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3799 result = 0xffffffff; 3800 break; 3801 } 3802 break; 3803 } 3804 return (result); 3805 } 3806 3807 uint64_t 3808 ctl_encode_lun(uint32_t decoded) 3809 { 3810 uint64_t l = decoded; 3811 3812 if (l <= 0xff) 3813 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3814 if (l <= 0x3fff) 3815 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3816 if (l <= 0xffffff) 3817 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3818 (l << 32)); 3819 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3820 } 3821 3822 int 3823 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3824 { 3825 int i; 3826 3827 for (i = first; i < last; i++) { 3828 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3829 return (i); 3830 } 3831 return (-1); 3832 } 3833 3834 int 3835 ctl_set_mask(uint32_t *mask, uint32_t bit) 3836 { 3837 uint32_t chunk, piece; 3838 3839 chunk = bit >> 5; 3840 piece = bit % (sizeof(uint32_t) * 8); 3841 3842 if ((mask[chunk] & (1 << piece)) != 0) 3843 return (-1); 3844 else 3845 mask[chunk] |= (1 << piece); 3846 3847 return (0); 3848 } 3849 3850 int 3851 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3852 { 3853 uint32_t chunk, piece; 3854 3855 chunk = bit >> 5; 3856 piece = bit % (sizeof(uint32_t) * 8); 3857 3858 if ((mask[chunk] & (1 << piece)) == 0) 3859 return (-1); 3860 else 3861 mask[chunk] &= ~(1 << piece); 3862 3863 return (0); 3864 } 3865 3866 int 3867 ctl_is_set(uint32_t *mask, uint32_t bit) 3868 { 3869 uint32_t chunk, piece; 3870 3871 chunk = bit >> 5; 3872 piece = bit % (sizeof(uint32_t) * 8); 3873 3874 if ((mask[chunk] & (1 << piece)) == 0) 3875 return (0); 3876 else 3877 return (1); 3878 } 3879 3880 static uint64_t 3881 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3882 { 3883 uint64_t *t; 3884 3885 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3886 if (t == NULL) 3887 return (0); 3888 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3889 } 3890 3891 static void 3892 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3893 { 3894 uint64_t *t; 3895 3896 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3897 if (t == NULL) 3898 return; 3899 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3900 } 3901 3902 static void 3903 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3904 
{ 3905 uint64_t *p; 3906 u_int i; 3907 3908 i = residx/CTL_MAX_INIT_PER_PORT; 3909 if (lun->pr_keys[i] != NULL) 3910 return; 3911 mtx_unlock(&lun->lun_lock); 3912 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3913 M_WAITOK | M_ZERO); 3914 mtx_lock(&lun->lun_lock); 3915 if (lun->pr_keys[i] == NULL) 3916 lun->pr_keys[i] = p; 3917 else 3918 free(p, M_CTL); 3919 } 3920 3921 static void 3922 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3923 { 3924 uint64_t *t; 3925 3926 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3927 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3928 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3929 } 3930 3931 /* 3932 * ctl_softc, pool_name, total_ctl_io are passed in. 3933 * npool is passed out. 3934 */ 3935 int 3936 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3937 uint32_t total_ctl_io, void **npool) 3938 { 3939 struct ctl_io_pool *pool; 3940 3941 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3942 M_NOWAIT | M_ZERO); 3943 if (pool == NULL) 3944 return (ENOMEM); 3945 3946 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3947 pool->ctl_softc = ctl_softc; 3948 #ifdef IO_POOLS 3949 pool->zone = uma_zsecond_create(pool->name, NULL, 3950 NULL, NULL, NULL, ctl_softc->io_zone); 3951 /* uma_prealloc(pool->zone, total_ctl_io); */ 3952 #else 3953 pool->zone = ctl_softc->io_zone; 3954 #endif 3955 3956 *npool = pool; 3957 return (0); 3958 } 3959 3960 void 3961 ctl_pool_free(struct ctl_io_pool *pool) 3962 { 3963 3964 if (pool == NULL) 3965 return; 3966 3967 #ifdef IO_POOLS 3968 uma_zdestroy(pool->zone); 3969 #endif 3970 free(pool, M_CTL); 3971 } 3972 3973 union ctl_io * 3974 ctl_alloc_io(void *pool_ref) 3975 { 3976 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3977 union ctl_io *io; 3978 3979 io = uma_zalloc(pool->zone, M_WAITOK); 3980 if (io != NULL) { 3981 io->io_hdr.pool = pool_ref; 3982 CTL_SOFTC(io) = pool->ctl_softc; 3983 TAILQ_INIT(&io->io_hdr.blocked_queue); 3984 } 3985 return (io); 3986 } 3987 3988 union ctl_io * 3989 ctl_alloc_io_nowait(void *pool_ref) 3990 { 3991 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3992 union ctl_io *io; 3993 3994 io = uma_zalloc(pool->zone, M_NOWAIT); 3995 if (io != NULL) { 3996 io->io_hdr.pool = pool_ref; 3997 CTL_SOFTC(io) = pool->ctl_softc; 3998 TAILQ_INIT(&io->io_hdr.blocked_queue); 3999 } 4000 return (io); 4001 } 4002 4003 void 4004 ctl_free_io(union ctl_io *io) 4005 { 4006 struct ctl_io_pool *pool; 4007 4008 if (io == NULL) 4009 return; 4010 4011 pool = (struct ctl_io_pool *)io->io_hdr.pool; 4012 uma_zfree(pool->zone, io); 4013 } 4014 4015 void 4016 ctl_zero_io(union ctl_io *io) 4017 { 4018 struct ctl_io_pool *pool; 4019 4020 if (io == NULL) 4021 return; 4022 4023 /* 4024 * May need to preserve linked list pointers at some point too. 
 */
	pool = io->io_hdr.pool;
	memset(io, 0, sizeof(*io));
	io->io_hdr.pool = pool;
	CTL_SOFTC(io) = pool->ctl_softc;
	TAILQ_INIT(&io->io_hdr.blocked_queue);
}

int
ctl_expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoq(buf, &endptr, 0);

	switch (tolower((unsigned char)*endptr)) {
	case 'e':
		shift = 60;
		break;
	case 'p':
		shift = 50;
		break;
	case 't':
		shift = 40;
		break;
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		*num = number;
		return (0);
	default:
		/* Unrecognized unit. */
		return (-1);
	}

	if ((number << shift) >> shift != number) {
		/* Overflow */
		return (-1);
	}
	*num = number << shift;
	return (0);
}

/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular LUN.
 */
static int
ctl_init_page_index(struct ctl_lun *lun)
{
	int i, page_code;
	struct ctl_page_index *page_index;
	const char *value;
	uint64_t ival;

	memcpy(&lun->mode_pages.index, page_index_template,
	    sizeof(page_index_template));

	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		page_index = &lun->mode_pages.index[i];
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		page_code = page_index->page_code & SMPH_PC_MASK;
		switch (page_code) {
		case SMS_RW_ERROR_RECOVERY_PAGE: {
			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
			    &rw_er_page_default,
			    sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
			    &rw_er_page_changeable,
			    sizeof(rw_er_page_changeable));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
			    &rw_er_page_default,
			    sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
			    &rw_er_page_default,
			    sizeof(rw_er_page_default));
			page_index->page_data =
			    (uint8_t *)lun->mode_pages.rw_er_page;
			break;
		}
		case SMS_FORMAT_DEVICE_PAGE: {
			struct scsi_format_page *format_page;

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));

			/*
			 * Sectors per track are set above. Bytes per
			 * sector need to be set here on a per-LUN basis.
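			 *
			 * As a worked example (the numbers are purely
			 * illustrative): for a 512-byte blocksize,
			 * scsi_ulto2b(512, format_page->bytes_per_sector)
			 * below stores the big-endian pair {0x02, 0x00};
			 * a 4096-byte blocksize would be stored as
			 * {0x10, 0x00}.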
4137 */ 4138 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4139 &format_page_default, 4140 sizeof(format_page_default)); 4141 memcpy(&lun->mode_pages.format_page[ 4142 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4143 sizeof(format_page_changeable)); 4144 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4145 &format_page_default, 4146 sizeof(format_page_default)); 4147 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4148 &format_page_default, 4149 sizeof(format_page_default)); 4150 4151 format_page = &lun->mode_pages.format_page[ 4152 CTL_PAGE_CURRENT]; 4153 scsi_ulto2b(lun->be_lun->blocksize, 4154 format_page->bytes_per_sector); 4155 4156 format_page = &lun->mode_pages.format_page[ 4157 CTL_PAGE_DEFAULT]; 4158 scsi_ulto2b(lun->be_lun->blocksize, 4159 format_page->bytes_per_sector); 4160 4161 format_page = &lun->mode_pages.format_page[ 4162 CTL_PAGE_SAVED]; 4163 scsi_ulto2b(lun->be_lun->blocksize, 4164 format_page->bytes_per_sector); 4165 4166 page_index->page_data = 4167 (uint8_t *)lun->mode_pages.format_page; 4168 break; 4169 } 4170 case SMS_RIGID_DISK_PAGE: { 4171 struct scsi_rigid_disk_page *rigid_disk_page; 4172 uint32_t sectors_per_cylinder; 4173 uint64_t cylinders; 4174 #ifndef __XSCALE__ 4175 int shift; 4176 #endif /* !__XSCALE__ */ 4177 4178 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4179 ("subpage %#x for page %#x is incorrect!", 4180 page_index->subpage, page_code)); 4181 4182 /* 4183 * Rotation rate and sectors per track are set 4184 * above. We calculate the cylinders here based on 4185 * capacity. Due to the number of heads and 4186 * sectors per track we're using, smaller arrays 4187 * may turn out to have 0 cylinders. Linux and 4188 * FreeBSD don't pay attention to these mode pages 4189 * to figure out capacity, but Solaris does. It 4190 * seems to deal with 0 cylinders just fine, and 4191 * works out a fake geometry based on the capacity. 4192 */ 4193 memcpy(&lun->mode_pages.rigid_disk_page[ 4194 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4195 sizeof(rigid_disk_page_default)); 4196 memcpy(&lun->mode_pages.rigid_disk_page[ 4197 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4198 sizeof(rigid_disk_page_changeable)); 4199 4200 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4201 CTL_DEFAULT_HEADS; 4202 4203 /* 4204 * The divide method here will be more accurate, 4205 * probably, but results in floating point being 4206 * used in the kernel on i386 (__udivdi3()). On the 4207 * XScale, though, __udivdi3() is implemented in 4208 * software. 4209 * 4210 * The shift method for cylinder calculation is 4211 * accurate if sectors_per_cylinder is a power of 4212 * 2. Otherwise it might be slightly off -- you 4213 * might have a bit of a truncation problem. 4214 */ 4215 #ifdef __XSCALE__ 4216 cylinders = (lun->be_lun->maxlba + 1) / 4217 sectors_per_cylinder; 4218 #else 4219 for (shift = 31; shift > 0; shift--) { 4220 if (sectors_per_cylinder & (1 << shift)) 4221 break; 4222 } 4223 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4224 #endif 4225 4226 /* 4227 * We've basically got 3 bytes, or 24 bits for the 4228 * cylinder size in the mode page. If we're over, 4229 * just round down to 2^24. 
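 * Illustrative numbers only: with the shift method above and a
 * hypothetical sectors_per_cylinder of 2048 (2^11), shift ends up as 11,
 * so a 2^35-block LUN works out to 2^24 = 16777216 cylinders; that is
 * one more than fits in the 3-byte field, so it is clamped to 0xffffff
 * below.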
4230 */ 4231 if (cylinders > 0xffffff) 4232 cylinders = 0xffffff; 4233 4234 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4235 CTL_PAGE_DEFAULT]; 4236 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4237 4238 if ((value = dnvlist_get_string(lun->be_lun->options, 4239 "rpm", NULL)) != NULL) { 4240 scsi_ulto2b(strtol(value, NULL, 0), 4241 rigid_disk_page->rotation_rate); 4242 } 4243 4244 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4245 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4246 sizeof(rigid_disk_page_default)); 4247 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4248 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4249 sizeof(rigid_disk_page_default)); 4250 4251 page_index->page_data = 4252 (uint8_t *)lun->mode_pages.rigid_disk_page; 4253 break; 4254 } 4255 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4256 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4257 ("subpage %#x for page %#x is incorrect!", 4258 page_index->subpage, page_code)); 4259 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4260 &verify_er_page_default, 4261 sizeof(verify_er_page_default)); 4262 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4263 &verify_er_page_changeable, 4264 sizeof(verify_er_page_changeable)); 4265 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4266 &verify_er_page_default, 4267 sizeof(verify_er_page_default)); 4268 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4269 &verify_er_page_default, 4270 sizeof(verify_er_page_default)); 4271 page_index->page_data = 4272 (uint8_t *)lun->mode_pages.verify_er_page; 4273 break; 4274 } 4275 case SMS_CACHING_PAGE: { 4276 struct scsi_caching_page *caching_page; 4277 4278 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4279 ("subpage %#x for page %#x is incorrect!", 4280 page_index->subpage, page_code)); 4281 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4282 &caching_page_default, 4283 sizeof(caching_page_default)); 4284 memcpy(&lun->mode_pages.caching_page[ 4285 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4286 sizeof(caching_page_changeable)); 4287 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4288 &caching_page_default, 4289 sizeof(caching_page_default)); 4290 caching_page = &lun->mode_pages.caching_page[ 4291 CTL_PAGE_SAVED]; 4292 value = dnvlist_get_string(lun->be_lun->options, 4293 "writecache", NULL); 4294 if (value != NULL && strcmp(value, "off") == 0) 4295 caching_page->flags1 &= ~SCP_WCE; 4296 value = dnvlist_get_string(lun->be_lun->options, 4297 "readcache", NULL); 4298 if (value != NULL && strcmp(value, "off") == 0) 4299 caching_page->flags1 |= SCP_RCD; 4300 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4301 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4302 sizeof(caching_page_default)); 4303 page_index->page_data = 4304 (uint8_t *)lun->mode_pages.caching_page; 4305 break; 4306 } 4307 case SMS_CONTROL_MODE_PAGE: { 4308 switch (page_index->subpage) { 4309 case SMS_SUBPAGE_PAGE_0: { 4310 struct scsi_control_page *control_page; 4311 4312 memcpy(&lun->mode_pages.control_page[ 4313 CTL_PAGE_DEFAULT], 4314 &control_page_default, 4315 sizeof(control_page_default)); 4316 memcpy(&lun->mode_pages.control_page[ 4317 CTL_PAGE_CHANGEABLE], 4318 &control_page_changeable, 4319 sizeof(control_page_changeable)); 4320 memcpy(&lun->mode_pages.control_page[ 4321 CTL_PAGE_SAVED], 4322 &control_page_default, 4323 sizeof(control_page_default)); 4324 control_page = &lun->mode_pages.control_page[ 4325 CTL_PAGE_SAVED]; 4326 value = 
dnvlist_get_string(lun->be_lun->options, 4327 "reordering", NULL); 4328 if (value != NULL && 4329 strcmp(value, "unrestricted") == 0) { 4330 control_page->queue_flags &= 4331 ~SCP_QUEUE_ALG_MASK; 4332 control_page->queue_flags |= 4333 SCP_QUEUE_ALG_UNRESTRICTED; 4334 } 4335 memcpy(&lun->mode_pages.control_page[ 4336 CTL_PAGE_CURRENT], 4337 &lun->mode_pages.control_page[ 4338 CTL_PAGE_SAVED], 4339 sizeof(control_page_default)); 4340 page_index->page_data = 4341 (uint8_t *)lun->mode_pages.control_page; 4342 break; 4343 } 4344 case 0x01: 4345 memcpy(&lun->mode_pages.control_ext_page[ 4346 CTL_PAGE_DEFAULT], 4347 &control_ext_page_default, 4348 sizeof(control_ext_page_default)); 4349 memcpy(&lun->mode_pages.control_ext_page[ 4350 CTL_PAGE_CHANGEABLE], 4351 &control_ext_page_changeable, 4352 sizeof(control_ext_page_changeable)); 4353 memcpy(&lun->mode_pages.control_ext_page[ 4354 CTL_PAGE_SAVED], 4355 &control_ext_page_default, 4356 sizeof(control_ext_page_default)); 4357 memcpy(&lun->mode_pages.control_ext_page[ 4358 CTL_PAGE_CURRENT], 4359 &lun->mode_pages.control_ext_page[ 4360 CTL_PAGE_SAVED], 4361 sizeof(control_ext_page_default)); 4362 page_index->page_data = 4363 (uint8_t *)lun->mode_pages.control_ext_page; 4364 break; 4365 default: 4366 panic("subpage %#x for page %#x is incorrect!", 4367 page_index->subpage, page_code); 4368 } 4369 break; 4370 } 4371 case SMS_INFO_EXCEPTIONS_PAGE: { 4372 switch (page_index->subpage) { 4373 case SMS_SUBPAGE_PAGE_0: 4374 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4375 &ie_page_default, 4376 sizeof(ie_page_default)); 4377 memcpy(&lun->mode_pages.ie_page[ 4378 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4379 sizeof(ie_page_changeable)); 4380 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4381 &ie_page_default, 4382 sizeof(ie_page_default)); 4383 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4384 &ie_page_default, 4385 sizeof(ie_page_default)); 4386 page_index->page_data = 4387 (uint8_t *)lun->mode_pages.ie_page; 4388 break; 4389 case 0x02: { 4390 struct ctl_logical_block_provisioning_page *page; 4391 4392 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4393 &lbp_page_default, 4394 sizeof(lbp_page_default)); 4395 memcpy(&lun->mode_pages.lbp_page[ 4396 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4397 sizeof(lbp_page_changeable)); 4398 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4399 &lbp_page_default, 4400 sizeof(lbp_page_default)); 4401 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4402 value = dnvlist_get_string(lun->be_lun->options, 4403 "avail-threshold", NULL); 4404 if (value != NULL && 4405 ctl_expand_number(value, &ival) == 0) { 4406 page->descr[0].flags |= SLBPPD_ENABLED | 4407 SLBPPD_ARMING_DEC; 4408 if (lun->be_lun->blocksize) 4409 ival /= lun->be_lun->blocksize; 4410 else 4411 ival /= 512; 4412 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4413 page->descr[0].count); 4414 } 4415 value = dnvlist_get_string(lun->be_lun->options, 4416 "used-threshold", NULL); 4417 if (value != NULL && 4418 ctl_expand_number(value, &ival) == 0) { 4419 page->descr[1].flags |= SLBPPD_ENABLED | 4420 SLBPPD_ARMING_INC; 4421 if (lun->be_lun->blocksize) 4422 ival /= lun->be_lun->blocksize; 4423 else 4424 ival /= 512; 4425 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4426 page->descr[1].count); 4427 } 4428 value = dnvlist_get_string(lun->be_lun->options, 4429 "pool-avail-threshold", NULL); 4430 if (value != NULL && 4431 ctl_expand_number(value, &ival) == 0) { 4432 page->descr[2].flags |= SLBPPD_ENABLED | 4433 SLBPPD_ARMING_DEC; 4434 if (lun->be_lun->blocksize) 
4435 ival /= lun->be_lun->blocksize; 4436 else 4437 ival /= 512; 4438 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4439 page->descr[2].count); 4440 } 4441 value = dnvlist_get_string(lun->be_lun->options, 4442 "pool-used-threshold", NULL); 4443 if (value != NULL && 4444 ctl_expand_number(value, &ival) == 0) { 4445 page->descr[3].flags |= SLBPPD_ENABLED | 4446 SLBPPD_ARMING_INC; 4447 if (lun->be_lun->blocksize) 4448 ival /= lun->be_lun->blocksize; 4449 else 4450 ival /= 512; 4451 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4452 page->descr[3].count); 4453 } 4454 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4455 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4456 sizeof(lbp_page_default)); 4457 page_index->page_data = 4458 (uint8_t *)lun->mode_pages.lbp_page; 4459 break; 4460 } 4461 default: 4462 panic("subpage %#x for page %#x is incorrect!", 4463 page_index->subpage, page_code); 4464 } 4465 break; 4466 } 4467 case SMS_CDDVD_CAPS_PAGE:{ 4468 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4469 ("subpage %#x for page %#x is incorrect!", 4470 page_index->subpage, page_code)); 4471 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4472 &cddvd_page_default, 4473 sizeof(cddvd_page_default)); 4474 memcpy(&lun->mode_pages.cddvd_page[ 4475 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4476 sizeof(cddvd_page_changeable)); 4477 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4478 &cddvd_page_default, 4479 sizeof(cddvd_page_default)); 4480 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4481 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4482 sizeof(cddvd_page_default)); 4483 page_index->page_data = 4484 (uint8_t *)lun->mode_pages.cddvd_page; 4485 break; 4486 } 4487 default: 4488 panic("invalid page code value %#x", page_code); 4489 } 4490 } 4491 4492 return (CTL_RETVAL_COMPLETE); 4493 } 4494 4495 static int 4496 ctl_init_log_page_index(struct ctl_lun *lun) 4497 { 4498 struct ctl_page_index *page_index; 4499 int i, j, k, prev; 4500 4501 memcpy(&lun->log_pages.index, log_page_index_template, 4502 sizeof(log_page_index_template)); 4503 4504 prev = -1; 4505 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4506 page_index = &lun->log_pages.index[i]; 4507 if (lun->be_lun->lun_type == T_DIRECT && 4508 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4509 continue; 4510 if (lun->be_lun->lun_type == T_PROCESSOR && 4511 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4512 continue; 4513 if (lun->be_lun->lun_type == T_CDROM && 4514 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4515 continue; 4516 4517 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4518 lun->backend->lun_attr == NULL) 4519 continue; 4520 4521 if (page_index->page_code != prev) { 4522 lun->log_pages.pages_page[j] = page_index->page_code; 4523 prev = page_index->page_code; 4524 j++; 4525 } 4526 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4527 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4528 k++; 4529 } 4530 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4531 lun->log_pages.index[0].page_len = j; 4532 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4533 lun->log_pages.index[1].page_len = k * 2; 4534 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; 4535 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); 4536 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; 4537 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; 4538 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.stat_page; 4539 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); 4540 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; 4541 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); 4542 4543 return (CTL_RETVAL_COMPLETE); 4544 } 4545 4546 static int 4547 hex2bin(const char *str, uint8_t *buf, int buf_size) 4548 { 4549 int i; 4550 u_char c; 4551 4552 memset(buf, 0, buf_size); 4553 while (isspace(str[0])) 4554 str++; 4555 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4556 str += 2; 4557 buf_size *= 2; 4558 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4559 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4560 str++; 4561 c = str[i]; 4562 if (isdigit(c)) 4563 c -= '0'; 4564 else if (isalpha(c)) 4565 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4566 else 4567 break; 4568 if (c >= 16) 4569 break; 4570 if ((i & 1) == 0) 4571 buf[i / 2] |= (c << 4); 4572 else 4573 buf[i / 2] |= c; 4574 } 4575 return ((i + 1) / 2); 4576 } 4577 4578 /* 4579 * Add LUN. 4580 * 4581 * Returns 0 for success, non-zero (errno) for failure. 4582 */ 4583 int 4584 ctl_add_lun(struct ctl_be_lun *be_lun) 4585 { 4586 struct ctl_softc *ctl_softc = control_softc; 4587 struct ctl_lun *nlun, *lun; 4588 struct scsi_vpd_id_descriptor *desc; 4589 struct scsi_vpd_id_t10 *t10id; 4590 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4591 int lun_number; 4592 int devidlen, idlen1, idlen2 = 0, len; 4593 4594 /* 4595 * We support only Direct Access, CD-ROM or Processor LUN types. 4596 */ 4597 switch (be_lun->lun_type) { 4598 case T_DIRECT: 4599 case T_PROCESSOR: 4600 case T_CDROM: 4601 break; 4602 case T_SEQUENTIAL: 4603 case T_CHANGER: 4604 default: 4605 return (EINVAL); 4606 } 4607 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); 4608 4609 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4610 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4611 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4612 M_DEVBUF, M_WAITOK | M_ZERO); 4613 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4614 M_DEVBUF, M_WAITOK | M_ZERO); 4615 4616 /* Generate LUN ID. 
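 *
 * A rough sketch of what a backend might pass in its options nvlist (the
 * option names match the lookups below; the values are made up):
 *
 *	scsiname = "iqn.2012-06.org.example:target0,lun,0"
 *	eui      = "0x0011223344556677"
 *	naa      = "0x600c0ff000112233445566778899aabb"
 *	uuid     = "01234567-89ab-cdef-0123-456789abcdef"
 *
 * The hex strings are converted into binary designators by hex2bin()
 * above, which skips an optional "0x" prefix and any dashes.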
*/ 4617 devidlen = max(CTL_DEVID_MIN_LEN, 4618 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4619 idlen1 = sizeof(*t10id) + devidlen; 4620 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4621 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4622 if (scsiname != NULL) { 4623 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4624 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4625 } 4626 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4627 if (eui != NULL) { 4628 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4629 } 4630 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4631 if (naa != NULL) { 4632 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4633 } 4634 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4635 if (uuid != NULL) { 4636 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4637 } 4638 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4639 M_CTL, M_WAITOK | M_ZERO); 4640 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4641 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4642 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4643 desc->length = idlen1; 4644 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4645 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4646 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4647 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4648 } else { 4649 strncpy(t10id->vendor, vendor, 4650 min(sizeof(t10id->vendor), strlen(vendor))); 4651 } 4652 strncpy((char *)t10id->vendor_spec_id, 4653 (char *)be_lun->device_id, devidlen); 4654 if (scsiname != NULL) { 4655 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4656 desc->length); 4657 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4658 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4659 SVPD_ID_TYPE_SCSI_NAME; 4660 desc->length = idlen2; 4661 strlcpy(desc->identifier, scsiname, idlen2); 4662 } 4663 if (eui != NULL) { 4664 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4665 desc->length); 4666 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4667 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4668 SVPD_ID_TYPE_EUI64; 4669 desc->length = hex2bin(eui, desc->identifier, 16); 4670 desc->length = desc->length > 12 ? 16 : 4671 (desc->length > 8 ? 12 : 8); 4672 len -= 16 - desc->length; 4673 } 4674 if (naa != NULL) { 4675 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4676 desc->length); 4677 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4678 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4679 SVPD_ID_TYPE_NAA; 4680 desc->length = hex2bin(naa, desc->identifier, 16); 4681 desc->length = desc->length > 8 ? 16 : 8; 4682 len -= 16 - desc->length; 4683 } 4684 if (uuid != NULL) { 4685 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4686 desc->length); 4687 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4688 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4689 SVPD_ID_TYPE_UUID; 4690 desc->identifier[0] = 0x10; 4691 hex2bin(uuid, &desc->identifier[2], 16); 4692 desc->length = 18; 4693 } 4694 lun->lun_devid->len = len; 4695 4696 mtx_lock(&ctl_softc->ctl_lock); 4697 /* 4698 * See if the caller requested a particular LUN number. If so, see 4699 * if it is available. Otherwise, allocate the first available LUN. 
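 *
 * The "first available" case is just a scan of the LUN bitmask: for
 * example, ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns) returns 2
 * when LUN IDs 0 and 1 are already in use, and -1 when every ID is taken.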
4700 */ 4701 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4702 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4703 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4704 mtx_unlock(&ctl_softc->ctl_lock); 4705 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4706 printf("ctl: requested LUN ID %d is higher " 4707 "than ctl_max_luns - 1 (%d)\n", 4708 be_lun->req_lun_id, ctl_max_luns - 1); 4709 } else { 4710 /* 4711 * XXX KDM return an error, or just assign 4712 * another LUN ID in this case?? 4713 */ 4714 printf("ctl: requested LUN ID %d is already " 4715 "in use\n", be_lun->req_lun_id); 4716 } 4717 fail: 4718 free(lun->lun_devid, M_CTL); 4719 free(lun, M_CTL); 4720 return (ENOSPC); 4721 } 4722 lun_number = be_lun->req_lun_id; 4723 } else { 4724 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4725 if (lun_number == -1) { 4726 mtx_unlock(&ctl_softc->ctl_lock); 4727 printf("ctl: can't allocate LUN, out of LUNs\n"); 4728 goto fail; 4729 } 4730 } 4731 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4732 mtx_unlock(&ctl_softc->ctl_lock); 4733 4734 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4735 lun->lun = lun_number; 4736 lun->be_lun = be_lun; 4737 /* 4738 * The processor LUN is always enabled. Disk LUNs come on line 4739 * disabled, and must be enabled by the backend. 4740 */ 4741 lun->flags |= CTL_LUN_DISABLED; 4742 lun->backend = be_lun->be; 4743 be_lun->ctl_lun = lun; 4744 be_lun->lun_id = lun_number; 4745 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4746 lun->flags |= CTL_LUN_EJECTED; 4747 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4748 lun->flags |= CTL_LUN_NO_MEDIA; 4749 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4750 lun->flags |= CTL_LUN_STOPPED; 4751 4752 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4753 lun->flags |= CTL_LUN_PRIMARY_SC; 4754 4755 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4756 if (value != NULL) { 4757 if (strcmp(value, "on") == 0) 4758 lun->flags |= CTL_LUN_REMOVABLE; 4759 } else if (be_lun->lun_type == T_CDROM) 4760 lun->flags |= CTL_LUN_REMOVABLE; 4761 4762 lun->ctl_softc = ctl_softc; 4763 #ifdef CTL_TIME_IO 4764 lun->last_busy = getsbinuptime(); 4765 #endif 4766 LIST_INIT(&lun->ooa_queue); 4767 STAILQ_INIT(&lun->error_list); 4768 lun->ie_reported = 1; 4769 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4770 ctl_tpc_lun_init(lun); 4771 if (lun->flags & CTL_LUN_REMOVABLE) { 4772 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4773 M_CTL, M_WAITOK); 4774 } 4775 4776 /* 4777 * Initialize the mode and log page index. 4778 */ 4779 ctl_init_page_index(lun); 4780 ctl_init_log_page_index(lun); 4781 4782 /* Setup statistics gathering */ 4783 lun->stats.item = lun_number; 4784 4785 /* 4786 * Now, before we insert this lun on the lun list, set the lun 4787 * inventory changed UA for all other luns. 4788 */ 4789 mtx_lock(&ctl_softc->ctl_lock); 4790 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4791 mtx_lock(&nlun->lun_lock); 4792 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4793 mtx_unlock(&nlun->lun_lock); 4794 } 4795 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4796 ctl_softc->ctl_luns[lun_number] = lun; 4797 ctl_softc->num_luns++; 4798 mtx_unlock(&ctl_softc->ctl_lock); 4799 4800 /* 4801 * We successfully added the LUN, attempt to enable it. 
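 *
 * If enabling fails, everything done above (the lun_list entry, the bit
 * in ctl_lun_mask, the ctl_luns[] slot and the num_luns count) is
 * unwound before returning an error to the backend.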
4802 */ 4803 if (ctl_enable_lun(lun) != 0) { 4804 printf("%s: ctl_enable_lun() failed!\n", __func__); 4805 mtx_lock(&ctl_softc->ctl_lock); 4806 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); 4807 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); 4808 ctl_softc->ctl_luns[lun_number] = NULL; 4809 ctl_softc->num_luns--; 4810 mtx_unlock(&ctl_softc->ctl_lock); 4811 free(lun->lun_devid, M_CTL); 4812 free(lun, M_CTL); 4813 return (EIO); 4814 } 4815 4816 return (0); 4817 } 4818 4819 /* 4820 * Free LUN that has no active requests. 4821 */ 4822 static int 4823 ctl_free_lun(struct ctl_lun *lun) 4824 { 4825 struct ctl_softc *softc = lun->ctl_softc; 4826 struct ctl_lun *nlun; 4827 int i; 4828 4829 KASSERT(LIST_EMPTY(&lun->ooa_queue), 4830 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4831 4832 mtx_lock(&softc->ctl_lock); 4833 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4834 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4835 softc->ctl_luns[lun->lun] = NULL; 4836 softc->num_luns--; 4837 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4838 mtx_lock(&nlun->lun_lock); 4839 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4840 mtx_unlock(&nlun->lun_lock); 4841 } 4842 mtx_unlock(&softc->ctl_lock); 4843 4844 /* 4845 * Tell the backend to free resources, if this LUN has a backend. 4846 */ 4847 lun->be_lun->lun_shutdown(lun->be_lun); 4848 4849 lun->ie_reportcnt = UINT32_MAX; 4850 callout_drain(&lun->ie_callout); 4851 ctl_tpc_lun_shutdown(lun); 4852 mtx_destroy(&lun->lun_lock); 4853 free(lun->lun_devid, M_CTL); 4854 for (i = 0; i < ctl_max_ports; i++) 4855 free(lun->pending_ua[i], M_CTL); 4856 free(lun->pending_ua, M_DEVBUF); 4857 for (i = 0; i < ctl_max_ports; i++) 4858 free(lun->pr_keys[i], M_CTL); 4859 free(lun->pr_keys, M_DEVBUF); 4860 free(lun->write_buffer, M_CTL); 4861 free(lun->prevent, M_CTL); 4862 free(lun, M_CTL); 4863 4864 return (0); 4865 } 4866 4867 static int 4868 ctl_enable_lun(struct ctl_lun *lun) 4869 { 4870 struct ctl_softc *softc; 4871 struct ctl_port *port, *nport; 4872 int retval; 4873 4874 softc = lun->ctl_softc; 4875 4876 mtx_lock(&softc->ctl_lock); 4877 mtx_lock(&lun->lun_lock); 4878 KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, 4879 ("%s: LUN not disabled", __func__)); 4880 lun->flags &= ~CTL_LUN_DISABLED; 4881 mtx_unlock(&lun->lun_lock); 4882 4883 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4884 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4885 port->lun_map != NULL || port->lun_enable == NULL) 4886 continue; 4887 4888 /* 4889 * Drop the lock while we call the FETD's enable routine. 4890 * This can lead to a callback into CTL (at least in the 4891 * case of the internal initiator frontend. 
4892 */ 4893 mtx_unlock(&softc->ctl_lock); 4894 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4895 mtx_lock(&softc->ctl_lock); 4896 if (retval != 0) { 4897 printf("%s: FETD %s port %d returned error " 4898 "%d for lun_enable on lun %jd\n", 4899 __func__, port->port_name, port->targ_port, 4900 retval, (intmax_t)lun->lun); 4901 } 4902 } 4903 4904 mtx_unlock(&softc->ctl_lock); 4905 ctl_isc_announce_lun(lun); 4906 4907 return (0); 4908 } 4909 4910 static int 4911 ctl_disable_lun(struct ctl_lun *lun) 4912 { 4913 struct ctl_softc *softc; 4914 struct ctl_port *port; 4915 int retval; 4916 4917 softc = lun->ctl_softc; 4918 4919 mtx_lock(&softc->ctl_lock); 4920 mtx_lock(&lun->lun_lock); 4921 KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, 4922 ("%s: LUN not enabled", __func__)); 4923 lun->flags |= CTL_LUN_DISABLED; 4924 mtx_unlock(&lun->lun_lock); 4925 4926 STAILQ_FOREACH(port, &softc->port_list, links) { 4927 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4928 port->lun_map != NULL || port->lun_disable == NULL) 4929 continue; 4930 4931 /* 4932 * Drop the lock before we call the frontend's disable 4933 * routine, to avoid lock order reversals. 4934 * 4935 * XXX KDM what happens if the frontend list changes while 4936 * we're traversing it? It's unlikely, but should be handled. 4937 */ 4938 mtx_unlock(&softc->ctl_lock); 4939 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4940 mtx_lock(&softc->ctl_lock); 4941 if (retval != 0) { 4942 printf("%s: FETD %s port %d returned error " 4943 "%d for lun_disable on lun %jd\n", 4944 __func__, port->port_name, port->targ_port, 4945 retval, (intmax_t)lun->lun); 4946 } 4947 } 4948 4949 mtx_unlock(&softc->ctl_lock); 4950 ctl_isc_announce_lun(lun); 4951 4952 return (0); 4953 } 4954 4955 int 4956 ctl_start_lun(struct ctl_be_lun *be_lun) 4957 { 4958 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4959 4960 mtx_lock(&lun->lun_lock); 4961 lun->flags &= ~CTL_LUN_STOPPED; 4962 mtx_unlock(&lun->lun_lock); 4963 return (0); 4964 } 4965 4966 int 4967 ctl_stop_lun(struct ctl_be_lun *be_lun) 4968 { 4969 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4970 4971 mtx_lock(&lun->lun_lock); 4972 lun->flags |= CTL_LUN_STOPPED; 4973 mtx_unlock(&lun->lun_lock); 4974 return (0); 4975 } 4976 4977 int 4978 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4979 { 4980 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4981 4982 mtx_lock(&lun->lun_lock); 4983 lun->flags |= CTL_LUN_NO_MEDIA; 4984 mtx_unlock(&lun->lun_lock); 4985 return (0); 4986 } 4987 4988 int 4989 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4990 { 4991 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4992 union ctl_ha_msg msg; 4993 4994 mtx_lock(&lun->lun_lock); 4995 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4996 if (lun->flags & CTL_LUN_REMOVABLE) 4997 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4998 mtx_unlock(&lun->lun_lock); 4999 if ((lun->flags & CTL_LUN_REMOVABLE) && 5000 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5001 bzero(&msg.ua, sizeof(msg.ua)); 5002 msg.hdr.msg_type = CTL_MSG_UA; 5003 msg.hdr.nexus.initid = -1; 5004 msg.hdr.nexus.targ_port = -1; 5005 msg.hdr.nexus.targ_lun = lun->lun; 5006 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5007 msg.ua.ua_all = 1; 5008 msg.ua.ua_set = 1; 5009 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 5010 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5011 M_WAITOK); 5012 } 5013 return (0); 5014 } 5015 5016 int 5017 ctl_lun_ejected(struct ctl_be_lun *be_lun) 5018 { 5019 struct ctl_lun *lun = (struct ctl_lun 
*)be_lun->ctl_lun; 5020 5021 mtx_lock(&lun->lun_lock); 5022 lun->flags |= CTL_LUN_EJECTED; 5023 mtx_unlock(&lun->lun_lock); 5024 return (0); 5025 } 5026 5027 int 5028 ctl_lun_primary(struct ctl_be_lun *be_lun) 5029 { 5030 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5031 5032 mtx_lock(&lun->lun_lock); 5033 lun->flags |= CTL_LUN_PRIMARY_SC; 5034 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5035 mtx_unlock(&lun->lun_lock); 5036 ctl_isc_announce_lun(lun); 5037 return (0); 5038 } 5039 5040 int 5041 ctl_lun_secondary(struct ctl_be_lun *be_lun) 5042 { 5043 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5044 5045 mtx_lock(&lun->lun_lock); 5046 lun->flags &= ~CTL_LUN_PRIMARY_SC; 5047 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5048 mtx_unlock(&lun->lun_lock); 5049 ctl_isc_announce_lun(lun); 5050 return (0); 5051 } 5052 5053 /* 5054 * Remove LUN. If there are active requests, wait for completion. 5055 * 5056 * Returns 0 for success, non-zero (errno) for failure. 5057 * Completion is reported to backed via the lun_shutdown() method. 5058 */ 5059 int 5060 ctl_remove_lun(struct ctl_be_lun *be_lun) 5061 { 5062 struct ctl_lun *lun; 5063 5064 lun = (struct ctl_lun *)be_lun->ctl_lun; 5065 5066 ctl_disable_lun(lun); 5067 5068 mtx_lock(&lun->lun_lock); 5069 lun->flags |= CTL_LUN_INVALID; 5070 5071 /* 5072 * If there is nothing in the OOA queue, go ahead and free the LUN. 5073 * If we have something in the OOA queue, we'll free it when the 5074 * last I/O completes. 5075 */ 5076 if (LIST_EMPTY(&lun->ooa_queue)) { 5077 mtx_unlock(&lun->lun_lock); 5078 ctl_free_lun(lun); 5079 } else 5080 mtx_unlock(&lun->lun_lock); 5081 5082 return (0); 5083 } 5084 5085 void 5086 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5087 { 5088 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5089 union ctl_ha_msg msg; 5090 5091 mtx_lock(&lun->lun_lock); 5092 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5093 mtx_unlock(&lun->lun_lock); 5094 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5095 /* Send msg to other side. */ 5096 bzero(&msg.ua, sizeof(msg.ua)); 5097 msg.hdr.msg_type = CTL_MSG_UA; 5098 msg.hdr.nexus.initid = -1; 5099 msg.hdr.nexus.targ_port = -1; 5100 msg.hdr.nexus.targ_lun = lun->lun; 5101 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5102 msg.ua.ua_all = 1; 5103 msg.ua.ua_set = 1; 5104 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5105 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5106 M_WAITOK); 5107 } 5108 } 5109 5110 /* 5111 * Backend "memory move is complete" callback for requests that never 5112 * make it down to say RAIDCore's configuration code. 5113 */ 5114 int 5115 ctl_config_move_done(union ctl_io *io, bool samethr) 5116 { 5117 int retval; 5118 5119 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5120 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5121 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 5122 5123 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5124 ctl_data_print(io); 5125 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5126 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5127 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5128 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5129 /* 5130 * XXX KDM just assuming a single pointer here, and not a 5131 * S/G list. If we start using S/G lists for config data, 5132 * we'll need to know how to clean them up here as well. 
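 * (So far the config handlers in this file allocate at most one linear
 * buffer and leave kern_sg_entries at 0, so a single free of
 * kern_data_ptr is all the cleanup that is needed.)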
5133 */ 5134 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5135 free(io->scsiio.kern_data_ptr, M_CTL); 5136 ctl_done(io); 5137 retval = CTL_RETVAL_COMPLETE; 5138 } else { 5139 /* 5140 * XXX KDM now we need to continue data movement. Some 5141 * options: 5142 * - call ctl_scsiio() again? We don't do this for data 5143 * writes, because for those at least we know ahead of 5144 * time where the write will go and how long it is. For 5145 * config writes, though, that information is largely 5146 * contained within the write itself, thus we need to 5147 * parse out the data again. 5148 * 5149 * - Call some other function once the data is in? 5150 */ 5151 5152 /* 5153 * XXX KDM call ctl_scsiio() again for now, and check flag 5154 * bits to see whether we're allocated or not. 5155 */ 5156 retval = ctl_scsiio(&io->scsiio); 5157 } 5158 return (retval); 5159 } 5160 5161 /* 5162 * This gets called by a backend driver when it is done with a 5163 * data_submit method. 5164 */ 5165 void 5166 ctl_data_submit_done(union ctl_io *io) 5167 { 5168 /* 5169 * If the IO_CONT flag is set, we need to call the supplied 5170 * function to continue processing the I/O, instead of completing 5171 * the I/O just yet. 5172 * 5173 * If there is an error, though, we don't want to keep processing. 5174 * Instead, just send status back to the initiator. 5175 */ 5176 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5177 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5178 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5179 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5180 io->scsiio.io_cont(io); 5181 return; 5182 } 5183 ctl_done(io); 5184 } 5185 5186 /* 5187 * This gets called by a backend driver when it is done with a 5188 * configuration write. 5189 */ 5190 void 5191 ctl_config_write_done(union ctl_io *io) 5192 { 5193 uint8_t *buf; 5194 5195 /* 5196 * If the IO_CONT flag is set, we need to call the supplied 5197 * function to continue processing the I/O, instead of completing 5198 * the I/O just yet. 5199 * 5200 * If there is an error, though, we don't want to keep processing. 5201 * Instead, just send status back to the initiator. 5202 */ 5203 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5204 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5205 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5206 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5207 io->scsiio.io_cont(io); 5208 return; 5209 } 5210 /* 5211 * Since a configuration write can be done for commands that actually 5212 * have data allocated, like write buffer, and commands that have 5213 * no data, like start/stop unit, we need to check here. 5214 */ 5215 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5216 buf = io->scsiio.kern_data_ptr; 5217 else 5218 buf = NULL; 5219 ctl_done(io); 5220 if (buf) 5221 free(buf, M_CTL); 5222 } 5223 5224 void 5225 ctl_config_read_done(union ctl_io *io) 5226 { 5227 uint8_t *buf; 5228 5229 /* 5230 * If there is some error -- we are done, skip data transfer. 5231 */ 5232 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5233 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5234 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5235 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5236 buf = io->scsiio.kern_data_ptr; 5237 else 5238 buf = NULL; 5239 ctl_done(io); 5240 if (buf) 5241 free(buf, M_CTL); 5242 return; 5243 } 5244 5245 /* 5246 * If the IO_CONT flag is set, we need to call the supplied 5247 * function to continue processing the I/O, instead of completing 5248 * the I/O just yet. 
 */
	if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
		io->scsiio.io_cont(io);
		return;
	}

	ctl_datamove(io);
}

/*
 * SCSI release command.
 */
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * XXX KDM right now, we only support LUN reservation. We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list. If we've gotten this
	 * far, we've got a LUN reservation. Anything else got kicked out
	 * above. So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an initiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator. The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
	 */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
		lun->flags &= ~CTL_LUN_RESERVED;

	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_reserve\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * XXX KDM right now, we only support LUN reservation. We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list. If we've gotten this
	 * far, we've got a LUN reservation. Anything else got kicked out
	 * above. So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
		ctl_set_reservation_conflict(ctsio);
		goto bailout;
	}

	/* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior.
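	 * When a persistent reservation is already active, the code below
	 * completes this legacy RESERVE with GOOD status without
	 * establishing an SPC-2 reservation, which is the exception
	 * behavior referred to here.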
*/ 5323 if (lun->flags & CTL_LUN_PR_RESERVED) { 5324 ctl_set_success(ctsio); 5325 goto bailout; 5326 } 5327 5328 lun->flags |= CTL_LUN_RESERVED; 5329 lun->res_idx = residx; 5330 ctl_set_success(ctsio); 5331 5332 bailout: 5333 mtx_unlock(&lun->lun_lock); 5334 ctl_done((union ctl_io *)ctsio); 5335 return (CTL_RETVAL_COMPLETE); 5336 } 5337 5338 int 5339 ctl_start_stop(struct ctl_scsiio *ctsio) 5340 { 5341 struct ctl_lun *lun = CTL_LUN(ctsio); 5342 struct scsi_start_stop_unit *cdb; 5343 int retval; 5344 5345 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5346 5347 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5348 5349 if ((cdb->how & SSS_PC_MASK) == 0) { 5350 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5351 (cdb->how & SSS_START) == 0) { 5352 uint32_t residx; 5353 5354 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5355 if (ctl_get_prkey(lun, residx) == 0 || 5356 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5357 ctl_set_reservation_conflict(ctsio); 5358 ctl_done((union ctl_io *)ctsio); 5359 return (CTL_RETVAL_COMPLETE); 5360 } 5361 } 5362 5363 if ((cdb->how & SSS_LOEJ) && 5364 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5365 ctl_set_invalid_field(ctsio, 5366 /*sks_valid*/ 1, 5367 /*command*/ 1, 5368 /*field*/ 4, 5369 /*bit_valid*/ 1, 5370 /*bit*/ 1); 5371 ctl_done((union ctl_io *)ctsio); 5372 return (CTL_RETVAL_COMPLETE); 5373 } 5374 5375 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5376 lun->prevent_count > 0) { 5377 /* "Medium removal prevented" */ 5378 ctl_set_sense(ctsio, /*current_error*/ 1, 5379 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5380 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5381 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5382 ctl_done((union ctl_io *)ctsio); 5383 return (CTL_RETVAL_COMPLETE); 5384 } 5385 } 5386 5387 retval = lun->backend->config_write((union ctl_io *)ctsio); 5388 return (retval); 5389 } 5390 5391 int 5392 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5393 { 5394 struct ctl_lun *lun = CTL_LUN(ctsio); 5395 struct scsi_prevent *cdb; 5396 int retval; 5397 uint32_t initidx; 5398 5399 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5400 5401 cdb = (struct scsi_prevent *)ctsio->cdb; 5402 5403 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5404 ctl_set_invalid_opcode(ctsio); 5405 ctl_done((union ctl_io *)ctsio); 5406 return (CTL_RETVAL_COMPLETE); 5407 } 5408 5409 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5410 mtx_lock(&lun->lun_lock); 5411 if ((cdb->how & PR_PREVENT) && 5412 ctl_is_set(lun->prevent, initidx) == 0) { 5413 ctl_set_mask(lun->prevent, initidx); 5414 lun->prevent_count++; 5415 } else if ((cdb->how & PR_PREVENT) == 0 && 5416 ctl_is_set(lun->prevent, initidx)) { 5417 ctl_clear_mask(lun->prevent, initidx); 5418 lun->prevent_count--; 5419 } 5420 mtx_unlock(&lun->lun_lock); 5421 retval = lun->backend->config_write((union ctl_io *)ctsio); 5422 return (retval); 5423 } 5424 5425 /* 5426 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5427 * we don't really do anything with the LBA and length fields if the user 5428 * passes them in. Instead we'll just flush out the cache for the entire 5429 * LUN. 
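 *
 * For example, a SYNCHRONIZE CACHE(10) with begin_lba 0 and lb_count 8 on
 * a LUN whose last LBA is 999 passes the range check below, and the
 * backend then flushes its cache for the whole LUN regardless of that
 * range.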
5430 */ 5431 int 5432 ctl_sync_cache(struct ctl_scsiio *ctsio) 5433 { 5434 struct ctl_lun *lun = CTL_LUN(ctsio); 5435 struct ctl_lba_len_flags *lbalen; 5436 uint64_t starting_lba; 5437 uint32_t block_count; 5438 int retval; 5439 uint8_t byte2; 5440 5441 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5442 5443 retval = 0; 5444 5445 switch (ctsio->cdb[0]) { 5446 case SYNCHRONIZE_CACHE: { 5447 struct scsi_sync_cache *cdb; 5448 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5449 5450 starting_lba = scsi_4btoul(cdb->begin_lba); 5451 block_count = scsi_2btoul(cdb->lb_count); 5452 byte2 = cdb->byte2; 5453 break; 5454 } 5455 case SYNCHRONIZE_CACHE_16: { 5456 struct scsi_sync_cache_16 *cdb; 5457 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5458 5459 starting_lba = scsi_8btou64(cdb->begin_lba); 5460 block_count = scsi_4btoul(cdb->lb_count); 5461 byte2 = cdb->byte2; 5462 break; 5463 } 5464 default: 5465 ctl_set_invalid_opcode(ctsio); 5466 ctl_done((union ctl_io *)ctsio); 5467 goto bailout; 5468 break; /* NOTREACHED */ 5469 } 5470 5471 /* 5472 * We check the LBA and length, but don't do anything with them. 5473 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5474 * get flushed. This check will just help satisfy anyone who wants 5475 * to see an error for an out of range LBA. 5476 */ 5477 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5478 ctl_set_lba_out_of_range(ctsio, 5479 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5480 ctl_done((union ctl_io *)ctsio); 5481 goto bailout; 5482 } 5483 5484 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5485 lbalen->lba = starting_lba; 5486 lbalen->len = block_count; 5487 lbalen->flags = byte2; 5488 retval = lun->backend->config_write((union ctl_io *)ctsio); 5489 5490 bailout: 5491 return (retval); 5492 } 5493 5494 int 5495 ctl_format(struct ctl_scsiio *ctsio) 5496 { 5497 struct scsi_format *cdb; 5498 int length, defect_list_len; 5499 5500 CTL_DEBUG_PRINT(("ctl_format\n")); 5501 5502 cdb = (struct scsi_format *)ctsio->cdb; 5503 5504 length = 0; 5505 if (cdb->byte2 & SF_FMTDATA) { 5506 if (cdb->byte2 & SF_LONGLIST) 5507 length = sizeof(struct scsi_format_header_long); 5508 else 5509 length = sizeof(struct scsi_format_header_short); 5510 } 5511 5512 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5513 && (length > 0)) { 5514 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5515 ctsio->kern_data_len = length; 5516 ctsio->kern_total_len = length; 5517 ctsio->kern_rel_offset = 0; 5518 ctsio->kern_sg_entries = 0; 5519 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5520 ctsio->be_move_done = ctl_config_move_done; 5521 ctl_datamove((union ctl_io *)ctsio); 5522 5523 return (CTL_RETVAL_COMPLETE); 5524 } 5525 5526 defect_list_len = 0; 5527 5528 if (cdb->byte2 & SF_FMTDATA) { 5529 if (cdb->byte2 & SF_LONGLIST) { 5530 struct scsi_format_header_long *header; 5531 5532 header = (struct scsi_format_header_long *) 5533 ctsio->kern_data_ptr; 5534 5535 defect_list_len = scsi_4btoul(header->defect_list_len); 5536 if (defect_list_len != 0) { 5537 ctl_set_invalid_field(ctsio, 5538 /*sks_valid*/ 1, 5539 /*command*/ 0, 5540 /*field*/ 2, 5541 /*bit_valid*/ 0, 5542 /*bit*/ 0); 5543 goto bailout; 5544 } 5545 } else { 5546 struct scsi_format_header_short *header; 5547 5548 header = (struct scsi_format_header_short *) 5549 ctsio->kern_data_ptr; 5550 5551 defect_list_len = scsi_2btoul(header->defect_list_len); 5552 if (defect_list_len != 0) { 5553 ctl_set_invalid_field(ctsio, 5554 /*sks_valid*/ 1, 5555 /*command*/ 0, 5556 
/*field*/ 2, 5557 /*bit_valid*/ 0, 5558 /*bit*/ 0); 5559 goto bailout; 5560 } 5561 } 5562 } 5563 5564 ctl_set_success(ctsio); 5565 bailout: 5566 5567 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5568 free(ctsio->kern_data_ptr, M_CTL); 5569 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5570 } 5571 5572 ctl_done((union ctl_io *)ctsio); 5573 return (CTL_RETVAL_COMPLETE); 5574 } 5575 5576 int 5577 ctl_read_buffer(struct ctl_scsiio *ctsio) 5578 { 5579 struct ctl_lun *lun = CTL_LUN(ctsio); 5580 uint64_t buffer_offset; 5581 uint32_t len; 5582 uint8_t byte2; 5583 static uint8_t descr[4]; 5584 static uint8_t echo_descr[4] = { 0 }; 5585 5586 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5587 5588 switch (ctsio->cdb[0]) { 5589 case READ_BUFFER: { 5590 struct scsi_read_buffer *cdb; 5591 5592 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5593 buffer_offset = scsi_3btoul(cdb->offset); 5594 len = scsi_3btoul(cdb->length); 5595 byte2 = cdb->byte2; 5596 break; 5597 } 5598 case READ_BUFFER_16: { 5599 struct scsi_read_buffer_16 *cdb; 5600 5601 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5602 buffer_offset = scsi_8btou64(cdb->offset); 5603 len = scsi_4btoul(cdb->length); 5604 byte2 = cdb->byte2; 5605 break; 5606 } 5607 default: /* This shouldn't happen. */ 5608 ctl_set_invalid_opcode(ctsio); 5609 ctl_done((union ctl_io *)ctsio); 5610 return (CTL_RETVAL_COMPLETE); 5611 } 5612 5613 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5614 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5615 ctl_set_invalid_field(ctsio, 5616 /*sks_valid*/ 1, 5617 /*command*/ 1, 5618 /*field*/ 6, 5619 /*bit_valid*/ 0, 5620 /*bit*/ 0); 5621 ctl_done((union ctl_io *)ctsio); 5622 return (CTL_RETVAL_COMPLETE); 5623 } 5624 5625 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5626 descr[0] = 0; 5627 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5628 ctsio->kern_data_ptr = descr; 5629 len = min(len, sizeof(descr)); 5630 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5631 ctsio->kern_data_ptr = echo_descr; 5632 len = min(len, sizeof(echo_descr)); 5633 } else { 5634 if (lun->write_buffer == NULL) { 5635 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5636 M_CTL, M_WAITOK); 5637 } 5638 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5639 } 5640 ctsio->kern_data_len = len; 5641 ctsio->kern_total_len = len; 5642 ctsio->kern_rel_offset = 0; 5643 ctsio->kern_sg_entries = 0; 5644 ctl_set_success(ctsio); 5645 ctsio->be_move_done = ctl_config_move_done; 5646 ctl_datamove((union ctl_io *)ctsio); 5647 return (CTL_RETVAL_COMPLETE); 5648 } 5649 5650 int 5651 ctl_write_buffer(struct ctl_scsiio *ctsio) 5652 { 5653 struct ctl_lun *lun = CTL_LUN(ctsio); 5654 struct scsi_write_buffer *cdb; 5655 int buffer_offset, len; 5656 5657 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5658 5659 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5660 5661 len = scsi_3btoul(cdb->length); 5662 buffer_offset = scsi_3btoul(cdb->offset); 5663 5664 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5665 ctl_set_invalid_field(ctsio, 5666 /*sks_valid*/ 1, 5667 /*command*/ 1, 5668 /*field*/ 6, 5669 /*bit_valid*/ 0, 5670 /*bit*/ 0); 5671 ctl_done((union ctl_io *)ctsio); 5672 return (CTL_RETVAL_COMPLETE); 5673 } 5674 5675 /* 5676 * If we've got a kernel request that hasn't been malloced yet, 5677 * malloc it and tell the caller the data buffer is here. 
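 *
 * This handler therefore runs twice: the first pass points kern_data_ptr
 * into the (lazily allocated) per-LUN write buffer and starts the data
 * move, and once ctl_config_move_done() re-dispatches the command with
 * CTL_FLAG_ALLOCATED set, the second pass falls through and completes it
 * with success.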
5678 */ 5679 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5680 if (lun->write_buffer == NULL) { 5681 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5682 M_CTL, M_WAITOK); 5683 } 5684 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5685 ctsio->kern_data_len = len; 5686 ctsio->kern_total_len = len; 5687 ctsio->kern_rel_offset = 0; 5688 ctsio->kern_sg_entries = 0; 5689 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5690 ctsio->be_move_done = ctl_config_move_done; 5691 ctl_datamove((union ctl_io *)ctsio); 5692 5693 return (CTL_RETVAL_COMPLETE); 5694 } 5695 5696 ctl_set_success(ctsio); 5697 ctl_done((union ctl_io *)ctsio); 5698 return (CTL_RETVAL_COMPLETE); 5699 } 5700 5701 static int 5702 ctl_write_same_cont(union ctl_io *io) 5703 { 5704 struct ctl_lun *lun = CTL_LUN(io); 5705 struct ctl_scsiio *ctsio; 5706 struct ctl_lba_len_flags *lbalen; 5707 int retval; 5708 5709 ctsio = &io->scsiio; 5710 ctsio->io_hdr.status = CTL_STATUS_NONE; 5711 lbalen = (struct ctl_lba_len_flags *) 5712 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5713 lbalen->lba += lbalen->len; 5714 if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { 5715 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 5716 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; 5717 } 5718 5719 CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); 5720 retval = lun->backend->config_write((union ctl_io *)ctsio); 5721 return (retval); 5722 } 5723 5724 int 5725 ctl_write_same(struct ctl_scsiio *ctsio) 5726 { 5727 struct ctl_lun *lun = CTL_LUN(ctsio); 5728 struct ctl_lba_len_flags *lbalen; 5729 const char *val; 5730 uint64_t lba, ival; 5731 uint32_t num_blocks; 5732 int len, retval; 5733 uint8_t byte2; 5734 5735 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5736 5737 switch (ctsio->cdb[0]) { 5738 case WRITE_SAME_10: { 5739 struct scsi_write_same_10 *cdb; 5740 5741 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5742 5743 lba = scsi_4btoul(cdb->addr); 5744 num_blocks = scsi_2btoul(cdb->length); 5745 byte2 = cdb->byte2; 5746 break; 5747 } 5748 case WRITE_SAME_16: { 5749 struct scsi_write_same_16 *cdb; 5750 5751 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5752 5753 lba = scsi_8btou64(cdb->addr); 5754 num_blocks = scsi_4btoul(cdb->length); 5755 byte2 = cdb->byte2; 5756 break; 5757 } 5758 default: 5759 /* 5760 * We got a command we don't support. This shouldn't 5761 * happen, commands should be filtered out above us. 5762 */ 5763 ctl_set_invalid_opcode(ctsio); 5764 ctl_done((union ctl_io *)ctsio); 5765 5766 return (CTL_RETVAL_COMPLETE); 5767 break; /* NOTREACHED */ 5768 } 5769 5770 /* ANCHOR flag can be used only together with UNMAP */ 5771 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5772 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5773 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5774 ctl_done((union ctl_io *)ctsio); 5775 return (CTL_RETVAL_COMPLETE); 5776 } 5777 5778 /* 5779 * The first check is to make sure we're in bounds, the second 5780 * check is to catch wrap-around problems. If the lba + num blocks 5781 * is less than the lba, then we've wrapped around and the block 5782 * range is invalid anyway. 
5783 */ 5784 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5785 || ((lba + num_blocks) < lba)) { 5786 ctl_set_lba_out_of_range(ctsio, 5787 MAX(lba, lun->be_lun->maxlba + 1)); 5788 ctl_done((union ctl_io *)ctsio); 5789 return (CTL_RETVAL_COMPLETE); 5790 } 5791 5792 /* Zero number of blocks means "to the last logical block" */ 5793 if (num_blocks == 0) { 5794 ival = UINT64_MAX; 5795 val = dnvlist_get_string(lun->be_lun->options, 5796 "write_same_max_lba", NULL); 5797 if (val != NULL) 5798 ctl_expand_number(val, &ival); 5799 if ((lun->be_lun->maxlba + 1) - lba > ival) { 5800 ctl_set_invalid_field(ctsio, 5801 /*sks_valid*/ 1, /*command*/ 1, 5802 /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10, 5803 /*bit_valid*/ 0, /*bit*/ 0); 5804 ctl_done((union ctl_io *)ctsio); 5805 return (CTL_RETVAL_COMPLETE); 5806 } 5807 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5808 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 5809 ctsio->io_cont = ctl_write_same_cont; 5810 num_blocks = 1 << 31; 5811 } else 5812 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5813 } 5814 5815 len = lun->be_lun->blocksize; 5816 5817 /* 5818 * If we've got a kernel request that hasn't been malloced yet, 5819 * malloc it and tell the caller the data buffer is here. 5820 */ 5821 if ((byte2 & SWS_NDOB) == 0 && 5822 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5823 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5824 ctsio->kern_data_len = len; 5825 ctsio->kern_total_len = len; 5826 ctsio->kern_rel_offset = 0; 5827 ctsio->kern_sg_entries = 0; 5828 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5829 ctsio->be_move_done = ctl_config_move_done; 5830 ctl_datamove((union ctl_io *)ctsio); 5831 5832 return (CTL_RETVAL_COMPLETE); 5833 } 5834 5835 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5836 lbalen->lba = lba; 5837 lbalen->len = num_blocks; 5838 lbalen->flags = byte2; 5839 retval = lun->backend->config_write((union ctl_io *)ctsio); 5840 5841 return (retval); 5842 } 5843 5844 int 5845 ctl_unmap(struct ctl_scsiio *ctsio) 5846 { 5847 struct ctl_lun *lun = CTL_LUN(ctsio); 5848 struct scsi_unmap *cdb; 5849 struct ctl_ptr_len_flags *ptrlen; 5850 struct scsi_unmap_header *hdr; 5851 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5852 uint64_t lba; 5853 uint32_t num_blocks; 5854 int len, retval; 5855 uint8_t byte2; 5856 5857 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5858 5859 cdb = (struct scsi_unmap *)ctsio->cdb; 5860 len = scsi_2btoul(cdb->length); 5861 byte2 = cdb->byte2; 5862 5863 /* 5864 * If we've got a kernel request that hasn't been malloced yet, 5865 * malloc it and tell the caller the data buffer is here. 
5866 */ 5867 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5868 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5869 ctsio->kern_data_len = len; 5870 ctsio->kern_total_len = len; 5871 ctsio->kern_rel_offset = 0; 5872 ctsio->kern_sg_entries = 0; 5873 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5874 ctsio->be_move_done = ctl_config_move_done; 5875 ctl_datamove((union ctl_io *)ctsio); 5876 5877 return (CTL_RETVAL_COMPLETE); 5878 } 5879 5880 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5881 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5882 if (len < sizeof (*hdr) || 5883 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5884 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5885 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5886 ctl_set_invalid_field(ctsio, 5887 /*sks_valid*/ 0, 5888 /*command*/ 0, 5889 /*field*/ 0, 5890 /*bit_valid*/ 0, 5891 /*bit*/ 0); 5892 goto done; 5893 } 5894 len = scsi_2btoul(hdr->desc_length); 5895 buf = (struct scsi_unmap_desc *)(hdr + 1); 5896 end = buf + len / sizeof(*buf); 5897 5898 endnz = buf; 5899 for (range = buf; range < end; range++) { 5900 lba = scsi_8btou64(range->lba); 5901 num_blocks = scsi_4btoul(range->length); 5902 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5903 || ((lba + num_blocks) < lba)) { 5904 ctl_set_lba_out_of_range(ctsio, 5905 MAX(lba, lun->be_lun->maxlba + 1)); 5906 ctl_done((union ctl_io *)ctsio); 5907 return (CTL_RETVAL_COMPLETE); 5908 } 5909 if (num_blocks != 0) 5910 endnz = range + 1; 5911 } 5912 5913 /* 5914 * Block backend can not handle zero last range. 5915 * Filter it out and return if there is nothing left. 5916 */ 5917 len = (uint8_t *)endnz - (uint8_t *)buf; 5918 if (len == 0) { 5919 ctl_set_success(ctsio); 5920 goto done; 5921 } 5922 5923 mtx_lock(&lun->lun_lock); 5924 ptrlen = (struct ctl_ptr_len_flags *) 5925 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5926 ptrlen->ptr = (void *)buf; 5927 ptrlen->len = len; 5928 ptrlen->flags = byte2; 5929 ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); 5930 mtx_unlock(&lun->lun_lock); 5931 5932 retval = lun->backend->config_write((union ctl_io *)ctsio); 5933 return (retval); 5934 5935 done: 5936 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5937 free(ctsio->kern_data_ptr, M_CTL); 5938 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5939 } 5940 ctl_done((union ctl_io *)ctsio); 5941 return (CTL_RETVAL_COMPLETE); 5942 } 5943 5944 int 5945 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5946 struct ctl_page_index *page_index, uint8_t *page_ptr) 5947 { 5948 struct ctl_lun *lun = CTL_LUN(ctsio); 5949 uint8_t *current_cp; 5950 int set_ua; 5951 uint32_t initidx; 5952 5953 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5954 set_ua = 0; 5955 5956 current_cp = (page_index->page_data + (page_index->page_len * 5957 CTL_PAGE_CURRENT)); 5958 5959 mtx_lock(&lun->lun_lock); 5960 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5961 memcpy(current_cp, page_ptr, page_index->page_len); 5962 set_ua = 1; 5963 } 5964 if (set_ua != 0) 5965 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5966 mtx_unlock(&lun->lun_lock); 5967 if (set_ua) { 5968 ctl_isc_announce_mode(lun, 5969 ctl_get_initindex(&ctsio->io_hdr.nexus), 5970 page_index->page_code, page_index->subpage); 5971 } 5972 return (CTL_RETVAL_COMPLETE); 5973 } 5974 5975 static void 5976 ctl_ie_timer(void *arg) 5977 { 5978 struct ctl_lun *lun = arg; 5979 uint64_t t; 5980 5981 if (lun->ie_asc == 0) 5982 return; 5983 5984 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5985 
ctl_est_ua_all(lun, -1, CTL_UA_IE); 5986 else 5987 lun->ie_reported = 0; 5988 5989 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5990 lun->ie_reportcnt++; 5991 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5992 if (t == 0 || t == UINT32_MAX) 5993 t = 3000; /* 5 min */ 5994 callout_schedule_sbt(&lun->ie_callout, SBT_1S / 10 * t, 5995 SBT_1S / 10, 0); 5996 } 5997 } 5998 5999 int 6000 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 6001 struct ctl_page_index *page_index, uint8_t *page_ptr) 6002 { 6003 struct ctl_lun *lun = CTL_LUN(ctsio); 6004 struct scsi_info_exceptions_page *pg; 6005 uint64_t t; 6006 6007 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 6008 6009 pg = (struct scsi_info_exceptions_page *)page_ptr; 6010 mtx_lock(&lun->lun_lock); 6011 if (pg->info_flags & SIEP_FLAGS_TEST) { 6012 lun->ie_asc = 0x5d; 6013 lun->ie_ascq = 0xff; 6014 if (pg->mrie == SIEP_MRIE_UA) { 6015 ctl_est_ua_all(lun, -1, CTL_UA_IE); 6016 lun->ie_reported = 1; 6017 } else { 6018 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6019 lun->ie_reported = -1; 6020 } 6021 lun->ie_reportcnt = 1; 6022 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 6023 lun->ie_reportcnt++; 6024 t = scsi_4btoul(pg->interval_timer); 6025 if (t == 0 || t == UINT32_MAX) 6026 t = 3000; /* 5 min */ 6027 callout_reset_sbt(&lun->ie_callout, SBT_1S / 10 * t, 6028 SBT_1S / 10, ctl_ie_timer, lun, 0); 6029 } 6030 } else { 6031 lun->ie_asc = 0; 6032 lun->ie_ascq = 0; 6033 lun->ie_reported = 1; 6034 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6035 lun->ie_reportcnt = UINT32_MAX; 6036 callout_stop(&lun->ie_callout); 6037 } 6038 mtx_unlock(&lun->lun_lock); 6039 return (CTL_RETVAL_COMPLETE); 6040 } 6041 6042 static int 6043 ctl_do_mode_select(union ctl_io *io) 6044 { 6045 struct ctl_lun *lun = CTL_LUN(io); 6046 struct scsi_mode_page_header *page_header; 6047 struct ctl_page_index *page_index; 6048 struct ctl_scsiio *ctsio; 6049 int page_len, page_len_offset, page_len_size; 6050 union ctl_modepage_info *modepage_info; 6051 uint16_t *len_left, *len_used; 6052 int retval, i; 6053 6054 ctsio = &io->scsiio; 6055 page_index = NULL; 6056 page_len = 0; 6057 6058 modepage_info = (union ctl_modepage_info *) 6059 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6060 len_left = &modepage_info->header.len_left; 6061 len_used = &modepage_info->header.len_used; 6062 6063 do_next_page: 6064 6065 page_header = (struct scsi_mode_page_header *) 6066 (ctsio->kern_data_ptr + *len_used); 6067 6068 if (*len_left == 0) { 6069 free(ctsio->kern_data_ptr, M_CTL); 6070 ctl_set_success(ctsio); 6071 ctl_done((union ctl_io *)ctsio); 6072 return (CTL_RETVAL_COMPLETE); 6073 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6074 free(ctsio->kern_data_ptr, M_CTL); 6075 ctl_set_param_len_error(ctsio); 6076 ctl_done((union ctl_io *)ctsio); 6077 return (CTL_RETVAL_COMPLETE); 6078 6079 } else if ((page_header->page_code & SMPH_SPF) 6080 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6081 free(ctsio->kern_data_ptr, M_CTL); 6082 ctl_set_param_len_error(ctsio); 6083 ctl_done((union ctl_io *)ctsio); 6084 return (CTL_RETVAL_COMPLETE); 6085 } 6086 6087 /* 6088 * XXX KDM should we do something with the block descriptor? 
6089 */ 6090 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6091 page_index = &lun->mode_pages.index[i]; 6092 if (lun->be_lun->lun_type == T_DIRECT && 6093 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6094 continue; 6095 if (lun->be_lun->lun_type == T_PROCESSOR && 6096 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6097 continue; 6098 if (lun->be_lun->lun_type == T_CDROM && 6099 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6100 continue; 6101 6102 if ((page_index->page_code & SMPH_PC_MASK) != 6103 (page_header->page_code & SMPH_PC_MASK)) 6104 continue; 6105 6106 /* 6107 * If neither page has a subpage code, then we've got a 6108 * match. 6109 */ 6110 if (((page_index->page_code & SMPH_SPF) == 0) 6111 && ((page_header->page_code & SMPH_SPF) == 0)) { 6112 page_len = page_header->page_length; 6113 break; 6114 } 6115 6116 /* 6117 * If both pages have subpages, then the subpage numbers 6118 * have to match. 6119 */ 6120 if ((page_index->page_code & SMPH_SPF) 6121 && (page_header->page_code & SMPH_SPF)) { 6122 struct scsi_mode_page_header_sp *sph; 6123 6124 sph = (struct scsi_mode_page_header_sp *)page_header; 6125 if (page_index->subpage == sph->subpage) { 6126 page_len = scsi_2btoul(sph->page_length); 6127 break; 6128 } 6129 } 6130 } 6131 6132 /* 6133 * If we couldn't find the page, or if we don't have a mode select 6134 * handler for it, send back an error to the user. 6135 */ 6136 if ((i >= CTL_NUM_MODE_PAGES) 6137 || (page_index->select_handler == NULL)) { 6138 ctl_set_invalid_field(ctsio, 6139 /*sks_valid*/ 1, 6140 /*command*/ 0, 6141 /*field*/ *len_used, 6142 /*bit_valid*/ 0, 6143 /*bit*/ 0); 6144 free(ctsio->kern_data_ptr, M_CTL); 6145 ctl_done((union ctl_io *)ctsio); 6146 return (CTL_RETVAL_COMPLETE); 6147 } 6148 6149 if (page_index->page_code & SMPH_SPF) { 6150 page_len_offset = 2; 6151 page_len_size = 2; 6152 } else { 6153 page_len_size = 1; 6154 page_len_offset = 1; 6155 } 6156 6157 /* 6158 * If the length the initiator gives us isn't the one we specify in 6159 * the mode page header, or if they didn't specify enough data in 6160 * the CDB to avoid truncating this page, kick out the request. 6161 */ 6162 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6163 ctl_set_invalid_field(ctsio, 6164 /*sks_valid*/ 1, 6165 /*command*/ 0, 6166 /*field*/ *len_used + page_len_offset, 6167 /*bit_valid*/ 0, 6168 /*bit*/ 0); 6169 free(ctsio->kern_data_ptr, M_CTL); 6170 ctl_done((union ctl_io *)ctsio); 6171 return (CTL_RETVAL_COMPLETE); 6172 } 6173 if (*len_left < page_index->page_len) { 6174 free(ctsio->kern_data_ptr, M_CTL); 6175 ctl_set_param_len_error(ctsio); 6176 ctl_done((union ctl_io *)ctsio); 6177 return (CTL_RETVAL_COMPLETE); 6178 } 6179 6180 /* 6181 * Run through the mode page, checking to make sure that the bits 6182 * the user changed are actually legal for him to change. 6183 */ 6184 for (i = 0; i < page_index->page_len; i++) { 6185 uint8_t *user_byte, *change_mask, *current_byte; 6186 int bad_bit; 6187 int j; 6188 6189 user_byte = (uint8_t *)page_header + i; 6190 change_mask = page_index->page_data + 6191 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6192 current_byte = page_index->page_data + 6193 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6194 6195 /* 6196 * Check to see whether the user set any bits in this byte 6197 * that he is not allowed to set. 6198 */ 6199 if ((*user_byte & ~(*change_mask)) == 6200 (*current_byte & ~(*change_mask))) 6201 continue; 6202 6203 /* 6204 * Go through bit by bit to determine which one is illegal. 
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
	 */
	if (*len_left > 0)
		goto do_next_page;

	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);

bailout_no_done:

	return (CTL_RETVAL_COMPLETE);

}

int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	union ctl_modepage_info *modepage_info;
	int bd_len, i, header_size, param_len, rtd;
	uint32_t initidx;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (rtd) {
		if (param_len != 0) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
			    /*command*/ 1, /*field*/ 0,
			    /*bit_valid*/ 0, /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Revert to defaults. */
		ctl_init_page_index(lun);
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
		mtx_unlock(&lun->lun_lock);
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty.  This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.  In theory,
	 * we shouldn't have to sanity check the parameter list length here
	 * because the maximum size is 64K.  We should be able to malloc
	 * that much without too many problems.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_header_6 *mh6;

		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
		bd_len = mh6->blk_desc_len;
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_header_10 *mh10;

		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
		bd_len = scsi_2btoul(mh10->blk_desc_len);
		break;
	}
	default:
		panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
	}

	if (param_len < (header_size + bd_len)) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_config_write_done(), it'll get passed back to
	 * ctl_do_mode_select() for further processing, or completion if
	 * we're all done.
6394 */ 6395 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6396 ctsio->io_cont = ctl_do_mode_select; 6397 6398 modepage_info = (union ctl_modepage_info *) 6399 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6400 memset(modepage_info, 0, sizeof(*modepage_info)); 6401 modepage_info->header.len_left = param_len - header_size - bd_len; 6402 modepage_info->header.len_used = header_size + bd_len; 6403 6404 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6405 } 6406 6407 int 6408 ctl_mode_sense(struct ctl_scsiio *ctsio) 6409 { 6410 struct ctl_lun *lun = CTL_LUN(ctsio); 6411 int pc, page_code, llba, subpage; 6412 int alloc_len, page_len, header_len, bd_len, total_len; 6413 void *block_desc; 6414 struct ctl_page_index *page_index; 6415 6416 llba = 0; 6417 6418 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6419 6420 switch (ctsio->cdb[0]) { 6421 case MODE_SENSE_6: { 6422 struct scsi_mode_sense_6 *cdb; 6423 6424 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6425 6426 header_len = sizeof(struct scsi_mode_hdr_6); 6427 if (cdb->byte2 & SMS_DBD) 6428 bd_len = 0; 6429 else 6430 bd_len = sizeof(struct scsi_mode_block_descr); 6431 header_len += bd_len; 6432 6433 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6434 page_code = cdb->page & SMS_PAGE_CODE; 6435 subpage = cdb->subpage; 6436 alloc_len = cdb->length; 6437 break; 6438 } 6439 case MODE_SENSE_10: { 6440 struct scsi_mode_sense_10 *cdb; 6441 6442 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6443 6444 header_len = sizeof(struct scsi_mode_hdr_10); 6445 if (cdb->byte2 & SMS_DBD) { 6446 bd_len = 0; 6447 } else if (lun->be_lun->lun_type == T_DIRECT) { 6448 if (cdb->byte2 & SMS10_LLBAA) { 6449 llba = 1; 6450 bd_len = sizeof(struct scsi_mode_block_descr_dlong); 6451 } else 6452 bd_len = sizeof(struct scsi_mode_block_descr_dshort); 6453 } else 6454 bd_len = sizeof(struct scsi_mode_block_descr); 6455 header_len += bd_len; 6456 6457 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6458 page_code = cdb->page & SMS_PAGE_CODE; 6459 subpage = cdb->subpage; 6460 alloc_len = scsi_2btoul(cdb->length); 6461 break; 6462 } 6463 default: 6464 ctl_set_invalid_opcode(ctsio); 6465 ctl_done((union ctl_io *)ctsio); 6466 return (CTL_RETVAL_COMPLETE); 6467 break; /* NOTREACHED */ 6468 } 6469 6470 /* 6471 * We have to make a first pass through to calculate the size of 6472 * the pages that match the user's query. Then we allocate enough 6473 * memory to hold it, and actually copy the data into the buffer. 6474 */ 6475 switch (page_code) { 6476 case SMS_ALL_PAGES_PAGE: { 6477 u_int i; 6478 6479 page_len = 0; 6480 6481 /* 6482 * At the moment, values other than 0 and 0xff here are 6483 * reserved according to SPC-3. 
6484 */ 6485 if ((subpage != SMS_SUBPAGE_PAGE_0) 6486 && (subpage != SMS_SUBPAGE_ALL)) { 6487 ctl_set_invalid_field(ctsio, 6488 /*sks_valid*/ 1, 6489 /*command*/ 1, 6490 /*field*/ 3, 6491 /*bit_valid*/ 0, 6492 /*bit*/ 0); 6493 ctl_done((union ctl_io *)ctsio); 6494 return (CTL_RETVAL_COMPLETE); 6495 } 6496 6497 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6498 page_index = &lun->mode_pages.index[i]; 6499 6500 /* Make sure the page is supported for this dev type */ 6501 if (lun->be_lun->lun_type == T_DIRECT && 6502 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6503 continue; 6504 if (lun->be_lun->lun_type == T_PROCESSOR && 6505 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6506 continue; 6507 if (lun->be_lun->lun_type == T_CDROM && 6508 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6509 continue; 6510 6511 /* 6512 * We don't use this subpage if the user didn't 6513 * request all subpages. 6514 */ 6515 if ((page_index->subpage != 0) 6516 && (subpage == SMS_SUBPAGE_PAGE_0)) 6517 continue; 6518 6519 page_len += page_index->page_len; 6520 } 6521 break; 6522 } 6523 default: { 6524 u_int i; 6525 6526 page_len = 0; 6527 6528 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6529 page_index = &lun->mode_pages.index[i]; 6530 6531 /* Make sure the page is supported for this dev type */ 6532 if (lun->be_lun->lun_type == T_DIRECT && 6533 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6534 continue; 6535 if (lun->be_lun->lun_type == T_PROCESSOR && 6536 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6537 continue; 6538 if (lun->be_lun->lun_type == T_CDROM && 6539 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6540 continue; 6541 6542 /* Look for the right page code */ 6543 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6544 continue; 6545 6546 /* Look for the right subpage or the subpage wildcard*/ 6547 if ((page_index->subpage != subpage) 6548 && (subpage != SMS_SUBPAGE_ALL)) 6549 continue; 6550 6551 page_len += page_index->page_len; 6552 } 6553 6554 if (page_len == 0) { 6555 ctl_set_invalid_field(ctsio, 6556 /*sks_valid*/ 1, 6557 /*command*/ 1, 6558 /*field*/ 2, 6559 /*bit_valid*/ 1, 6560 /*bit*/ 5); 6561 ctl_done((union ctl_io *)ctsio); 6562 return (CTL_RETVAL_COMPLETE); 6563 } 6564 break; 6565 } 6566 } 6567 6568 total_len = header_len + page_len; 6569 6570 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6571 ctsio->kern_sg_entries = 0; 6572 ctsio->kern_rel_offset = 0; 6573 ctsio->kern_data_len = min(total_len, alloc_len); 6574 ctsio->kern_total_len = ctsio->kern_data_len; 6575 6576 switch (ctsio->cdb[0]) { 6577 case MODE_SENSE_6: { 6578 struct scsi_mode_hdr_6 *header; 6579 6580 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6581 6582 header->datalen = MIN(total_len - 1, 254); 6583 if (lun->be_lun->lun_type == T_DIRECT) { 6584 header->dev_specific = 0x10; /* DPOFUA */ 6585 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6586 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6587 header->dev_specific |= 0x80; /* WP */ 6588 } 6589 header->block_descr_len = bd_len; 6590 block_desc = &header[1]; 6591 break; 6592 } 6593 case MODE_SENSE_10: { 6594 struct scsi_mode_hdr_10 *header; 6595 int datalen; 6596 6597 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6598 6599 datalen = MIN(total_len - 2, 65533); 6600 scsi_ulto2b(datalen, header->datalen); 6601 if (lun->be_lun->lun_type == T_DIRECT) { 6602 header->dev_specific = 0x10; /* DPOFUA */ 6603 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6604 (lun->MODE_CTRL.eca_and_aen & 
SCP_SWP) != 0) 6605 header->dev_specific |= 0x80; /* WP */ 6606 } 6607 if (llba) 6608 header->flags |= SMH_LONGLBA; 6609 scsi_ulto2b(bd_len, header->block_descr_len); 6610 block_desc = &header[1]; 6611 break; 6612 } 6613 default: 6614 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6615 } 6616 6617 /* 6618 * If we've got a disk, use its blocksize in the block 6619 * descriptor. Otherwise, just set it to 0. 6620 */ 6621 if (bd_len > 0) { 6622 if (lun->be_lun->lun_type == T_DIRECT) { 6623 if (llba) { 6624 struct scsi_mode_block_descr_dlong *bd = block_desc; 6625 if (lun->be_lun->maxlba != 0) 6626 scsi_u64to8b(lun->be_lun->maxlba + 1, 6627 bd->num_blocks); 6628 scsi_ulto4b(lun->be_lun->blocksize, 6629 bd->block_len); 6630 } else { 6631 struct scsi_mode_block_descr_dshort *bd = block_desc; 6632 if (lun->be_lun->maxlba != 0) 6633 scsi_ulto4b(MIN(lun->be_lun->maxlba+1, 6634 UINT32_MAX), bd->num_blocks); 6635 scsi_ulto3b(lun->be_lun->blocksize, 6636 bd->block_len); 6637 } 6638 } else { 6639 struct scsi_mode_block_descr *bd = block_desc; 6640 scsi_ulto3b(0, bd->block_len); 6641 } 6642 } 6643 6644 switch (page_code) { 6645 case SMS_ALL_PAGES_PAGE: { 6646 int i, data_used; 6647 6648 data_used = header_len; 6649 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6650 struct ctl_page_index *page_index; 6651 6652 page_index = &lun->mode_pages.index[i]; 6653 if (lun->be_lun->lun_type == T_DIRECT && 6654 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6655 continue; 6656 if (lun->be_lun->lun_type == T_PROCESSOR && 6657 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6658 continue; 6659 if (lun->be_lun->lun_type == T_CDROM && 6660 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6661 continue; 6662 6663 /* 6664 * We don't use this subpage if the user didn't 6665 * request all subpages. We already checked (above) 6666 * to make sure the user only specified a subpage 6667 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6668 */ 6669 if ((page_index->subpage != 0) 6670 && (subpage == SMS_SUBPAGE_PAGE_0)) 6671 continue; 6672 6673 /* 6674 * Call the handler, if it exists, to update the 6675 * page to the latest values. 6676 */ 6677 if (page_index->sense_handler != NULL) 6678 page_index->sense_handler(ctsio, page_index,pc); 6679 6680 memcpy(ctsio->kern_data_ptr + data_used, 6681 page_index->page_data + 6682 (page_index->page_len * pc), 6683 page_index->page_len); 6684 data_used += page_index->page_len; 6685 } 6686 break; 6687 } 6688 default: { 6689 int i, data_used; 6690 6691 data_used = header_len; 6692 6693 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6694 struct ctl_page_index *page_index; 6695 6696 page_index = &lun->mode_pages.index[i]; 6697 6698 /* Look for the right page code */ 6699 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6700 continue; 6701 6702 /* Look for the right subpage or the subpage wildcard*/ 6703 if ((page_index->subpage != subpage) 6704 && (subpage != SMS_SUBPAGE_ALL)) 6705 continue; 6706 6707 /* Make sure the page is supported for this dev type */ 6708 if (lun->be_lun->lun_type == T_DIRECT && 6709 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6710 continue; 6711 if (lun->be_lun->lun_type == T_PROCESSOR && 6712 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6713 continue; 6714 if (lun->be_lun->lun_type == T_CDROM && 6715 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6716 continue; 6717 6718 /* 6719 * Call the handler, if it exists, to update the 6720 * page to the latest values. 
6721 */ 6722 if (page_index->sense_handler != NULL) 6723 page_index->sense_handler(ctsio, page_index,pc); 6724 6725 memcpy(ctsio->kern_data_ptr + data_used, 6726 page_index->page_data + 6727 (page_index->page_len * pc), 6728 page_index->page_len); 6729 data_used += page_index->page_len; 6730 } 6731 break; 6732 } 6733 } 6734 6735 ctl_set_success(ctsio); 6736 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6737 ctsio->be_move_done = ctl_config_move_done; 6738 ctl_datamove((union ctl_io *)ctsio); 6739 return (CTL_RETVAL_COMPLETE); 6740 } 6741 6742 int 6743 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, 6744 struct ctl_page_index *page_index, 6745 int pc) 6746 { 6747 struct ctl_lun *lun = CTL_LUN(ctsio); 6748 struct scsi_log_temperature *data; 6749 const char *value; 6750 6751 data = (struct scsi_log_temperature *)page_index->page_data; 6752 6753 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); 6754 data->hdr.param_control = SLP_LBIN; 6755 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6756 sizeof(struct scsi_log_param_header); 6757 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6758 NULL)) != NULL) 6759 data->temperature = strtol(value, NULL, 0); 6760 else 6761 data->temperature = 0xff; 6762 data++; 6763 6764 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); 6765 data->hdr.param_control = SLP_LBIN; 6766 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6767 sizeof(struct scsi_log_param_header); 6768 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", 6769 NULL)) != NULL) 6770 data->temperature = strtol(value, NULL, 0); 6771 else 6772 data->temperature = 0xff; 6773 return (0); 6774 } 6775 6776 int 6777 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6778 struct ctl_page_index *page_index, 6779 int pc) 6780 { 6781 struct ctl_lun *lun = CTL_LUN(ctsio); 6782 struct scsi_log_param_header *phdr; 6783 uint8_t *data; 6784 uint64_t val; 6785 6786 data = page_index->page_data; 6787 6788 if (lun->backend->lun_attr != NULL && 6789 (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) 6790 != UINT64_MAX) { 6791 phdr = (struct scsi_log_param_header *)data; 6792 scsi_ulto2b(0x0001, phdr->param_code); 6793 phdr->param_control = SLP_LBIN | SLP_LP; 6794 phdr->param_len = 8; 6795 data = (uint8_t *)(phdr + 1); 6796 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6797 data[4] = 0x02; /* per-pool */ 6798 data += phdr->param_len; 6799 } 6800 6801 if (lun->backend->lun_attr != NULL && 6802 (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) 6803 != UINT64_MAX) { 6804 phdr = (struct scsi_log_param_header *)data; 6805 scsi_ulto2b(0x0002, phdr->param_code); 6806 phdr->param_control = SLP_LBIN | SLP_LP; 6807 phdr->param_len = 8; 6808 data = (uint8_t *)(phdr + 1); 6809 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6810 data[4] = 0x01; /* per-LUN */ 6811 data += phdr->param_len; 6812 } 6813 6814 if (lun->backend->lun_attr != NULL && 6815 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) 6816 != UINT64_MAX) { 6817 phdr = (struct scsi_log_param_header *)data; 6818 scsi_ulto2b(0x00f1, phdr->param_code); 6819 phdr->param_control = SLP_LBIN | SLP_LP; 6820 phdr->param_len = 8; 6821 data = (uint8_t *)(phdr + 1); 6822 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6823 data[4] = 0x02; /* per-pool */ 6824 data += phdr->param_len; 6825 } 6826 6827 if (lun->backend->lun_attr != NULL && 6828 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) 6829 != UINT64_MAX) { 6830 phdr = (struct scsi_log_param_header *)data; 6831 
		scsi_ulto2b(0x00f2, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x02;		/* per-pool */
		data += phdr->param_len;
	}

	page_index->page_len = data - page_index->page_data;
	return (0);
}

int
ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
			  struct ctl_page_index *page_index,
			  int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct stat_page *data;
	struct bintime *t;

	data = (struct stat_page *)page_index->page_data;

	scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
	data->sap.hdr.param_control = SLP_LBIN;
	data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
	    sizeof(struct scsi_log_param_header);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
	    data->sap.read_num);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
	    data->sap.write_num);
	if (lun->be_lun->blocksize > 0) {
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
		    lun->be_lun->blocksize, data->sap.recvieved_lba);
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
		    lun->be_lun->blocksize, data->sap.transmitted_lba);
	}
	t = &lun->stats.time[CTL_STATS_READ];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.read_int);
	t = &lun->stats.time[CTL_STATS_WRITE];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.write_int);
	scsi_u64to8b(0, data->sap.weighted_num);
	scsi_u64to8b(0, data->sap.weighted_int);
	scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
	data->it.hdr.param_control = SLP_LBIN;
	data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
	    sizeof(struct scsi_log_param_header);
#ifdef CTL_TIME_IO
	scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
#endif
	scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
	data->ti.hdr.param_control = SLP_LBIN;
	data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
	    sizeof(struct scsi_log_param_header);
	scsi_ulto4b(3, data->ti.exponent);
	scsi_ulto4b(1, data->ti.integer);
	return (0);
}

int
ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index,
			 int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_log_informational_exceptions *data;
	const char *value;

	data = (struct scsi_log_informational_exceptions *)page_index->page_data;

	scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
	data->hdr.param_control = SLP_LBIN;
	data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
	    sizeof(struct scsi_log_param_header);
	data->ie_asc = lun->ie_asc;
	data->ie_ascq = lun->ie_ascq;
	if ((value = dnvlist_get_string(lun->be_lun->options, "temperature",
	    NULL)) != NULL)
		data->temperature = strtol(value, NULL, 0);
	else
		data->temperature = 0xff;
	return (0);
}

int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	int i, pc, page_code, subpage;
	int alloc_len, total_len;
	struct ctl_page_index *page_index;
	struct scsi_log_sense *cdb;
	struct scsi_log_header *header;

	CTL_DEBUG_PRINT(("ctl_log_sense\n"));

	cdb = (struct scsi_log_sense *)ctsio->cdb;
	pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
	page_code = cdb->page & SLS_PAGE_CODE;
	subpage = cdb->subpage;
	alloc_len = scsi_2btoul(cdb->length);

	page_index = NULL;
	for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
		page_index = &lun->log_pages.index[i];

		/* Look for the right page code */
		if ((page_index->page_code & SL_PAGE_CODE) != page_code)
			continue;

		/* Look for the right subpage or the subpage wildcard */
		if (page_index->subpage != subpage)
			continue;

		break;
	}
	if (i >= CTL_NUM_LOG_PAGES) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	total_len = sizeof(struct scsi_log_header) + page_index->page_len;

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	header = (struct scsi_log_header *)ctsio->kern_data_ptr;
	header->page = page_index->page_code;
	if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING)
		header->page |= SL_DS;
	if (page_index->subpage) {
		header->page |= SL_SPF;
		header->subpage = page_index->subpage;
	}
	scsi_ulto2b(page_index->page_len, header->datalen);

	/*
	 * Call the handler, if it exists, to update the
	 * page to the latest values.
	 */
	if (page_index->sense_handler != NULL)
		page_index->sense_handler(ctsio, page_index, pc);

	memcpy(header + 1, page_index->page_data, page_index->page_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	lba = scsi_4btoul(cdb->addr);
	if (((cdb->pmi & SRC_PMI) == 0)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
7039 */ 7040 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7041 7042 ctl_set_success(ctsio); 7043 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7044 ctsio->be_move_done = ctl_config_move_done; 7045 ctl_datamove((union ctl_io *)ctsio); 7046 return (CTL_RETVAL_COMPLETE); 7047 } 7048 7049 int 7050 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7051 { 7052 struct ctl_lun *lun = CTL_LUN(ctsio); 7053 struct scsi_read_capacity_16 *cdb; 7054 struct scsi_read_capacity_data_long *data; 7055 uint64_t lba; 7056 uint32_t alloc_len; 7057 7058 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7059 7060 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7061 7062 alloc_len = scsi_4btoul(cdb->alloc_len); 7063 lba = scsi_8btou64(cdb->addr); 7064 7065 if ((cdb->reladr & SRC16_PMI) 7066 && (lba != 0)) { 7067 ctl_set_invalid_field(/*ctsio*/ ctsio, 7068 /*sks_valid*/ 1, 7069 /*command*/ 1, 7070 /*field*/ 2, 7071 /*bit_valid*/ 0, 7072 /*bit*/ 0); 7073 ctl_done((union ctl_io *)ctsio); 7074 return (CTL_RETVAL_COMPLETE); 7075 } 7076 7077 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7078 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7079 ctsio->kern_rel_offset = 0; 7080 ctsio->kern_sg_entries = 0; 7081 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7082 ctsio->kern_total_len = ctsio->kern_data_len; 7083 7084 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7085 /* XXX KDM this may not be 512 bytes... */ 7086 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7087 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7088 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7089 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7090 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7091 7092 ctl_set_success(ctsio); 7093 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7094 ctsio->be_move_done = ctl_config_move_done; 7095 ctl_datamove((union ctl_io *)ctsio); 7096 return (CTL_RETVAL_COMPLETE); 7097 } 7098 7099 int 7100 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7101 { 7102 struct ctl_lun *lun = CTL_LUN(ctsio); 7103 struct scsi_get_lba_status *cdb; 7104 struct scsi_get_lba_status_data *data; 7105 struct ctl_lba_len_flags *lbalen; 7106 uint64_t lba; 7107 uint32_t alloc_len, total_len; 7108 int retval; 7109 7110 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7111 7112 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7113 lba = scsi_8btou64(cdb->addr); 7114 alloc_len = scsi_4btoul(cdb->alloc_len); 7115 7116 if (lba > lun->be_lun->maxlba) { 7117 ctl_set_lba_out_of_range(ctsio, lba); 7118 ctl_done((union ctl_io *)ctsio); 7119 return (CTL_RETVAL_COMPLETE); 7120 } 7121 7122 total_len = sizeof(*data) + sizeof(data->descr[0]); 7123 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7124 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7125 ctsio->kern_rel_offset = 0; 7126 ctsio->kern_sg_entries = 0; 7127 ctsio->kern_data_len = min(total_len, alloc_len); 7128 ctsio->kern_total_len = ctsio->kern_data_len; 7129 7130 /* Fill dummy data in case backend can't tell anything. */ 7131 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7132 scsi_u64to8b(lba, data->descr[0].addr); 7133 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7134 data->descr[0].length); 7135 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7136 7137 ctl_set_success(ctsio); 7138 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7139 ctsio->be_move_done = ctl_config_move_done; 7140 7141 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7142 lbalen->lba = lba; 7143 lbalen->len = total_len; 7144 lbalen->flags = 0; 7145 retval = lun->backend->config_read((union ctl_io *)ctsio); 7146 return (retval); 7147 } 7148 7149 int 7150 ctl_read_defect(struct ctl_scsiio *ctsio) 7151 { 7152 struct scsi_read_defect_data_10 *ccb10; 7153 struct scsi_read_defect_data_12 *ccb12; 7154 struct scsi_read_defect_data_hdr_10 *data10; 7155 struct scsi_read_defect_data_hdr_12 *data12; 7156 uint32_t alloc_len, data_len; 7157 uint8_t format; 7158 7159 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7160 7161 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7162 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7163 format = ccb10->format; 7164 alloc_len = scsi_2btoul(ccb10->alloc_length); 7165 data_len = sizeof(*data10); 7166 } else { 7167 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7168 format = ccb12->format; 7169 alloc_len = scsi_4btoul(ccb12->alloc_length); 7170 data_len = sizeof(*data12); 7171 } 7172 if (alloc_len == 0) { 7173 ctl_set_success(ctsio); 7174 ctl_done((union ctl_io *)ctsio); 7175 return (CTL_RETVAL_COMPLETE); 7176 } 7177 7178 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7179 ctsio->kern_rel_offset = 0; 7180 ctsio->kern_sg_entries = 0; 7181 ctsio->kern_data_len = min(data_len, alloc_len); 7182 ctsio->kern_total_len = ctsio->kern_data_len; 7183 7184 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7185 data10 = (struct scsi_read_defect_data_hdr_10 *) 7186 ctsio->kern_data_ptr; 7187 data10->format = format; 7188 scsi_ulto2b(0, data10->length); 7189 } else { 7190 data12 = (struct scsi_read_defect_data_hdr_12 *) 7191 ctsio->kern_data_ptr; 7192 data12->format = format; 7193 scsi_ulto2b(0, data12->generation); 7194 scsi_ulto4b(0, data12->length); 7195 } 7196 7197 ctl_set_success(ctsio); 7198 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7199 ctsio->be_move_done = ctl_config_move_done; 7200 ctl_datamove((union ctl_io *)ctsio); 7201 return (CTL_RETVAL_COMPLETE); 7202 } 7203 7204 int 7205 ctl_report_ident_info(struct ctl_scsiio *ctsio) 7206 { 7207 struct ctl_lun *lun = CTL_LUN(ctsio); 7208 struct scsi_report_ident_info *cdb; 7209 struct scsi_report_ident_info_data *rii_ptr; 7210 struct scsi_report_ident_info_descr *riid_ptr; 7211 const char *oii, *otii; 7212 int retval, alloc_len, total_len = 0, len = 0; 7213 7214 CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); 7215 7216 cdb = (struct scsi_report_ident_info *)ctsio->cdb; 7217 retval = CTL_RETVAL_COMPLETE; 7218 7219 total_len = sizeof(struct scsi_report_ident_info_data); 7220 switch (cdb->type) { 7221 case RII_LUII: 7222 oii = dnvlist_get_string(lun->be_lun->options, 7223 "ident_info", NULL); 7224 if (oii) 7225 len = strlen(oii); /* Approximately */ 7226 break; 7227 case RII_LUTII: 7228 otii = dnvlist_get_string(lun->be_lun->options, 7229 "text_ident_info", NULL); 7230 if (otii) 7231 len = strlen(otii) + 1; /* NULL-terminated */ 7232 break; 7233 case RII_IIS: 7234 len = 2 * sizeof(struct scsi_report_ident_info_descr); 7235 break; 7236 default: 7237 ctl_set_invalid_field(/*ctsio*/ ctsio, 7238 /*sks_valid*/ 1, 7239 /*command*/ 1, 7240 /*field*/ 11, 7241 /*bit_valid*/ 1, 7242 /*bit*/ 2); 7243 ctl_done((union ctl_io *)ctsio); 7244 return(retval); 7245 } 7246 total_len += len; 7247 alloc_len = scsi_4btoul(cdb->length); 7248 7249 ctsio->kern_data_ptr = 
malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7250 ctsio->kern_sg_entries = 0; 7251 ctsio->kern_rel_offset = 0; 7252 ctsio->kern_data_len = min(total_len, alloc_len); 7253 ctsio->kern_total_len = ctsio->kern_data_len; 7254 7255 rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; 7256 switch (cdb->type) { 7257 case RII_LUII: 7258 if (oii) { 7259 if (oii[0] == '0' && oii[1] == 'x') 7260 len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); 7261 else 7262 strncpy((uint8_t *)(rii_ptr + 1), oii, len); 7263 } 7264 break; 7265 case RII_LUTII: 7266 if (otii) 7267 strlcpy((uint8_t *)(rii_ptr + 1), otii, len); 7268 break; 7269 case RII_IIS: 7270 riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); 7271 riid_ptr->type = RII_LUII; 7272 scsi_ulto2b(0xffff, riid_ptr->length); 7273 riid_ptr++; 7274 riid_ptr->type = RII_LUTII; 7275 scsi_ulto2b(0xffff, riid_ptr->length); 7276 } 7277 scsi_ulto2b(len, rii_ptr->length); 7278 7279 ctl_set_success(ctsio); 7280 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7281 ctsio->be_move_done = ctl_config_move_done; 7282 ctl_datamove((union ctl_io *)ctsio); 7283 return(retval); 7284 } 7285 7286 int 7287 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7288 { 7289 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7290 struct ctl_lun *lun = CTL_LUN(ctsio); 7291 struct scsi_maintenance_in *cdb; 7292 int retval; 7293 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7294 int num_ha_groups, num_target_ports, shared_group; 7295 struct ctl_port *port; 7296 struct scsi_target_group_data *rtg_ptr; 7297 struct scsi_target_group_data_extended *rtg_ext_ptr; 7298 struct scsi_target_port_group_descriptor *tpg_desc; 7299 7300 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7301 7302 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7303 retval = CTL_RETVAL_COMPLETE; 7304 7305 switch (cdb->byte2 & STG_PDF_MASK) { 7306 case STG_PDF_LENGTH: 7307 ext = 0; 7308 break; 7309 case STG_PDF_EXTENDED: 7310 ext = 1; 7311 break; 7312 default: 7313 ctl_set_invalid_field(/*ctsio*/ ctsio, 7314 /*sks_valid*/ 1, 7315 /*command*/ 1, 7316 /*field*/ 2, 7317 /*bit_valid*/ 1, 7318 /*bit*/ 5); 7319 ctl_done((union ctl_io *)ctsio); 7320 return(retval); 7321 } 7322 7323 num_target_ports = 0; 7324 shared_group = (softc->is_single != 0); 7325 mtx_lock(&softc->ctl_lock); 7326 STAILQ_FOREACH(port, &softc->port_list, links) { 7327 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7328 continue; 7329 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7330 continue; 7331 num_target_ports++; 7332 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7333 shared_group = 1; 7334 } 7335 mtx_unlock(&softc->ctl_lock); 7336 num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; 7337 7338 if (ext) 7339 total_len = sizeof(struct scsi_target_group_data_extended); 7340 else 7341 total_len = sizeof(struct scsi_target_group_data); 7342 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7343 (shared_group + num_ha_groups) + 7344 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7345 7346 alloc_len = scsi_4btoul(cdb->length); 7347 7348 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7349 ctsio->kern_sg_entries = 0; 7350 ctsio->kern_rel_offset = 0; 7351 ctsio->kern_data_len = min(total_len, alloc_len); 7352 ctsio->kern_total_len = ctsio->kern_data_len; 7353 7354 if (ext) { 7355 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7356 ctsio->kern_data_ptr; 7357 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7358 rtg_ext_ptr->format_type = 0x10; 7359 rtg_ext_ptr->implicit_transition_time = 0; 7360 tpg_desc = &rtg_ext_ptr->groups[0]; 7361 } else { 7362 rtg_ptr = (struct scsi_target_group_data *) 7363 ctsio->kern_data_ptr; 7364 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7365 tpg_desc = &rtg_ptr->groups[0]; 7366 } 7367 7368 mtx_lock(&softc->ctl_lock); 7369 pg = softc->port_min / softc->port_cnt; 7370 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7371 /* Some shelf is known to be primary. */ 7372 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7373 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7374 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7375 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7376 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7377 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7378 else 7379 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7380 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7381 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7382 } else { 7383 ts = os; 7384 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7385 } 7386 } else { 7387 /* No known primary shelf. */ 7388 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7389 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7390 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7391 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7392 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7393 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7394 } else { 7395 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7396 } 7397 } 7398 if (shared_group) { 7399 tpg_desc->pref_state = ts; 7400 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7401 TPG_U_SUP | TPG_T_SUP; 7402 scsi_ulto2b(1, tpg_desc->target_port_group); 7403 tpg_desc->status = TPG_IMPLICIT; 7404 pc = 0; 7405 STAILQ_FOREACH(port, &softc->port_list, links) { 7406 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7407 continue; 7408 if (!softc->is_single && 7409 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7410 continue; 7411 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7412 continue; 7413 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7414 relative_target_port_identifier); 7415 pc++; 7416 } 7417 tpg_desc->target_port_count = pc; 7418 tpg_desc = (struct scsi_target_port_group_descriptor *) 7419 &tpg_desc->descriptors[pc]; 7420 } 7421 for (g = 0; g < num_ha_groups; g++) { 7422 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7423 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7424 TPG_U_SUP | TPG_T_SUP; 7425 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7426 tpg_desc->status = TPG_IMPLICIT; 7427 pc = 0; 7428 STAILQ_FOREACH(port, &softc->port_list, links) { 7429 if (port->targ_port < g * softc->port_cnt || 7430 port->targ_port >= (g + 1) * softc->port_cnt) 7431 continue; 7432 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7433 continue; 7434 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7435 continue; 7436 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7437 continue; 7438 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7439 relative_target_port_identifier); 7440 pc++; 7441 } 7442 tpg_desc->target_port_count = pc; 7443 tpg_desc = (struct scsi_target_port_group_descriptor *) 7444 &tpg_desc->descriptors[pc]; 7445 } 7446 mtx_unlock(&softc->ctl_lock); 7447 7448 ctl_set_success(ctsio); 7449 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7450 ctsio->be_move_done = ctl_config_move_done; 7451 ctl_datamove((union ctl_io *)ctsio); 7452 return(retval); 7453 } 7454 7455 int 7456 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7457 { 7458 struct ctl_lun *lun = CTL_LUN(ctsio); 7459 struct scsi_report_supported_opcodes *cdb; 7460 const struct ctl_cmd_entry *entry, *sentry; 7461 struct scsi_report_supported_opcodes_all *all; 7462 struct scsi_report_supported_opcodes_descr *descr; 7463 struct scsi_report_supported_opcodes_one *one; 7464 int retval; 7465 int alloc_len, total_len; 7466 int opcode, service_action, i, j, num; 7467 7468 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7469 7470 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7471 retval = CTL_RETVAL_COMPLETE; 7472 7473 opcode = cdb->requested_opcode; 7474 service_action = scsi_2btoul(cdb->requested_service_action); 7475 switch (cdb->options & RSO_OPTIONS_MASK) { 7476 case RSO_OPTIONS_ALL: 7477 num = 0; 7478 for (i = 0; i < 256; i++) { 7479 entry = &ctl_cmd_table[i]; 7480 if (entry->flags & CTL_CMD_FLAG_SA5) { 7481 for (j = 0; j < 32; j++) { 7482 sentry = &((const struct ctl_cmd_entry *) 7483 entry->execute)[j]; 7484 if (ctl_cmd_applicable( 7485 lun->be_lun->lun_type, sentry)) 7486 num++; 7487 } 7488 } else { 7489 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7490 entry)) 7491 num++; 7492 } 7493 } 7494 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7495 num * sizeof(struct scsi_report_supported_opcodes_descr); 7496 break; 7497 case RSO_OPTIONS_OC: 7498 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7499 ctl_set_invalid_field(/*ctsio*/ ctsio, 7500 /*sks_valid*/ 1, 7501 /*command*/ 1, 7502 /*field*/ 2, 7503 /*bit_valid*/ 1, 7504 /*bit*/ 2); 7505 ctl_done((union ctl_io *)ctsio); 7506 return (CTL_RETVAL_COMPLETE); 7507 } 7508 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7509 break; 7510 case RSO_OPTIONS_OC_SA: 7511 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7512 service_action >= 32) { 7513 ctl_set_invalid_field(/*ctsio*/ ctsio, 7514 /*sks_valid*/ 1, 7515 /*command*/ 1, 7516 /*field*/ 2, 7517 /*bit_valid*/ 1, 7518 /*bit*/ 2); 7519 ctl_done((union ctl_io *)ctsio); 7520 return (CTL_RETVAL_COMPLETE); 7521 } 7522 /* FALLTHROUGH */ 7523 case RSO_OPTIONS_OC_ASA: 7524 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7525 break; 7526 default: 7527 ctl_set_invalid_field(/*ctsio*/ ctsio, 7528 /*sks_valid*/ 1, 7529 /*command*/ 1, 7530 /*field*/ 2, 7531 /*bit_valid*/ 1, 7532 /*bit*/ 2); 7533 ctl_done((union ctl_io *)ctsio); 7534 return 
(CTL_RETVAL_COMPLETE); 7535 } 7536 7537 alloc_len = scsi_4btoul(cdb->length); 7538 7539 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7540 ctsio->kern_sg_entries = 0; 7541 ctsio->kern_rel_offset = 0; 7542 ctsio->kern_data_len = min(total_len, alloc_len); 7543 ctsio->kern_total_len = ctsio->kern_data_len; 7544 7545 switch (cdb->options & RSO_OPTIONS_MASK) { 7546 case RSO_OPTIONS_ALL: 7547 all = (struct scsi_report_supported_opcodes_all *) 7548 ctsio->kern_data_ptr; 7549 num = 0; 7550 for (i = 0; i < 256; i++) { 7551 entry = &ctl_cmd_table[i]; 7552 if (entry->flags & CTL_CMD_FLAG_SA5) { 7553 for (j = 0; j < 32; j++) { 7554 sentry = &((const struct ctl_cmd_entry *) 7555 entry->execute)[j]; 7556 if (!ctl_cmd_applicable( 7557 lun->be_lun->lun_type, sentry)) 7558 continue; 7559 descr = &all->descr[num++]; 7560 descr->opcode = i; 7561 scsi_ulto2b(j, descr->service_action); 7562 descr->flags = RSO_SERVACTV; 7563 scsi_ulto2b(sentry->length, 7564 descr->cdb_length); 7565 } 7566 } else { 7567 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7568 entry)) 7569 continue; 7570 descr = &all->descr[num++]; 7571 descr->opcode = i; 7572 scsi_ulto2b(0, descr->service_action); 7573 descr->flags = 0; 7574 scsi_ulto2b(entry->length, descr->cdb_length); 7575 } 7576 } 7577 scsi_ulto4b( 7578 num * sizeof(struct scsi_report_supported_opcodes_descr), 7579 all->length); 7580 break; 7581 case RSO_OPTIONS_OC: 7582 one = (struct scsi_report_supported_opcodes_one *) 7583 ctsio->kern_data_ptr; 7584 entry = &ctl_cmd_table[opcode]; 7585 goto fill_one; 7586 case RSO_OPTIONS_OC_SA: 7587 one = (struct scsi_report_supported_opcodes_one *) 7588 ctsio->kern_data_ptr; 7589 entry = &ctl_cmd_table[opcode]; 7590 entry = &((const struct ctl_cmd_entry *) 7591 entry->execute)[service_action]; 7592 fill_one: 7593 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7594 one->support = 3; 7595 scsi_ulto2b(entry->length, one->cdb_length); 7596 one->cdb_usage[0] = opcode; 7597 memcpy(&one->cdb_usage[1], entry->usage, 7598 entry->length - 1); 7599 } else 7600 one->support = 1; 7601 break; 7602 case RSO_OPTIONS_OC_ASA: 7603 one = (struct scsi_report_supported_opcodes_one *) 7604 ctsio->kern_data_ptr; 7605 entry = &ctl_cmd_table[opcode]; 7606 if (entry->flags & CTL_CMD_FLAG_SA5) { 7607 entry = &((const struct ctl_cmd_entry *) 7608 entry->execute)[service_action]; 7609 } else if (service_action != 0) { 7610 one->support = 1; 7611 break; 7612 } 7613 goto fill_one; 7614 } 7615 7616 ctl_set_success(ctsio); 7617 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7618 ctsio->be_move_done = ctl_config_move_done; 7619 ctl_datamove((union ctl_io *)ctsio); 7620 return(retval); 7621 } 7622 7623 int 7624 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7625 { 7626 struct scsi_report_supported_tmf *cdb; 7627 struct scsi_report_supported_tmf_ext_data *data; 7628 int retval; 7629 int alloc_len, total_len; 7630 7631 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7632 7633 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7634 7635 retval = CTL_RETVAL_COMPLETE; 7636 7637 if (cdb->options & RST_REPD) 7638 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7639 else 7640 total_len = sizeof(struct scsi_report_supported_tmf_data); 7641 alloc_len = scsi_4btoul(cdb->length); 7642 7643 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7644 ctsio->kern_sg_entries = 0; 7645 ctsio->kern_rel_offset = 0; 7646 ctsio->kern_data_len = min(total_len, alloc_len); 7647 ctsio->kern_total_len = ctsio->kern_data_len; 7648 7649 
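	/*
	 * The extended-format reply appears to share its leading TMF flag
	 * bytes with the short format, so the buffer is filled through the
	 * extended layout in both cases; kern_data_len set above already
	 * limits the transfer to whichever format (REPD) the initiator
	 * asked for.
	 */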
	data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
	data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
	    RST_TRS;
	data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
	data->length = total_len - 4;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_report_timestamp(struct ctl_scsiio *ctsio)
{
	struct scsi_report_timestamp *cdb;
	struct scsi_report_timestamp_data *data;
	struct timeval tv;
	int64_t timestamp;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));

	cdb = (struct scsi_report_timestamp *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(struct scsi_report_timestamp_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
	scsi_ulto2b(sizeof(*data) - 2, data->length);
	data->origin = RTS_ORIG_OUTSIDE;
	getmicrotime(&tv);
	timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
	scsi_ulto4b(timestamp >> 16, data->timestamp);
	scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_per_res_in *cdb;
	int alloc_len, total_len = 0;
	/* struct scsi_per_res_in_rsrv in_data; */
	uint64_t key;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));

	cdb = (struct scsi_per_res_in *)ctsio->cdb;

	alloc_len = scsi_2btoul(cdb->length);

retry:
	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: /* read keys */
		total_len = sizeof(struct scsi_per_res_in_keys) +
			lun->pr_key_count *
			sizeof(struct scsi_per_res_key);
		break;
	case SPRI_RR: /* read reservation */
		if (lun->flags & CTL_LUN_PR_RESERVED)
			total_len = sizeof(struct scsi_per_res_in_rsrv);
		else
			total_len = sizeof(struct scsi_per_res_in_header);
		break;
	case SPRI_RC: /* report capabilities */
		total_len = sizeof(struct scsi_per_res_cap);
		break;
	case SPRI_RS: /* read full status */
		total_len = sizeof(struct scsi_per_res_in_header) +
		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
		    lun->pr_key_count;
		break;
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}
	mtx_unlock(&lun->lun_lock);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: { // read keys
		struct scsi_per_res_in_keys *res_keys;
		int i, key_count;

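		/*
		 * READ KEYS: return the PR generation count followed by the
		 * list of 8-byte reservation keys currently registered on
		 * this LUN.
		 */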
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7758 7759 /* 7760 * We had to drop the lock to allocate our buffer, which 7761 * leaves time for someone to come in with another 7762 * persistent reservation. (That is unlikely, though, 7763 * since this should be the only persistent reservation 7764 * command active right now.) 7765 */ 7766 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7767 (lun->pr_key_count * 7768 sizeof(struct scsi_per_res_key)))){ 7769 mtx_unlock(&lun->lun_lock); 7770 free(ctsio->kern_data_ptr, M_CTL); 7771 printf("%s: reservation length changed, retrying\n", 7772 __func__); 7773 goto retry; 7774 } 7775 7776 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7777 7778 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7779 lun->pr_key_count, res_keys->header.length); 7780 7781 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7782 if ((key = ctl_get_prkey(lun, i)) == 0) 7783 continue; 7784 7785 /* 7786 * We used lun->pr_key_count to calculate the 7787 * size to allocate. If it turns out the number of 7788 * initiators with the registered flag set is 7789 * larger than that (i.e. they haven't been kept in 7790 * sync), we've got a problem. 7791 */ 7792 if (key_count >= lun->pr_key_count) { 7793 key_count++; 7794 continue; 7795 } 7796 scsi_u64to8b(key, res_keys->keys[key_count].key); 7797 key_count++; 7798 } 7799 break; 7800 } 7801 case SPRI_RR: { // read reservation 7802 struct scsi_per_res_in_rsrv *res; 7803 int tmp_len, header_only; 7804 7805 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7806 7807 scsi_ulto4b(lun->pr_generation, res->header.generation); 7808 7809 if (lun->flags & CTL_LUN_PR_RESERVED) 7810 { 7811 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7812 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7813 res->header.length); 7814 header_only = 0; 7815 } else { 7816 tmp_len = sizeof(struct scsi_per_res_in_header); 7817 scsi_ulto4b(0, res->header.length); 7818 header_only = 1; 7819 } 7820 7821 /* 7822 * We had to drop the lock to allocate our buffer, which 7823 * leaves time for someone to come in with another 7824 * persistent reservation. (That is unlikely, though, 7825 * since this should be the only persistent reservation 7826 * command active right now.) 7827 */ 7828 if (tmp_len != total_len) { 7829 mtx_unlock(&lun->lun_lock); 7830 free(ctsio->kern_data_ptr, M_CTL); 7831 printf("%s: reservation status changed, retrying\n", 7832 __func__); 7833 goto retry; 7834 } 7835 7836 /* 7837 * No reservation held, so we're done. 7838 */ 7839 if (header_only != 0) 7840 break; 7841 7842 /* 7843 * If the registration is an All Registrants type, the key 7844 * is 0, since it doesn't really matter. 
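		 * (The buffer was allocated with M_ZERO, so simply not
		 * filling in the key below leaves it reported as zero.)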
7845 */ 7846 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7847 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7848 res->data.reservation); 7849 } 7850 res->data.scopetype = lun->pr_res_type; 7851 break; 7852 } 7853 case SPRI_RC: //report capabilities 7854 { 7855 struct scsi_per_res_cap *res_cap; 7856 uint16_t type_mask; 7857 7858 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7859 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7860 res_cap->flags1 = SPRI_CRH; 7861 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7862 type_mask = SPRI_TM_WR_EX_AR | 7863 SPRI_TM_EX_AC_RO | 7864 SPRI_TM_WR_EX_RO | 7865 SPRI_TM_EX_AC | 7866 SPRI_TM_WR_EX | 7867 SPRI_TM_EX_AC_AR; 7868 scsi_ulto2b(type_mask, res_cap->type_mask); 7869 break; 7870 } 7871 case SPRI_RS: { // read full status 7872 struct scsi_per_res_in_full *res_status; 7873 struct scsi_per_res_in_full_desc *res_desc; 7874 struct ctl_port *port; 7875 int i, len; 7876 7877 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7878 7879 /* 7880 * We had to drop the lock to allocate our buffer, which 7881 * leaves time for someone to come in with another 7882 * persistent reservation. (That is unlikely, though, 7883 * since this should be the only persistent reservation 7884 * command active right now.) 7885 */ 7886 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7887 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7888 lun->pr_key_count)){ 7889 mtx_unlock(&lun->lun_lock); 7890 free(ctsio->kern_data_ptr, M_CTL); 7891 printf("%s: reservation length changed, retrying\n", 7892 __func__); 7893 goto retry; 7894 } 7895 7896 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7897 7898 res_desc = &res_status->desc[0]; 7899 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7900 if ((key = ctl_get_prkey(lun, i)) == 0) 7901 continue; 7902 7903 scsi_u64to8b(key, res_desc->res_key.key); 7904 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7905 (lun->pr_res_idx == i || 7906 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7907 res_desc->flags = SPRI_FULL_R_HOLDER; 7908 res_desc->scopetype = lun->pr_res_type; 7909 } 7910 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7911 res_desc->rel_trgt_port_id); 7912 len = 0; 7913 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7914 if (port != NULL) 7915 len = ctl_create_iid(port, 7916 i % CTL_MAX_INIT_PER_PORT, 7917 res_desc->transport_id); 7918 scsi_ulto4b(len, res_desc->additional_length); 7919 res_desc = (struct scsi_per_res_in_full_desc *) 7920 &res_desc->transport_id[len]; 7921 } 7922 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7923 res_status->header.length); 7924 break; 7925 } 7926 default: 7927 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7928 } 7929 mtx_unlock(&lun->lun_lock); 7930 7931 ctl_set_success(ctsio); 7932 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7933 ctsio->be_move_done = ctl_config_move_done; 7934 ctl_datamove((union ctl_io *)ctsio); 7935 return (CTL_RETVAL_COMPLETE); 7936 } 7937 7938 /* 7939 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7940 * it should return. 
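 * In the non-zero case the command has already been completed here
 * (status/sense set and ctl_done() called), so the caller must not touch
 * the ctsio again.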
7941 */ 7942 static int 7943 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7944 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7945 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7946 struct scsi_per_res_out_parms* param) 7947 { 7948 union ctl_ha_msg persis_io; 7949 int i; 7950 7951 mtx_lock(&lun->lun_lock); 7952 if (sa_res_key == 0) { 7953 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7954 /* validate scope and type */ 7955 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7956 SPR_LU_SCOPE) { 7957 mtx_unlock(&lun->lun_lock); 7958 ctl_set_invalid_field(/*ctsio*/ ctsio, 7959 /*sks_valid*/ 1, 7960 /*command*/ 1, 7961 /*field*/ 2, 7962 /*bit_valid*/ 1, 7963 /*bit*/ 4); 7964 ctl_done((union ctl_io *)ctsio); 7965 return (1); 7966 } 7967 7968 if (type>8 || type==2 || type==4 || type==0) { 7969 mtx_unlock(&lun->lun_lock); 7970 ctl_set_invalid_field(/*ctsio*/ ctsio, 7971 /*sks_valid*/ 1, 7972 /*command*/ 1, 7973 /*field*/ 2, 7974 /*bit_valid*/ 1, 7975 /*bit*/ 0); 7976 ctl_done((union ctl_io *)ctsio); 7977 return (1); 7978 } 7979 7980 /* 7981 * Unregister everybody else and build UA for 7982 * them 7983 */ 7984 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7985 if (i == residx || ctl_get_prkey(lun, i) == 0) 7986 continue; 7987 7988 ctl_clr_prkey(lun, i); 7989 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7990 } 7991 lun->pr_key_count = 1; 7992 lun->pr_res_type = type; 7993 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7994 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7995 lun->pr_res_idx = residx; 7996 lun->pr_generation++; 7997 mtx_unlock(&lun->lun_lock); 7998 7999 /* send msg to other side */ 8000 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8001 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8002 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8003 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8004 persis_io.pr.pr_info.res_type = type; 8005 memcpy(persis_io.pr.pr_info.sa_res_key, 8006 param->serv_act_res_key, 8007 sizeof(param->serv_act_res_key)); 8008 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8009 sizeof(persis_io.pr), M_WAITOK); 8010 } else { 8011 /* not all registrants */ 8012 mtx_unlock(&lun->lun_lock); 8013 free(ctsio->kern_data_ptr, M_CTL); 8014 ctl_set_invalid_field(ctsio, 8015 /*sks_valid*/ 1, 8016 /*command*/ 0, 8017 /*field*/ 8, 8018 /*bit_valid*/ 0, 8019 /*bit*/ 0); 8020 ctl_done((union ctl_io *)ctsio); 8021 return (1); 8022 } 8023 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8024 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8025 int found = 0; 8026 8027 if (res_key == sa_res_key) { 8028 /* special case */ 8029 /* 8030 * The spec implies this is not good but doesn't 8031 * say what to do. There are two choices either 8032 * generate a res conflict or check condition 8033 * with illegal field in parameter data. Since 8034 * that is what is done when the sa_res_key is 8035 * zero I'll take that approach since this has 8036 * to do with the sa_res_key. 
8037 */ 8038 mtx_unlock(&lun->lun_lock); 8039 free(ctsio->kern_data_ptr, M_CTL); 8040 ctl_set_invalid_field(ctsio, 8041 /*sks_valid*/ 1, 8042 /*command*/ 0, 8043 /*field*/ 8, 8044 /*bit_valid*/ 0, 8045 /*bit*/ 0); 8046 ctl_done((union ctl_io *)ctsio); 8047 return (1); 8048 } 8049 8050 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8051 if (ctl_get_prkey(lun, i) != sa_res_key) 8052 continue; 8053 8054 found = 1; 8055 ctl_clr_prkey(lun, i); 8056 lun->pr_key_count--; 8057 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8058 } 8059 if (!found) { 8060 mtx_unlock(&lun->lun_lock); 8061 free(ctsio->kern_data_ptr, M_CTL); 8062 ctl_set_reservation_conflict(ctsio); 8063 ctl_done((union ctl_io *)ctsio); 8064 return (CTL_RETVAL_COMPLETE); 8065 } 8066 lun->pr_generation++; 8067 mtx_unlock(&lun->lun_lock); 8068 8069 /* send msg to other side */ 8070 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8071 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8072 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8073 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8074 persis_io.pr.pr_info.res_type = type; 8075 memcpy(persis_io.pr.pr_info.sa_res_key, 8076 param->serv_act_res_key, 8077 sizeof(param->serv_act_res_key)); 8078 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8079 sizeof(persis_io.pr), M_WAITOK); 8080 } else { 8081 /* Reserved but not all registrants */ 8082 /* sa_res_key is res holder */ 8083 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8084 /* validate scope and type */ 8085 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8086 SPR_LU_SCOPE) { 8087 mtx_unlock(&lun->lun_lock); 8088 ctl_set_invalid_field(/*ctsio*/ ctsio, 8089 /*sks_valid*/ 1, 8090 /*command*/ 1, 8091 /*field*/ 2, 8092 /*bit_valid*/ 1, 8093 /*bit*/ 4); 8094 ctl_done((union ctl_io *)ctsio); 8095 return (1); 8096 } 8097 8098 if (type>8 || type==2 || type==4 || type==0) { 8099 mtx_unlock(&lun->lun_lock); 8100 ctl_set_invalid_field(/*ctsio*/ ctsio, 8101 /*sks_valid*/ 1, 8102 /*command*/ 1, 8103 /*field*/ 2, 8104 /*bit_valid*/ 1, 8105 /*bit*/ 0); 8106 ctl_done((union ctl_io *)ctsio); 8107 return (1); 8108 } 8109 8110 /* 8111 * Do the following: 8112 * if sa_res_key != res_key remove all 8113 * registrants w/sa_res_key and generate UA 8114 * for these registrants(Registrations 8115 * Preempted) if it wasn't an exclusive 8116 * reservation generate UA(Reservations 8117 * Preempted) for all other registered nexuses 8118 * if the type has changed. Establish the new 8119 * reservation and holder. If res_key and 8120 * sa_res_key are the same do the above 8121 * except don't unregister the res holder. 
8122 */ 8123 8124 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8125 if (i == residx || ctl_get_prkey(lun, i) == 0) 8126 continue; 8127 8128 if (sa_res_key == ctl_get_prkey(lun, i)) { 8129 ctl_clr_prkey(lun, i); 8130 lun->pr_key_count--; 8131 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8132 } else if (type != lun->pr_res_type && 8133 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8134 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8135 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8136 } 8137 } 8138 lun->pr_res_type = type; 8139 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8140 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8141 lun->pr_res_idx = residx; 8142 else 8143 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8144 lun->pr_generation++; 8145 mtx_unlock(&lun->lun_lock); 8146 8147 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8148 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8149 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8150 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8151 persis_io.pr.pr_info.res_type = type; 8152 memcpy(persis_io.pr.pr_info.sa_res_key, 8153 param->serv_act_res_key, 8154 sizeof(param->serv_act_res_key)); 8155 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8156 sizeof(persis_io.pr), M_WAITOK); 8157 } else { 8158 /* 8159 * sa_res_key is not the res holder just 8160 * remove registrants 8161 */ 8162 int found=0; 8163 8164 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8165 if (sa_res_key != ctl_get_prkey(lun, i)) 8166 continue; 8167 8168 found = 1; 8169 ctl_clr_prkey(lun, i); 8170 lun->pr_key_count--; 8171 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8172 } 8173 8174 if (!found) { 8175 mtx_unlock(&lun->lun_lock); 8176 free(ctsio->kern_data_ptr, M_CTL); 8177 ctl_set_reservation_conflict(ctsio); 8178 ctl_done((union ctl_io *)ctsio); 8179 return (1); 8180 } 8181 lun->pr_generation++; 8182 mtx_unlock(&lun->lun_lock); 8183 8184 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8185 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8186 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8187 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8188 persis_io.pr.pr_info.res_type = type; 8189 memcpy(persis_io.pr.pr_info.sa_res_key, 8190 param->serv_act_res_key, 8191 sizeof(param->serv_act_res_key)); 8192 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8193 sizeof(persis_io.pr), M_WAITOK); 8194 } 8195 } 8196 return (0); 8197 } 8198 8199 static void 8200 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8201 { 8202 uint64_t sa_res_key; 8203 int i; 8204 8205 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8206 8207 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8208 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8209 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8210 if (sa_res_key == 0) { 8211 /* 8212 * Unregister everybody else and build UA for 8213 * them 8214 */ 8215 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8216 if (i == msg->pr.pr_info.residx || 8217 ctl_get_prkey(lun, i) == 0) 8218 continue; 8219 8220 ctl_clr_prkey(lun, i); 8221 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8222 } 8223 8224 lun->pr_key_count = 1; 8225 lun->pr_res_type = msg->pr.pr_info.res_type; 8226 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8227 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8228 lun->pr_res_idx = msg->pr.pr_info.residx; 8229 } else { 8230 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8231 if (sa_res_key == ctl_get_prkey(lun, i)) 8232 continue; 8233 8234 ctl_clr_prkey(lun, i); 8235 lun->pr_key_count--; 8236 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8237 } 8238 } 8239 } else { 8240 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8241 if (i == 
msg->pr.pr_info.residx || 8242 ctl_get_prkey(lun, i) == 0) 8243 continue; 8244 8245 if (sa_res_key == ctl_get_prkey(lun, i)) { 8246 ctl_clr_prkey(lun, i); 8247 lun->pr_key_count--; 8248 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8249 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8250 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8251 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8252 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8253 } 8254 } 8255 lun->pr_res_type = msg->pr.pr_info.res_type; 8256 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8257 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8258 lun->pr_res_idx = msg->pr.pr_info.residx; 8259 else 8260 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8261 } 8262 lun->pr_generation++; 8263 8264 } 8265 8266 int 8267 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8268 { 8269 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8270 struct ctl_lun *lun = CTL_LUN(ctsio); 8271 int retval; 8272 uint32_t param_len; 8273 struct scsi_per_res_out *cdb; 8274 struct scsi_per_res_out_parms* param; 8275 uint32_t residx; 8276 uint64_t res_key, sa_res_key, key; 8277 uint8_t type; 8278 union ctl_ha_msg persis_io; 8279 int i; 8280 8281 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8282 8283 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8284 retval = CTL_RETVAL_COMPLETE; 8285 8286 /* 8287 * We only support whole-LUN scope. The scope & type are ignored for 8288 * register, register and ignore existing key and clear. 8289 * We sometimes ignore scope and type on preempts too!! 8290 * Verify reservation type here as well. 8291 */ 8292 type = cdb->scope_type & SPR_TYPE_MASK; 8293 if ((cdb->action == SPRO_RESERVE) 8294 || (cdb->action == SPRO_RELEASE)) { 8295 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8296 ctl_set_invalid_field(/*ctsio*/ ctsio, 8297 /*sks_valid*/ 1, 8298 /*command*/ 1, 8299 /*field*/ 2, 8300 /*bit_valid*/ 1, 8301 /*bit*/ 4); 8302 ctl_done((union ctl_io *)ctsio); 8303 return (CTL_RETVAL_COMPLETE); 8304 } 8305 8306 if (type>8 || type==2 || type==4 || type==0) { 8307 ctl_set_invalid_field(/*ctsio*/ ctsio, 8308 /*sks_valid*/ 1, 8309 /*command*/ 1, 8310 /*field*/ 2, 8311 /*bit_valid*/ 1, 8312 /*bit*/ 0); 8313 ctl_done((union ctl_io *)ctsio); 8314 return (CTL_RETVAL_COMPLETE); 8315 } 8316 } 8317 8318 param_len = scsi_4btoul(cdb->length); 8319 8320 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8321 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8322 ctsio->kern_data_len = param_len; 8323 ctsio->kern_total_len = param_len; 8324 ctsio->kern_rel_offset = 0; 8325 ctsio->kern_sg_entries = 0; 8326 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8327 ctsio->be_move_done = ctl_config_move_done; 8328 ctl_datamove((union ctl_io *)ctsio); 8329 8330 return (CTL_RETVAL_COMPLETE); 8331 } 8332 8333 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8334 8335 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8336 res_key = scsi_8btou64(param->res_key.key); 8337 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8338 8339 /* 8340 * Validate the reservation key here except for SPRO_REG_IGNO 8341 * This must be done for all other service actions 8342 */ 8343 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8344 mtx_lock(&lun->lun_lock); 8345 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8346 if (res_key != key) { 8347 /* 8348 * The current key passed in doesn't match 8349 * the one the initiator previously 8350 * registered. 
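			 * Per SPC this is reported as a RESERVATION
			 * CONFLICT, which is what
			 * ctl_set_reservation_conflict() does below.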
8351 */ 8352 mtx_unlock(&lun->lun_lock); 8353 free(ctsio->kern_data_ptr, M_CTL); 8354 ctl_set_reservation_conflict(ctsio); 8355 ctl_done((union ctl_io *)ctsio); 8356 return (CTL_RETVAL_COMPLETE); 8357 } 8358 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8359 /* 8360 * We are not registered 8361 */ 8362 mtx_unlock(&lun->lun_lock); 8363 free(ctsio->kern_data_ptr, M_CTL); 8364 ctl_set_reservation_conflict(ctsio); 8365 ctl_done((union ctl_io *)ctsio); 8366 return (CTL_RETVAL_COMPLETE); 8367 } else if (res_key != 0) { 8368 /* 8369 * We are not registered and trying to register but 8370 * the register key isn't zero. 8371 */ 8372 mtx_unlock(&lun->lun_lock); 8373 free(ctsio->kern_data_ptr, M_CTL); 8374 ctl_set_reservation_conflict(ctsio); 8375 ctl_done((union ctl_io *)ctsio); 8376 return (CTL_RETVAL_COMPLETE); 8377 } 8378 mtx_unlock(&lun->lun_lock); 8379 } 8380 8381 switch (cdb->action & SPRO_ACTION_MASK) { 8382 case SPRO_REGISTER: 8383 case SPRO_REG_IGNO: { 8384 /* 8385 * We don't support any of these options, as we report in 8386 * the read capabilities request (see 8387 * ctl_persistent_reserve_in(), above). 8388 */ 8389 if ((param->flags & SPR_SPEC_I_PT) 8390 || (param->flags & SPR_ALL_TG_PT) 8391 || (param->flags & SPR_APTPL)) { 8392 int bit_ptr; 8393 8394 if (param->flags & SPR_APTPL) 8395 bit_ptr = 0; 8396 else if (param->flags & SPR_ALL_TG_PT) 8397 bit_ptr = 2; 8398 else /* SPR_SPEC_I_PT */ 8399 bit_ptr = 3; 8400 8401 free(ctsio->kern_data_ptr, M_CTL); 8402 ctl_set_invalid_field(ctsio, 8403 /*sks_valid*/ 1, 8404 /*command*/ 0, 8405 /*field*/ 20, 8406 /*bit_valid*/ 1, 8407 /*bit*/ bit_ptr); 8408 ctl_done((union ctl_io *)ctsio); 8409 return (CTL_RETVAL_COMPLETE); 8410 } 8411 8412 mtx_lock(&lun->lun_lock); 8413 8414 /* 8415 * The initiator wants to clear the 8416 * key/unregister. 8417 */ 8418 if (sa_res_key == 0) { 8419 if ((res_key == 0 8420 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8421 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8422 && ctl_get_prkey(lun, residx) == 0)) { 8423 mtx_unlock(&lun->lun_lock); 8424 goto done; 8425 } 8426 8427 ctl_clr_prkey(lun, residx); 8428 lun->pr_key_count--; 8429 8430 if (residx == lun->pr_res_idx) { 8431 lun->flags &= ~CTL_LUN_PR_RESERVED; 8432 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8433 8434 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8435 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8436 lun->pr_key_count) { 8437 /* 8438 * If the reservation is a registrants 8439 * only type we need to generate a UA 8440 * for other registered inits. The 8441 * sense code should be RESERVATIONS 8442 * RELEASED 8443 */ 8444 8445 for (i = softc->init_min; i < softc->init_max; i++){ 8446 if (ctl_get_prkey(lun, i) == 0) 8447 continue; 8448 ctl_est_ua(lun, i, 8449 CTL_UA_RES_RELEASE); 8450 } 8451 } 8452 lun->pr_res_type = 0; 8453 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8454 if (lun->pr_key_count==0) { 8455 lun->flags &= ~CTL_LUN_PR_RESERVED; 8456 lun->pr_res_type = 0; 8457 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8458 } 8459 } 8460 lun->pr_generation++; 8461 mtx_unlock(&lun->lun_lock); 8462 8463 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8464 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8465 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8466 persis_io.pr.pr_info.residx = residx; 8467 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8468 sizeof(persis_io.pr), M_WAITOK); 8469 } else /* sa_res_key != 0 */ { 8470 /* 8471 * If we aren't registered currently then increment 8472 * the key count and set the registered flag. 
8473 */ 8474 ctl_alloc_prkey(lun, residx); 8475 if (ctl_get_prkey(lun, residx) == 0) 8476 lun->pr_key_count++; 8477 ctl_set_prkey(lun, residx, sa_res_key); 8478 lun->pr_generation++; 8479 mtx_unlock(&lun->lun_lock); 8480 8481 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8482 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8483 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8484 persis_io.pr.pr_info.residx = residx; 8485 memcpy(persis_io.pr.pr_info.sa_res_key, 8486 param->serv_act_res_key, 8487 sizeof(param->serv_act_res_key)); 8488 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8489 sizeof(persis_io.pr), M_WAITOK); 8490 } 8491 8492 break; 8493 } 8494 case SPRO_RESERVE: 8495 mtx_lock(&lun->lun_lock); 8496 if (lun->flags & CTL_LUN_PR_RESERVED) { 8497 /* 8498 * if this isn't the reservation holder and it's 8499 * not a "all registrants" type or if the type is 8500 * different then we have a conflict 8501 */ 8502 if ((lun->pr_res_idx != residx 8503 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8504 || lun->pr_res_type != type) { 8505 mtx_unlock(&lun->lun_lock); 8506 free(ctsio->kern_data_ptr, M_CTL); 8507 ctl_set_reservation_conflict(ctsio); 8508 ctl_done((union ctl_io *)ctsio); 8509 return (CTL_RETVAL_COMPLETE); 8510 } 8511 mtx_unlock(&lun->lun_lock); 8512 } else /* create a reservation */ { 8513 /* 8514 * If it's not an "all registrants" type record 8515 * reservation holder 8516 */ 8517 if (type != SPR_TYPE_WR_EX_AR 8518 && type != SPR_TYPE_EX_AC_AR) 8519 lun->pr_res_idx = residx; /* Res holder */ 8520 else 8521 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8522 8523 lun->flags |= CTL_LUN_PR_RESERVED; 8524 lun->pr_res_type = type; 8525 8526 mtx_unlock(&lun->lun_lock); 8527 8528 /* send msg to other side */ 8529 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8530 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8531 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8532 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8533 persis_io.pr.pr_info.res_type = type; 8534 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8535 sizeof(persis_io.pr), M_WAITOK); 8536 } 8537 break; 8538 8539 case SPRO_RELEASE: 8540 mtx_lock(&lun->lun_lock); 8541 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8542 /* No reservation exists return good status */ 8543 mtx_unlock(&lun->lun_lock); 8544 goto done; 8545 } 8546 /* 8547 * Is this nexus a reservation holder? 8548 */ 8549 if (lun->pr_res_idx != residx 8550 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8551 /* 8552 * not a res holder return good status but 8553 * do nothing 8554 */ 8555 mtx_unlock(&lun->lun_lock); 8556 goto done; 8557 } 8558 8559 if (lun->pr_res_type != type) { 8560 mtx_unlock(&lun->lun_lock); 8561 free(ctsio->kern_data_ptr, M_CTL); 8562 ctl_set_illegal_pr_release(ctsio); 8563 ctl_done((union ctl_io *)ctsio); 8564 return (CTL_RETVAL_COMPLETE); 8565 } 8566 8567 /* okay to release */ 8568 lun->flags &= ~CTL_LUN_PR_RESERVED; 8569 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8570 lun->pr_res_type = 0; 8571 8572 /* 8573 * If this isn't an exclusive access reservation and NUAR 8574 * is not set, generate UA for all other registrants. 
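		 * (NUAR is the "No Unit Attention on Release" bit in the
		 * Control mode page; when the initiator has set it, the
		 * RESERVATIONS RELEASED unit attention is suppressed.)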
8575 */ 8576 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8577 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8578 for (i = softc->init_min; i < softc->init_max; i++) { 8579 if (i == residx || ctl_get_prkey(lun, i) == 0) 8580 continue; 8581 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8582 } 8583 } 8584 mtx_unlock(&lun->lun_lock); 8585 8586 /* Send msg to other side */ 8587 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8588 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8589 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8590 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8591 sizeof(persis_io.pr), M_WAITOK); 8592 break; 8593 8594 case SPRO_CLEAR: 8595 /* send msg to other side */ 8596 8597 mtx_lock(&lun->lun_lock); 8598 lun->flags &= ~CTL_LUN_PR_RESERVED; 8599 lun->pr_res_type = 0; 8600 lun->pr_key_count = 0; 8601 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8602 8603 ctl_clr_prkey(lun, residx); 8604 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8605 if (ctl_get_prkey(lun, i) != 0) { 8606 ctl_clr_prkey(lun, i); 8607 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8608 } 8609 lun->pr_generation++; 8610 mtx_unlock(&lun->lun_lock); 8611 8612 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8613 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8614 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8615 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8616 sizeof(persis_io.pr), M_WAITOK); 8617 break; 8618 8619 case SPRO_PREEMPT: 8620 case SPRO_PRE_ABO: { 8621 int nretval; 8622 8623 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8624 residx, ctsio, cdb, param); 8625 if (nretval != 0) 8626 return (CTL_RETVAL_COMPLETE); 8627 break; 8628 } 8629 default: 8630 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8631 } 8632 8633 done: 8634 free(ctsio->kern_data_ptr, M_CTL); 8635 ctl_set_success(ctsio); 8636 ctl_done((union ctl_io *)ctsio); 8637 8638 return (retval); 8639 } 8640 8641 /* 8642 * This routine is for handling a message from the other SC pertaining to 8643 * persistent reserve out. All the error checking will have been done 8644 * so only performing the action need be done here to keep the two 8645 * in sync. 8646 */ 8647 static void 8648 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8649 { 8650 struct ctl_softc *softc = CTL_SOFTC(io); 8651 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8652 struct ctl_lun *lun; 8653 int i; 8654 uint32_t residx, targ_lun; 8655 8656 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8657 mtx_lock(&softc->ctl_lock); 8658 if (targ_lun >= ctl_max_luns || 8659 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8660 mtx_unlock(&softc->ctl_lock); 8661 return; 8662 } 8663 mtx_lock(&lun->lun_lock); 8664 mtx_unlock(&softc->ctl_lock); 8665 if (lun->flags & CTL_LUN_DISABLED) { 8666 mtx_unlock(&lun->lun_lock); 8667 return; 8668 } 8669 residx = ctl_get_initindex(&msg->hdr.nexus); 8670 switch(msg->pr.pr_info.action) { 8671 case CTL_PR_REG_KEY: 8672 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8673 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8674 lun->pr_key_count++; 8675 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8676 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8677 lun->pr_generation++; 8678 break; 8679 8680 case CTL_PR_UNREG_KEY: 8681 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8682 lun->pr_key_count--; 8683 8684 /* XXX Need to see if the reservation has been released */ 8685 /* if so do we need to generate UA? 
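		 * (The code below does handle that: it drops the reservation
		 * when the holder unregisters and, for registrants-only
		 * types, posts a RESERVATIONS RELEASED unit attention to the
		 * remaining registrants.)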
*/ 8686 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8687 lun->flags &= ~CTL_LUN_PR_RESERVED; 8688 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8689 8690 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8691 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8692 lun->pr_key_count) { 8693 /* 8694 * If the reservation is a registrants 8695 * only type we need to generate a UA 8696 * for other registered inits. The 8697 * sense code should be RESERVATIONS 8698 * RELEASED 8699 */ 8700 8701 for (i = softc->init_min; i < softc->init_max; i++) { 8702 if (ctl_get_prkey(lun, i) == 0) 8703 continue; 8704 8705 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8706 } 8707 } 8708 lun->pr_res_type = 0; 8709 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8710 if (lun->pr_key_count==0) { 8711 lun->flags &= ~CTL_LUN_PR_RESERVED; 8712 lun->pr_res_type = 0; 8713 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8714 } 8715 } 8716 lun->pr_generation++; 8717 break; 8718 8719 case CTL_PR_RESERVE: 8720 lun->flags |= CTL_LUN_PR_RESERVED; 8721 lun->pr_res_type = msg->pr.pr_info.res_type; 8722 lun->pr_res_idx = msg->pr.pr_info.residx; 8723 8724 break; 8725 8726 case CTL_PR_RELEASE: 8727 /* 8728 * If this isn't an exclusive access reservation and NUAR 8729 * is not set, generate UA for all other registrants. 8730 */ 8731 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8732 lun->pr_res_type != SPR_TYPE_WR_EX && 8733 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8734 for (i = softc->init_min; i < softc->init_max; i++) { 8735 if (i == residx || ctl_get_prkey(lun, i) == 0) 8736 continue; 8737 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8738 } 8739 } 8740 8741 lun->flags &= ~CTL_LUN_PR_RESERVED; 8742 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8743 lun->pr_res_type = 0; 8744 break; 8745 8746 case CTL_PR_PREEMPT: 8747 ctl_pro_preempt_other(lun, msg); 8748 break; 8749 case CTL_PR_CLEAR: 8750 lun->flags &= ~CTL_LUN_PR_RESERVED; 8751 lun->pr_res_type = 0; 8752 lun->pr_key_count = 0; 8753 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8754 8755 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8756 if (ctl_get_prkey(lun, i) == 0) 8757 continue; 8758 ctl_clr_prkey(lun, i); 8759 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8760 } 8761 lun->pr_generation++; 8762 break; 8763 } 8764 8765 mtx_unlock(&lun->lun_lock); 8766 } 8767 8768 int 8769 ctl_read_write(struct ctl_scsiio *ctsio) 8770 { 8771 struct ctl_lun *lun = CTL_LUN(ctsio); 8772 struct ctl_lba_len_flags *lbalen; 8773 uint64_t lba; 8774 uint32_t num_blocks; 8775 int flags, retval; 8776 int isread; 8777 8778 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8779 8780 flags = 0; 8781 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8782 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8783 switch (ctsio->cdb[0]) { 8784 case READ_6: 8785 case WRITE_6: { 8786 struct scsi_rw_6 *cdb; 8787 8788 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8789 8790 lba = scsi_3btoul(cdb->addr); 8791 /* only 5 bits are valid in the most significant address byte */ 8792 lba &= 0x1fffff; 8793 num_blocks = cdb->length; 8794 /* 8795 * This is correct according to SBC-2. 
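		 * In the 6-byte CDB a TRANSFER LENGTH of zero means 256
		 * blocks, hence the substitution below.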
8796 */ 8797 if (num_blocks == 0) 8798 num_blocks = 256; 8799 break; 8800 } 8801 case READ_10: 8802 case WRITE_10: { 8803 struct scsi_rw_10 *cdb; 8804 8805 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8806 if (cdb->byte2 & SRW10_FUA) 8807 flags |= CTL_LLF_FUA; 8808 if (cdb->byte2 & SRW10_DPO) 8809 flags |= CTL_LLF_DPO; 8810 lba = scsi_4btoul(cdb->addr); 8811 num_blocks = scsi_2btoul(cdb->length); 8812 break; 8813 } 8814 case WRITE_VERIFY_10: { 8815 struct scsi_write_verify_10 *cdb; 8816 8817 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8818 flags |= CTL_LLF_FUA; 8819 if (cdb->byte2 & SWV_DPO) 8820 flags |= CTL_LLF_DPO; 8821 lba = scsi_4btoul(cdb->addr); 8822 num_blocks = scsi_2btoul(cdb->length); 8823 break; 8824 } 8825 case READ_12: 8826 case WRITE_12: { 8827 struct scsi_rw_12 *cdb; 8828 8829 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8830 if (cdb->byte2 & SRW12_FUA) 8831 flags |= CTL_LLF_FUA; 8832 if (cdb->byte2 & SRW12_DPO) 8833 flags |= CTL_LLF_DPO; 8834 lba = scsi_4btoul(cdb->addr); 8835 num_blocks = scsi_4btoul(cdb->length); 8836 break; 8837 } 8838 case WRITE_VERIFY_12: { 8839 struct scsi_write_verify_12 *cdb; 8840 8841 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8842 flags |= CTL_LLF_FUA; 8843 if (cdb->byte2 & SWV_DPO) 8844 flags |= CTL_LLF_DPO; 8845 lba = scsi_4btoul(cdb->addr); 8846 num_blocks = scsi_4btoul(cdb->length); 8847 break; 8848 } 8849 case READ_16: 8850 case WRITE_16: { 8851 struct scsi_rw_16 *cdb; 8852 8853 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8854 if (cdb->byte2 & SRW12_FUA) 8855 flags |= CTL_LLF_FUA; 8856 if (cdb->byte2 & SRW12_DPO) 8857 flags |= CTL_LLF_DPO; 8858 lba = scsi_8btou64(cdb->addr); 8859 num_blocks = scsi_4btoul(cdb->length); 8860 break; 8861 } 8862 case WRITE_ATOMIC_16: { 8863 struct scsi_write_atomic_16 *cdb; 8864 8865 if (lun->be_lun->atomicblock == 0) { 8866 ctl_set_invalid_opcode(ctsio); 8867 ctl_done((union ctl_io *)ctsio); 8868 return (CTL_RETVAL_COMPLETE); 8869 } 8870 8871 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8872 if (cdb->byte2 & SRW12_FUA) 8873 flags |= CTL_LLF_FUA; 8874 if (cdb->byte2 & SRW12_DPO) 8875 flags |= CTL_LLF_DPO; 8876 lba = scsi_8btou64(cdb->addr); 8877 num_blocks = scsi_2btoul(cdb->length); 8878 if (num_blocks > lun->be_lun->atomicblock) { 8879 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8880 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8881 /*bit*/ 0); 8882 ctl_done((union ctl_io *)ctsio); 8883 return (CTL_RETVAL_COMPLETE); 8884 } 8885 break; 8886 } 8887 case WRITE_VERIFY_16: { 8888 struct scsi_write_verify_16 *cdb; 8889 8890 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8891 flags |= CTL_LLF_FUA; 8892 if (cdb->byte2 & SWV_DPO) 8893 flags |= CTL_LLF_DPO; 8894 lba = scsi_8btou64(cdb->addr); 8895 num_blocks = scsi_4btoul(cdb->length); 8896 break; 8897 } 8898 default: 8899 /* 8900 * We got a command we don't support. This shouldn't 8901 * happen, commands should be filtered out above us. 8902 */ 8903 ctl_set_invalid_opcode(ctsio); 8904 ctl_done((union ctl_io *)ctsio); 8905 8906 return (CTL_RETVAL_COMPLETE); 8907 break; /* NOTREACHED */ 8908 } 8909 8910 /* 8911 * The first check is to make sure we're in bounds, the second 8912 * check is to catch wrap-around problems. If the lba + num blocks 8913 * is less than the lba, then we've wrapped around and the block 8914 * range is invalid anyway. 
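	 * For example, with maxlba 999 an I/O for lba 998 and 4 blocks
	 * fails the first test, while an lba near UINT64_MAX with a large
	 * length wraps around and fails the second.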
8915 */ 8916 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8917 || ((lba + num_blocks) < lba)) { 8918 ctl_set_lba_out_of_range(ctsio, 8919 MAX(lba, lun->be_lun->maxlba + 1)); 8920 ctl_done((union ctl_io *)ctsio); 8921 return (CTL_RETVAL_COMPLETE); 8922 } 8923 8924 /* 8925 * According to SBC-3, a transfer length of 0 is not an error. 8926 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8927 * translates to 256 blocks for those commands. 8928 */ 8929 if (num_blocks == 0) { 8930 ctl_set_success(ctsio); 8931 ctl_done((union ctl_io *)ctsio); 8932 return (CTL_RETVAL_COMPLETE); 8933 } 8934 8935 /* Set FUA and/or DPO if caches are disabled. */ 8936 if (isread) { 8937 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8938 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8939 } else { 8940 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8941 flags |= CTL_LLF_FUA; 8942 } 8943 8944 lbalen = (struct ctl_lba_len_flags *) 8945 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8946 lbalen->lba = lba; 8947 lbalen->len = num_blocks; 8948 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8949 8950 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8951 ctsio->kern_rel_offset = 0; 8952 8953 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8954 8955 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8956 return (retval); 8957 } 8958 8959 static int 8960 ctl_cnw_cont(union ctl_io *io) 8961 { 8962 struct ctl_lun *lun = CTL_LUN(io); 8963 struct ctl_scsiio *ctsio; 8964 struct ctl_lba_len_flags *lbalen; 8965 int retval; 8966 8967 ctsio = &io->scsiio; 8968 ctsio->io_hdr.status = CTL_STATUS_NONE; 8969 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8970 lbalen = (struct ctl_lba_len_flags *) 8971 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8972 lbalen->flags &= ~CTL_LLF_COMPARE; 8973 lbalen->flags |= CTL_LLF_WRITE; 8974 8975 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8976 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8977 return (retval); 8978 } 8979 8980 int 8981 ctl_cnw(struct ctl_scsiio *ctsio) 8982 { 8983 struct ctl_lun *lun = CTL_LUN(ctsio); 8984 struct ctl_lba_len_flags *lbalen; 8985 uint64_t lba; 8986 uint32_t num_blocks; 8987 int flags, retval; 8988 8989 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8990 8991 flags = 0; 8992 switch (ctsio->cdb[0]) { 8993 case COMPARE_AND_WRITE: { 8994 struct scsi_compare_and_write *cdb; 8995 8996 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8997 if (cdb->byte2 & SRW10_FUA) 8998 flags |= CTL_LLF_FUA; 8999 if (cdb->byte2 & SRW10_DPO) 9000 flags |= CTL_LLF_DPO; 9001 lba = scsi_8btou64(cdb->addr); 9002 num_blocks = cdb->length; 9003 break; 9004 } 9005 default: 9006 /* 9007 * We got a command we don't support. This shouldn't 9008 * happen, commands should be filtered out above us. 9009 */ 9010 ctl_set_invalid_opcode(ctsio); 9011 ctl_done((union ctl_io *)ctsio); 9012 9013 return (CTL_RETVAL_COMPLETE); 9014 break; /* NOTREACHED */ 9015 } 9016 9017 /* 9018 * The first check is to make sure we're in bounds, the second 9019 * check is to catch wrap-around problems. If the lba + num blocks 9020 * is less than the lba, then we've wrapped around and the block 9021 * range is invalid anyway. 
9022 */ 9023 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9024 || ((lba + num_blocks) < lba)) { 9025 ctl_set_lba_out_of_range(ctsio, 9026 MAX(lba, lun->be_lun->maxlba + 1)); 9027 ctl_done((union ctl_io *)ctsio); 9028 return (CTL_RETVAL_COMPLETE); 9029 } 9030 9031 /* 9032 * According to SBC-3, a transfer length of 0 is not an error. 9033 */ 9034 if (num_blocks == 0) { 9035 ctl_set_success(ctsio); 9036 ctl_done((union ctl_io *)ctsio); 9037 return (CTL_RETVAL_COMPLETE); 9038 } 9039 9040 /* Set FUA if write cache is disabled. */ 9041 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 9042 flags |= CTL_LLF_FUA; 9043 9044 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 9045 ctsio->kern_rel_offset = 0; 9046 9047 /* 9048 * Set the IO_CONT flag, so that if this I/O gets passed to 9049 * ctl_data_submit_done(), it'll get passed back to 9050 * ctl_ctl_cnw_cont() for further processing. 9051 */ 9052 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 9053 ctsio->io_cont = ctl_cnw_cont; 9054 9055 lbalen = (struct ctl_lba_len_flags *) 9056 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9057 lbalen->lba = lba; 9058 lbalen->len = num_blocks; 9059 lbalen->flags = CTL_LLF_COMPARE | flags; 9060 9061 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 9062 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9063 return (retval); 9064 } 9065 9066 int 9067 ctl_verify(struct ctl_scsiio *ctsio) 9068 { 9069 struct ctl_lun *lun = CTL_LUN(ctsio); 9070 struct ctl_lba_len_flags *lbalen; 9071 uint64_t lba; 9072 uint32_t num_blocks; 9073 int bytchk, flags; 9074 int retval; 9075 9076 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9077 9078 bytchk = 0; 9079 flags = CTL_LLF_FUA; 9080 switch (ctsio->cdb[0]) { 9081 case VERIFY_10: { 9082 struct scsi_verify_10 *cdb; 9083 9084 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9085 if (cdb->byte2 & SVFY_BYTCHK) 9086 bytchk = 1; 9087 if (cdb->byte2 & SVFY_DPO) 9088 flags |= CTL_LLF_DPO; 9089 lba = scsi_4btoul(cdb->addr); 9090 num_blocks = scsi_2btoul(cdb->length); 9091 break; 9092 } 9093 case VERIFY_12: { 9094 struct scsi_verify_12 *cdb; 9095 9096 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9097 if (cdb->byte2 & SVFY_BYTCHK) 9098 bytchk = 1; 9099 if (cdb->byte2 & SVFY_DPO) 9100 flags |= CTL_LLF_DPO; 9101 lba = scsi_4btoul(cdb->addr); 9102 num_blocks = scsi_4btoul(cdb->length); 9103 break; 9104 } 9105 case VERIFY_16: { 9106 struct scsi_rw_16 *cdb; 9107 9108 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9109 if (cdb->byte2 & SVFY_BYTCHK) 9110 bytchk = 1; 9111 if (cdb->byte2 & SVFY_DPO) 9112 flags |= CTL_LLF_DPO; 9113 lba = scsi_8btou64(cdb->addr); 9114 num_blocks = scsi_4btoul(cdb->length); 9115 break; 9116 } 9117 default: 9118 /* 9119 * We got a command we don't support. This shouldn't 9120 * happen, commands should be filtered out above us. 9121 */ 9122 ctl_set_invalid_opcode(ctsio); 9123 ctl_done((union ctl_io *)ctsio); 9124 return (CTL_RETVAL_COMPLETE); 9125 } 9126 9127 /* 9128 * The first check is to make sure we're in bounds, the second 9129 * check is to catch wrap-around problems. If the lba + num blocks 9130 * is less than the lba, then we've wrapped around and the block 9131 * range is invalid anyway. 9132 */ 9133 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9134 || ((lba + num_blocks) < lba)) { 9135 ctl_set_lba_out_of_range(ctsio, 9136 MAX(lba, lun->be_lun->maxlba + 1)); 9137 ctl_done((union ctl_io *)ctsio); 9138 return (CTL_RETVAL_COMPLETE); 9139 } 9140 9141 /* 9142 * According to SBC-3, a transfer length of 0 is not an error. 
9143 */ 9144 if (num_blocks == 0) { 9145 ctl_set_success(ctsio); 9146 ctl_done((union ctl_io *)ctsio); 9147 return (CTL_RETVAL_COMPLETE); 9148 } 9149 9150 lbalen = (struct ctl_lba_len_flags *) 9151 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9152 lbalen->lba = lba; 9153 lbalen->len = num_blocks; 9154 if (bytchk) { 9155 lbalen->flags = CTL_LLF_COMPARE | flags; 9156 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9157 } else { 9158 lbalen->flags = CTL_LLF_VERIFY | flags; 9159 ctsio->kern_total_len = 0; 9160 } 9161 ctsio->kern_rel_offset = 0; 9162 9163 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9164 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9165 return (retval); 9166 } 9167 9168 int 9169 ctl_report_luns(struct ctl_scsiio *ctsio) 9170 { 9171 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9172 struct ctl_port *port = CTL_PORT(ctsio); 9173 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9174 struct scsi_report_luns *cdb; 9175 struct scsi_report_luns_data *lun_data; 9176 int num_filled, num_luns, num_port_luns, retval; 9177 uint32_t alloc_len, lun_datalen; 9178 uint32_t initidx, targ_lun_id, lun_id; 9179 9180 retval = CTL_RETVAL_COMPLETE; 9181 cdb = (struct scsi_report_luns *)ctsio->cdb; 9182 9183 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9184 9185 num_luns = 0; 9186 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9187 mtx_lock(&softc->ctl_lock); 9188 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9189 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9190 num_luns++; 9191 } 9192 mtx_unlock(&softc->ctl_lock); 9193 9194 switch (cdb->select_report) { 9195 case RPL_REPORT_DEFAULT: 9196 case RPL_REPORT_ALL: 9197 case RPL_REPORT_NONSUBSID: 9198 break; 9199 case RPL_REPORT_WELLKNOWN: 9200 case RPL_REPORT_ADMIN: 9201 case RPL_REPORT_CONGLOM: 9202 num_luns = 0; 9203 break; 9204 default: 9205 ctl_set_invalid_field(ctsio, 9206 /*sks_valid*/ 1, 9207 /*command*/ 1, 9208 /*field*/ 2, 9209 /*bit_valid*/ 0, 9210 /*bit*/ 0); 9211 ctl_done((union ctl_io *)ctsio); 9212 return (retval); 9213 break; /* NOTREACHED */ 9214 } 9215 9216 alloc_len = scsi_4btoul(cdb->length); 9217 /* 9218 * The initiator has to allocate at least 16 bytes for this request, 9219 * so he can at least get the header and the first LUN. Otherwise 9220 * we reject the request (per SPC-3 rev 14, section 6.21). 
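	 * (16 bytes is the 8-byte report header plus a single 8-byte LUN
	 * entry.)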
9221 */ 9222 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9223 sizeof(struct scsi_report_luns_lundata))) { 9224 ctl_set_invalid_field(ctsio, 9225 /*sks_valid*/ 1, 9226 /*command*/ 1, 9227 /*field*/ 6, 9228 /*bit_valid*/ 0, 9229 /*bit*/ 0); 9230 ctl_done((union ctl_io *)ctsio); 9231 return (retval); 9232 } 9233 9234 lun_datalen = sizeof(*lun_data) + 9235 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9236 9237 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9238 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9239 ctsio->kern_sg_entries = 0; 9240 9241 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9242 9243 mtx_lock(&softc->ctl_lock); 9244 for (targ_lun_id = 0, num_filled = 0; 9245 targ_lun_id < num_port_luns && num_filled < num_luns; 9246 targ_lun_id++) { 9247 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9248 if (lun_id == UINT32_MAX) 9249 continue; 9250 lun = softc->ctl_luns[lun_id]; 9251 if (lun == NULL) 9252 continue; 9253 9254 be64enc(lun_data->luns[num_filled++].lundata, 9255 ctl_encode_lun(targ_lun_id)); 9256 9257 /* 9258 * According to SPC-3, rev 14 section 6.21: 9259 * 9260 * "The execution of a REPORT LUNS command to any valid and 9261 * installed logical unit shall clear the REPORTED LUNS DATA 9262 * HAS CHANGED unit attention condition for all logical 9263 * units of that target with respect to the requesting 9264 * initiator. A valid and installed logical unit is one 9265 * having a PERIPHERAL QUALIFIER of 000b in the standard 9266 * INQUIRY data (see 6.4.2)." 9267 * 9268 * If request_lun is NULL, the LUN this report luns command 9269 * was issued to is either disabled or doesn't exist. In that 9270 * case, we shouldn't clear any pending lun change unit 9271 * attention. 9272 */ 9273 if (request_lun != NULL) { 9274 mtx_lock(&lun->lun_lock); 9275 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9276 mtx_unlock(&lun->lun_lock); 9277 } 9278 } 9279 mtx_unlock(&softc->ctl_lock); 9280 9281 /* 9282 * It's quite possible that we've returned fewer LUNs than we allocated 9283 * space for. Trim it. 9284 */ 9285 lun_datalen = sizeof(*lun_data) + 9286 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9287 ctsio->kern_rel_offset = 0; 9288 ctsio->kern_sg_entries = 0; 9289 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9290 ctsio->kern_total_len = ctsio->kern_data_len; 9291 9292 /* 9293 * We set this to the actual data length, regardless of how much 9294 * space we actually have to return results. If the user looks at 9295 * this value, he'll know whether or not he allocated enough space 9296 * and reissue the command if necessary. We don't support well 9297 * known logical units, so if the user asks for that, return none. 9298 */ 9299 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9300 9301 /* 9302 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9303 * this request. 
9304 */ 9305 ctl_set_success(ctsio); 9306 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9307 ctsio->be_move_done = ctl_config_move_done; 9308 ctl_datamove((union ctl_io *)ctsio); 9309 return (retval); 9310 } 9311 9312 int 9313 ctl_request_sense(struct ctl_scsiio *ctsio) 9314 { 9315 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9316 struct ctl_lun *lun = CTL_LUN(ctsio); 9317 struct scsi_request_sense *cdb; 9318 struct scsi_sense_data *sense_ptr, *ps; 9319 uint32_t initidx; 9320 int have_error; 9321 u_int sense_len = SSD_FULL_SIZE; 9322 scsi_sense_data_type sense_format; 9323 ctl_ua_type ua_type; 9324 uint8_t asc = 0, ascq = 0; 9325 9326 cdb = (struct scsi_request_sense *)ctsio->cdb; 9327 9328 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9329 9330 /* 9331 * Determine which sense format the user wants. 9332 */ 9333 if (cdb->byte2 & SRS_DESC) 9334 sense_format = SSD_TYPE_DESC; 9335 else 9336 sense_format = SSD_TYPE_FIXED; 9337 9338 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9339 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9340 ctsio->kern_sg_entries = 0; 9341 ctsio->kern_rel_offset = 0; 9342 9343 /* 9344 * struct scsi_sense_data, which is currently set to 256 bytes, is 9345 * larger than the largest allowed value for the length field in the 9346 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9347 */ 9348 ctsio->kern_data_len = cdb->length; 9349 ctsio->kern_total_len = cdb->length; 9350 9351 /* 9352 * If we don't have a LUN, we don't have any pending sense. 9353 */ 9354 if (lun == NULL || 9355 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9356 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9357 /* "Logical unit not supported" */ 9358 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9359 /*current_error*/ 1, 9360 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9361 /*asc*/ 0x25, 9362 /*ascq*/ 0x00, 9363 SSD_ELEM_NONE); 9364 goto send; 9365 } 9366 9367 have_error = 0; 9368 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9369 /* 9370 * Check for pending sense, and then for pending unit attentions. 9371 * Pending sense gets returned first, then pending unit attentions. 9372 */ 9373 mtx_lock(&lun->lun_lock); 9374 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9375 if (ps != NULL) 9376 ps += initidx % CTL_MAX_INIT_PER_PORT; 9377 if (ps != NULL && ps->error_code != 0) { 9378 scsi_sense_data_type stored_format; 9379 9380 /* 9381 * Check to see which sense format was used for the stored 9382 * sense data. 9383 */ 9384 stored_format = scsi_sense_type(ps); 9385 9386 /* 9387 * If the user requested a different sense format than the 9388 * one we stored, then we need to convert it to the other 9389 * format. If we're going from descriptor to fixed format 9390 * sense data, we may lose things in translation, depending 9391 * on what options were used. 9392 * 9393 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9394 * for some reason we'll just copy it out as-is. 
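		 * Only fixed <-> descriptor conversions are handled below;
		 * any other combination is copied through unchanged.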
9395 */ 9396 if ((stored_format == SSD_TYPE_FIXED) 9397 && (sense_format == SSD_TYPE_DESC)) 9398 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9399 ps, (struct scsi_sense_data_desc *)sense_ptr); 9400 else if ((stored_format == SSD_TYPE_DESC) 9401 && (sense_format == SSD_TYPE_FIXED)) 9402 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9403 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9404 else 9405 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9406 9407 ps->error_code = 0; 9408 have_error = 1; 9409 } else { 9410 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9411 sense_format); 9412 if (ua_type != CTL_UA_NONE) 9413 have_error = 1; 9414 } 9415 if (have_error == 0) { 9416 /* 9417 * Report informational exception if have one and allowed. 9418 */ 9419 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9420 asc = lun->ie_asc; 9421 ascq = lun->ie_ascq; 9422 } 9423 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9424 /*current_error*/ 1, 9425 /*sense_key*/ SSD_KEY_NO_SENSE, 9426 /*asc*/ asc, 9427 /*ascq*/ ascq, 9428 SSD_ELEM_NONE); 9429 } 9430 mtx_unlock(&lun->lun_lock); 9431 9432 send: 9433 /* 9434 * We report the SCSI status as OK, since the status of the command 9435 * itself is OK. We're reporting sense as parameter data. 9436 */ 9437 ctl_set_success(ctsio); 9438 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9439 ctsio->be_move_done = ctl_config_move_done; 9440 ctl_datamove((union ctl_io *)ctsio); 9441 return (CTL_RETVAL_COMPLETE); 9442 } 9443 9444 int 9445 ctl_tur(struct ctl_scsiio *ctsio) 9446 { 9447 9448 CTL_DEBUG_PRINT(("ctl_tur\n")); 9449 9450 ctl_set_success(ctsio); 9451 ctl_done((union ctl_io *)ctsio); 9452 9453 return (CTL_RETVAL_COMPLETE); 9454 } 9455 9456 /* 9457 * SCSI VPD page 0x00, the Supported VPD Pages page. 9458 */ 9459 static int 9460 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9461 { 9462 struct ctl_lun *lun = CTL_LUN(ctsio); 9463 struct scsi_vpd_supported_pages *pages; 9464 int sup_page_size; 9465 int p; 9466 9467 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9468 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9469 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9470 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9471 ctsio->kern_rel_offset = 0; 9472 ctsio->kern_sg_entries = 0; 9473 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9474 ctsio->kern_total_len = ctsio->kern_data_len; 9475 9476 /* 9477 * The control device is always connected. The disk device, on the 9478 * other hand, may not be online all the time. Need to change this 9479 * to figure out whether the disk device is actually online or not. 
9480 */ 9481 if (lun != NULL) 9482 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9483 lun->be_lun->lun_type; 9484 else 9485 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9486 9487 p = 0; 9488 /* Supported VPD pages */ 9489 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9490 /* Serial Number */ 9491 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9492 /* Device Identification */ 9493 pages->page_list[p++] = SVPD_DEVICE_ID; 9494 /* Extended INQUIRY Data */ 9495 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9496 /* Mode Page Policy */ 9497 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9498 /* SCSI Ports */ 9499 pages->page_list[p++] = SVPD_SCSI_PORTS; 9500 /* Third-party Copy */ 9501 pages->page_list[p++] = SVPD_SCSI_TPC; 9502 /* SCSI Feature Sets */ 9503 pages->page_list[p++] = SVPD_SCSI_SFS; 9504 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9505 /* Block limits */ 9506 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9507 /* Block Device Characteristics */ 9508 pages->page_list[p++] = SVPD_BDC; 9509 /* Logical Block Provisioning */ 9510 pages->page_list[p++] = SVPD_LBP; 9511 } 9512 pages->length = p; 9513 9514 ctl_set_success(ctsio); 9515 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9516 ctsio->be_move_done = ctl_config_move_done; 9517 ctl_datamove((union ctl_io *)ctsio); 9518 return (CTL_RETVAL_COMPLETE); 9519 } 9520 9521 /* 9522 * SCSI VPD page 0x80, the Unit Serial Number page. 9523 */ 9524 static int 9525 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9526 { 9527 struct ctl_lun *lun = CTL_LUN(ctsio); 9528 struct scsi_vpd_unit_serial_number *sn_ptr; 9529 int data_len; 9530 9531 data_len = 4 + CTL_SN_LEN; 9532 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9533 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9534 ctsio->kern_rel_offset = 0; 9535 ctsio->kern_sg_entries = 0; 9536 ctsio->kern_data_len = min(data_len, alloc_len); 9537 ctsio->kern_total_len = ctsio->kern_data_len; 9538 9539 /* 9540 * The control device is always connected. The disk device, on the 9541 * other hand, may not be online all the time. Need to change this 9542 * to figure out whether the disk device is actually online or not. 9543 */ 9544 if (lun != NULL) 9545 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9546 lun->be_lun->lun_type; 9547 else 9548 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9549 9550 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9551 sn_ptr->length = CTL_SN_LEN; 9552 /* 9553 * If we don't have a LUN, we just leave the serial number as 9554 * all spaces. 9555 */ 9556 if (lun != NULL) { 9557 strncpy((char *)sn_ptr->serial_num, 9558 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9559 } else 9560 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9561 9562 ctl_set_success(ctsio); 9563 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9564 ctsio->be_move_done = ctl_config_move_done; 9565 ctl_datamove((union ctl_io *)ctsio); 9566 return (CTL_RETVAL_COMPLETE); 9567 } 9568 9569 /* 9570 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9571 */ 9572 static int 9573 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9574 { 9575 struct ctl_lun *lun = CTL_LUN(ctsio); 9576 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9577 int data_len; 9578 9579 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9580 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9581 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9582 ctsio->kern_sg_entries = 0; 9583 ctsio->kern_rel_offset = 0; 9584 ctsio->kern_data_len = min(data_len, alloc_len); 9585 ctsio->kern_total_len = ctsio->kern_data_len; 9586 9587 /* 9588 * The control device is always connected. The disk device, on the 9589 * other hand, may not be online all the time. 9590 */ 9591 if (lun != NULL) 9592 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9593 lun->be_lun->lun_type; 9594 else 9595 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9596 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9597 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9598 /* 9599 * We support head of queue, ordered and simple tags. 9600 */ 9601 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9602 /* 9603 * Volatile cache supported. 9604 */ 9605 eid_ptr->flags3 = SVPD_EID_V_SUP; 9606 9607 /* 9608 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9609 * attention for a particular IT nexus on all LUNs once we report 9610 * it to that nexus once. This bit is required as of SPC-4. 9611 */ 9612 eid_ptr->flags4 = SVPD_EID_LUICLR; 9613 9614 /* 9615 * We support revert to defaults (RTD) bit in MODE SELECT. 9616 */ 9617 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9618 9619 /* 9620 * XXX KDM in order to correctly answer this, we would need 9621 * information from the SIM to determine how much sense data it 9622 * can send. So this would really be a path inquiry field, most 9623 * likely. This can be set to a maximum of 252 according to SPC-4, 9624 * but the hardware may or may not be able to support that much. 9625 * 0 just means that the maximum sense data length is not reported. 9626 */ 9627 eid_ptr->max_sense_length = 0; 9628 9629 ctl_set_success(ctsio); 9630 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9631 ctsio->be_move_done = ctl_config_move_done; 9632 ctl_datamove((union ctl_io *)ctsio); 9633 return (CTL_RETVAL_COMPLETE); 9634 } 9635 9636 static int 9637 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9638 { 9639 struct ctl_lun *lun = CTL_LUN(ctsio); 9640 struct scsi_vpd_mode_page_policy *mpp_ptr; 9641 int data_len; 9642 9643 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9644 sizeof(struct scsi_vpd_mode_page_policy_descr); 9645 9646 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9647 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9648 ctsio->kern_rel_offset = 0; 9649 ctsio->kern_sg_entries = 0; 9650 ctsio->kern_data_len = min(data_len, alloc_len); 9651 ctsio->kern_total_len = ctsio->kern_data_len; 9652 9653 /* 9654 * The control device is always connected. The disk device, on the 9655 * other hand, may not be online all the time. 
9656 */ 9657 if (lun != NULL) 9658 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9659 lun->be_lun->lun_type; 9660 else 9661 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9662 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9663 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9664 mpp_ptr->descr[0].page_code = 0x3f; 9665 mpp_ptr->descr[0].subpage_code = 0xff; 9666 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9667 9668 ctl_set_success(ctsio); 9669 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9670 ctsio->be_move_done = ctl_config_move_done; 9671 ctl_datamove((union ctl_io *)ctsio); 9672 return (CTL_RETVAL_COMPLETE); 9673 } 9674 9675 /* 9676 * SCSI VPD page 0x83, the Device Identification page. 9677 */ 9678 static int 9679 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9680 { 9681 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9682 struct ctl_port *port = CTL_PORT(ctsio); 9683 struct ctl_lun *lun = CTL_LUN(ctsio); 9684 struct scsi_vpd_device_id *devid_ptr; 9685 struct scsi_vpd_id_descriptor *desc; 9686 int data_len, g; 9687 uint8_t proto; 9688 9689 data_len = sizeof(struct scsi_vpd_device_id) + 9690 sizeof(struct scsi_vpd_id_descriptor) + 9691 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9692 sizeof(struct scsi_vpd_id_descriptor) + 9693 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9694 if (lun && lun->lun_devid) 9695 data_len += lun->lun_devid->len; 9696 if (port && port->port_devid) 9697 data_len += port->port_devid->len; 9698 if (port && port->target_devid) 9699 data_len += port->target_devid->len; 9700 9701 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9702 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9703 ctsio->kern_sg_entries = 0; 9704 ctsio->kern_rel_offset = 0; 9705 ctsio->kern_sg_entries = 0; 9706 ctsio->kern_data_len = min(data_len, alloc_len); 9707 ctsio->kern_total_len = ctsio->kern_data_len; 9708 9709 /* 9710 * The control device is always connected. The disk device, on the 9711 * other hand, may not be online all the time. 9712 */ 9713 if (lun != NULL) 9714 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9715 lun->be_lun->lun_type; 9716 else 9717 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9718 devid_ptr->page_code = SVPD_DEVICE_ID; 9719 scsi_ulto2b(data_len - 4, devid_ptr->length); 9720 9721 if (port && port->port_type == CTL_PORT_FC) 9722 proto = SCSI_PROTO_FC << 4; 9723 else if (port && port->port_type == CTL_PORT_SAS) 9724 proto = SCSI_PROTO_SAS << 4; 9725 else if (port && port->port_type == CTL_PORT_ISCSI) 9726 proto = SCSI_PROTO_ISCSI << 4; 9727 else 9728 proto = SCSI_PROTO_SPI << 4; 9729 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9730 9731 /* 9732 * We're using a LUN association here. i.e., this device ID is a 9733 * per-LUN identifier. 9734 */ 9735 if (lun && lun->lun_devid) { 9736 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9737 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9738 lun->lun_devid->len); 9739 } 9740 9741 /* 9742 * This is for the WWPN which is a port association. 
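 * i.e. the identifier of the target port through which this command was received.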
9743 */ 9744 if (port && port->port_devid) { 9745 memcpy(desc, port->port_devid->data, port->port_devid->len); 9746 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9747 port->port_devid->len); 9748 } 9749 9750 /* 9751 * This is for the Relative Target Port(type 4h) identifier 9752 */ 9753 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9754 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9755 SVPD_ID_TYPE_RELTARG; 9756 desc->length = 4; 9757 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9758 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9759 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9760 9761 /* 9762 * This is for the Target Port Group(type 5h) identifier 9763 */ 9764 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9765 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9766 SVPD_ID_TYPE_TPORTGRP; 9767 desc->length = 4; 9768 if (softc->is_single || 9769 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9770 g = 1; 9771 else 9772 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9773 scsi_ulto2b(g, &desc->identifier[2]); 9774 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9775 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9776 9777 /* 9778 * This is for the Target identifier 9779 */ 9780 if (port && port->target_devid) { 9781 memcpy(desc, port->target_devid->data, port->target_devid->len); 9782 } 9783 9784 ctl_set_success(ctsio); 9785 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9786 ctsio->be_move_done = ctl_config_move_done; 9787 ctl_datamove((union ctl_io *)ctsio); 9788 return (CTL_RETVAL_COMPLETE); 9789 } 9790 9791 static int 9792 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9793 { 9794 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9795 struct ctl_lun *lun = CTL_LUN(ctsio); 9796 struct scsi_vpd_scsi_ports *sp; 9797 struct scsi_vpd_port_designation *pd; 9798 struct scsi_vpd_port_designation_cont *pdc; 9799 struct ctl_port *port; 9800 int data_len, num_target_ports, iid_len, id_len; 9801 9802 num_target_ports = 0; 9803 iid_len = 0; 9804 id_len = 0; 9805 mtx_lock(&softc->ctl_lock); 9806 STAILQ_FOREACH(port, &softc->port_list, links) { 9807 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9808 continue; 9809 if (lun != NULL && 9810 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9811 continue; 9812 num_target_ports++; 9813 if (port->init_devid) 9814 iid_len += port->init_devid->len; 9815 if (port->port_devid) 9816 id_len += port->port_devid->len; 9817 } 9818 mtx_unlock(&softc->ctl_lock); 9819 9820 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9821 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9822 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9823 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9824 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9825 ctsio->kern_sg_entries = 0; 9826 ctsio->kern_rel_offset = 0; 9827 ctsio->kern_sg_entries = 0; 9828 ctsio->kern_data_len = min(data_len, alloc_len); 9829 ctsio->kern_total_len = ctsio->kern_data_len; 9830 9831 /* 9832 * The control device is always connected. The disk device, on the 9833 * other hand, may not be online all the time. Need to change this 9834 * to figure out whether the disk device is actually online or not. 
9835 */ 9836 if (lun != NULL) 9837 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9838 lun->be_lun->lun_type; 9839 else 9840 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9841 9842 sp->page_code = SVPD_SCSI_PORTS; 9843 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9844 sp->page_length); 9845 pd = &sp->design[0]; 9846 9847 mtx_lock(&softc->ctl_lock); 9848 STAILQ_FOREACH(port, &softc->port_list, links) { 9849 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9850 continue; 9851 if (lun != NULL && 9852 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9853 continue; 9854 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9855 if (port->init_devid) { 9856 iid_len = port->init_devid->len; 9857 memcpy(pd->initiator_transportid, 9858 port->init_devid->data, port->init_devid->len); 9859 } else 9860 iid_len = 0; 9861 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9862 pdc = (struct scsi_vpd_port_designation_cont *) 9863 (&pd->initiator_transportid[iid_len]); 9864 if (port->port_devid) { 9865 id_len = port->port_devid->len; 9866 memcpy(pdc->target_port_descriptors, 9867 port->port_devid->data, port->port_devid->len); 9868 } else 9869 id_len = 0; 9870 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9871 pd = (struct scsi_vpd_port_designation *) 9872 ((uint8_t *)pdc->target_port_descriptors + id_len); 9873 } 9874 mtx_unlock(&softc->ctl_lock); 9875 9876 ctl_set_success(ctsio); 9877 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9878 ctsio->be_move_done = ctl_config_move_done; 9879 ctl_datamove((union ctl_io *)ctsio); 9880 return (CTL_RETVAL_COMPLETE); 9881 } 9882 9883 static int 9884 ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) 9885 { 9886 struct ctl_lun *lun = CTL_LUN(ctsio); 9887 struct scsi_vpd_sfs *sfs_ptr; 9888 int sfs_page_size, n; 9889 9890 sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; 9891 ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); 9892 sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; 9893 ctsio->kern_sg_entries = 0; 9894 ctsio->kern_rel_offset = 0; 9895 ctsio->kern_sg_entries = 0; 9896 ctsio->kern_data_len = min(sfs_page_size, alloc_len); 9897 ctsio->kern_total_len = ctsio->kern_data_len; 9898 9899 /* 9900 * The control device is always connected. The disk device, on the 9901 * other hand, may not be online all the time. Need to change this 9902 * to figure out whether the disk device is actually online or not. 
9903 */ 9904 if (lun != NULL) 9905 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9906 lun->be_lun->lun_type; 9907 else 9908 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9909 9910 sfs_ptr->page_code = SVPD_SCSI_SFS; 9911 n = 0; 9912 /* Discovery 2016 */ 9913 scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); 9914 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9915 /* SBC Base 2016 */ 9916 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); 9917 /* SBC Base 2010 */ 9918 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); 9919 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9920 /* Basic Provisioning 2016 */ 9921 scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); 9922 } 9923 /* Drive Maintenance 2016 */ 9924 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); 9925 } 9926 scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); 9927 9928 ctl_set_success(ctsio); 9929 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9930 ctsio->be_move_done = ctl_config_move_done; 9931 ctl_datamove((union ctl_io *)ctsio); 9932 return (CTL_RETVAL_COMPLETE); 9933 } 9934 9935 static int 9936 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9937 { 9938 struct ctl_lun *lun = CTL_LUN(ctsio); 9939 struct scsi_vpd_block_limits *bl_ptr; 9940 const char *val; 9941 uint64_t ival; 9942 9943 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9944 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9945 ctsio->kern_sg_entries = 0; 9946 ctsio->kern_rel_offset = 0; 9947 ctsio->kern_sg_entries = 0; 9948 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9949 ctsio->kern_total_len = ctsio->kern_data_len; 9950 9951 /* 9952 * The control device is always connected. The disk device, on the 9953 * other hand, may not be online all the time. Need to change this 9954 * to figure out whether the disk device is actually online or not. 
9955 */ 9956 if (lun != NULL) 9957 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9958 lun->be_lun->lun_type; 9959 else 9960 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9961 9962 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9963 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9964 bl_ptr->max_cmp_write_len = 0xff; 9965 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9966 if (lun != NULL) { 9967 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9968 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9969 ival = 0xffffffff; 9970 val = dnvlist_get_string(lun->be_lun->options, 9971 "unmap_max_lba", NULL); 9972 if (val != NULL) 9973 ctl_expand_number(val, &ival); 9974 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9975 ival = 0xffffffff; 9976 val = dnvlist_get_string(lun->be_lun->options, 9977 "unmap_max_descr", NULL); 9978 if (val != NULL) 9979 ctl_expand_number(val, &ival); 9980 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9981 if (lun->be_lun->ublockexp != 0) { 9982 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9983 bl_ptr->opt_unmap_grain); 9984 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9985 bl_ptr->unmap_grain_align); 9986 } 9987 } 9988 scsi_ulto4b(lun->be_lun->atomicblock, 9989 bl_ptr->max_atomic_transfer_length); 9990 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9991 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9992 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9993 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9994 ival = UINT64_MAX; 9995 val = dnvlist_get_string(lun->be_lun->options, 9996 "write_same_max_lba", NULL); 9997 if (val != NULL) 9998 ctl_expand_number(val, &ival); 9999 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 10000 if (lun->be_lun->maxlba + 1 > ival) 10001 bl_ptr->flags |= SVPD_BL_WSNZ; 10002 } 10003 10004 ctl_set_success(ctsio); 10005 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10006 ctsio->be_move_done = ctl_config_move_done; 10007 ctl_datamove((union ctl_io *)ctsio); 10008 return (CTL_RETVAL_COMPLETE); 10009 } 10010 10011 static int 10012 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10013 { 10014 struct ctl_lun *lun = CTL_LUN(ctsio); 10015 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10016 const char *value; 10017 u_int i; 10018 10019 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10020 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10021 ctsio->kern_sg_entries = 0; 10022 ctsio->kern_rel_offset = 0; 10023 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 10024 ctsio->kern_total_len = ctsio->kern_data_len; 10025 10026 /* 10027 * The control device is always connected. The disk device, on the 10028 * other hand, may not be online all the time. Need to change this 10029 * to figure out whether the disk device is actually online or not. 
10030 */ 10031 if (lun != NULL) 10032 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10033 lun->be_lun->lun_type; 10034 else 10035 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10036 bdc_ptr->page_code = SVPD_BDC; 10037 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10038 if (lun != NULL && 10039 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) 10040 i = strtol(value, NULL, 0); 10041 else 10042 i = CTL_DEFAULT_ROTATION_RATE; 10043 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10044 if (lun != NULL && 10045 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) 10046 i = strtol(value, NULL, 0); 10047 else 10048 i = 0; 10049 bdc_ptr->wab_wac_ff = (i & 0x0f); 10050 bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; 10051 10052 ctl_set_success(ctsio); 10053 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10054 ctsio->be_move_done = ctl_config_move_done; 10055 ctl_datamove((union ctl_io *)ctsio); 10056 return (CTL_RETVAL_COMPLETE); 10057 } 10058 10059 static int 10060 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10061 { 10062 struct ctl_lun *lun = CTL_LUN(ctsio); 10063 struct scsi_vpd_logical_block_prov *lbp_ptr; 10064 const char *value; 10065 10066 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10067 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10068 ctsio->kern_sg_entries = 0; 10069 ctsio->kern_rel_offset = 0; 10070 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 10071 ctsio->kern_total_len = ctsio->kern_data_len; 10072 10073 /* 10074 * The control device is always connected. The disk device, on the 10075 * other hand, may not be online all the time. Need to change this 10076 * to figure out whether the disk device is actually online or not. 10077 */ 10078 if (lun != NULL) 10079 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10080 lun->be_lun->lun_type; 10081 else 10082 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10083 10084 lbp_ptr->page_code = SVPD_LBP; 10085 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10086 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10087 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10088 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10089 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10090 value = dnvlist_get_string(lun->be_lun->options, 10091 "provisioning_type", NULL); 10092 if (value != NULL) { 10093 if (strcmp(value, "resource") == 0) 10094 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 10095 else if (strcmp(value, "thin") == 0) 10096 lbp_ptr->prov_type = SVPD_LBP_THIN; 10097 } else 10098 lbp_ptr->prov_type = SVPD_LBP_THIN; 10099 } 10100 10101 ctl_set_success(ctsio); 10102 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10103 ctsio->be_move_done = ctl_config_move_done; 10104 ctl_datamove((union ctl_io *)ctsio); 10105 return (CTL_RETVAL_COMPLETE); 10106 } 10107 10108 /* 10109 * INQUIRY with the EVPD bit set. 
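 * Dispatch on the VPD page code from the CDB; unsupported page codes are rejected below with ILLEGAL REQUEST (invalid field in CDB).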
10110 */ 10111 static int 10112 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10113 { 10114 struct ctl_lun *lun = CTL_LUN(ctsio); 10115 struct scsi_inquiry *cdb; 10116 int alloc_len, retval; 10117 10118 cdb = (struct scsi_inquiry *)ctsio->cdb; 10119 alloc_len = scsi_2btoul(cdb->length); 10120 10121 switch (cdb->page_code) { 10122 case SVPD_SUPPORTED_PAGES: 10123 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10124 break; 10125 case SVPD_UNIT_SERIAL_NUMBER: 10126 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10127 break; 10128 case SVPD_DEVICE_ID: 10129 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10130 break; 10131 case SVPD_EXTENDED_INQUIRY_DATA: 10132 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10133 break; 10134 case SVPD_MODE_PAGE_POLICY: 10135 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10136 break; 10137 case SVPD_SCSI_PORTS: 10138 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10139 break; 10140 case SVPD_SCSI_TPC: 10141 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10142 break; 10143 case SVPD_SCSI_SFS: 10144 retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); 10145 break; 10146 case SVPD_BLOCK_LIMITS: 10147 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10148 goto err; 10149 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10150 break; 10151 case SVPD_BDC: 10152 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10153 goto err; 10154 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10155 break; 10156 case SVPD_LBP: 10157 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10158 goto err; 10159 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10160 break; 10161 default: 10162 err: 10163 ctl_set_invalid_field(ctsio, 10164 /*sks_valid*/ 1, 10165 /*command*/ 1, 10166 /*field*/ 2, 10167 /*bit_valid*/ 0, 10168 /*bit*/ 0); 10169 ctl_done((union ctl_io *)ctsio); 10170 retval = CTL_RETVAL_COMPLETE; 10171 break; 10172 } 10173 10174 return (retval); 10175 } 10176 10177 /* 10178 * Standard INQUIRY data. 10179 */ 10180 static int 10181 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10182 { 10183 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10184 struct ctl_port *port = CTL_PORT(ctsio); 10185 struct ctl_lun *lun = CTL_LUN(ctsio); 10186 struct scsi_inquiry_data *inq_ptr; 10187 struct scsi_inquiry *cdb; 10188 const char *val; 10189 uint32_t alloc_len, data_len; 10190 ctl_port_type port_type; 10191 10192 port_type = port->port_type; 10193 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10194 port_type = CTL_PORT_SCSI; 10195 10196 cdb = (struct scsi_inquiry *)ctsio->cdb; 10197 alloc_len = scsi_2btoul(cdb->length); 10198 10199 /* 10200 * We malloc the full inquiry data size here and fill it 10201 * in. If the user only asks for less, we'll give him 10202 * that much. 
10203 */ 10204 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10205 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10206 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10207 ctsio->kern_sg_entries = 0; 10208 ctsio->kern_rel_offset = 0; 10209 ctsio->kern_data_len = min(data_len, alloc_len); 10210 ctsio->kern_total_len = ctsio->kern_data_len; 10211 10212 if (lun != NULL) { 10213 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10214 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10215 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10216 lun->be_lun->lun_type; 10217 } else { 10218 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10219 lun->be_lun->lun_type; 10220 } 10221 if (lun->flags & CTL_LUN_REMOVABLE) 10222 inq_ptr->dev_qual2 |= SID_RMB; 10223 } else 10224 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10225 10226 /* RMB in byte 2 is 0 */ 10227 inq_ptr->version = SCSI_REV_SPC5; 10228 10229 /* 10230 * According to SAM-3, even if a device only supports a single 10231 * level of LUN addressing, it should still set the HISUP bit: 10232 * 10233 * 4.9.1 Logical unit numbers overview 10234 * 10235 * All logical unit number formats described in this standard are 10236 * hierarchical in structure even when only a single level in that 10237 * hierarchy is used. The HISUP bit shall be set to one in the 10238 * standard INQUIRY data (see SPC-2) when any logical unit number 10239 * format described in this standard is used. Non-hierarchical 10240 * formats are outside the scope of this standard. 10241 * 10242 * Therefore we set the HiSup bit here. 10243 * 10244 * The response format is 2, per SPC-3. 10245 */ 10246 inq_ptr->response_format = SID_HiSup | 2; 10247 10248 inq_ptr->additional_length = data_len - 10249 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10250 CTL_DEBUG_PRINT(("additional_length = %d\n", 10251 inq_ptr->additional_length)); 10252 10253 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10254 if (port_type == CTL_PORT_SCSI) 10255 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10256 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10257 inq_ptr->flags = SID_CmdQue; 10258 if (port_type == CTL_PORT_SCSI) 10259 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10260 10261 /* 10262 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10263 * We have 8 bytes for the vendor name, and 16 bytes for the device 10264 * name and 4 bytes for the revision. 
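 * A user-supplied value shorter than its field ends up space padded (the field is pre-filled with spaces); a longer value is silently truncated by the strncpy() calls below.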
10265 */ 10266 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10267 "vendor", NULL)) == NULL) { 10268 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10269 } else { 10270 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10271 strncpy(inq_ptr->vendor, val, 10272 min(sizeof(inq_ptr->vendor), strlen(val))); 10273 } 10274 if (lun == NULL) { 10275 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10276 sizeof(inq_ptr->product)); 10277 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", 10278 NULL)) == NULL) { 10279 switch (lun->be_lun->lun_type) { 10280 case T_DIRECT: 10281 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10282 sizeof(inq_ptr->product)); 10283 break; 10284 case T_PROCESSOR: 10285 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10286 sizeof(inq_ptr->product)); 10287 break; 10288 case T_CDROM: 10289 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10290 sizeof(inq_ptr->product)); 10291 break; 10292 default: 10293 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10294 sizeof(inq_ptr->product)); 10295 break; 10296 } 10297 } else { 10298 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10299 strncpy(inq_ptr->product, val, 10300 min(sizeof(inq_ptr->product), strlen(val))); 10301 } 10302 10303 /* 10304 * XXX make this a macro somewhere so it automatically gets 10305 * incremented when we make changes. 10306 */ 10307 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10308 "revision", NULL)) == NULL) { 10309 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10310 } else { 10311 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10312 strncpy(inq_ptr->revision, val, 10313 min(sizeof(inq_ptr->revision), strlen(val))); 10314 } 10315 10316 /* 10317 * For parallel SCSI, we support double transition and single 10318 * transition clocking. We also support QAS (Quick Arbitration 10319 * and Selection) and Information Unit transfers on both the 10320 * control and array devices. 
10321 */ 10322 if (port_type == CTL_PORT_SCSI) 10323 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10324 SID_SPI_IUS; 10325 10326 /* SAM-6 (no version claimed) */ 10327 scsi_ulto2b(0x00C0, inq_ptr->version1); 10328 /* SPC-5 (no version claimed) */ 10329 scsi_ulto2b(0x05C0, inq_ptr->version2); 10330 if (port_type == CTL_PORT_FC) { 10331 /* FCP-2 ANSI INCITS.350:2003 */ 10332 scsi_ulto2b(0x0917, inq_ptr->version3); 10333 } else if (port_type == CTL_PORT_SCSI) { 10334 /* SPI-4 ANSI INCITS.362:200x */ 10335 scsi_ulto2b(0x0B56, inq_ptr->version3); 10336 } else if (port_type == CTL_PORT_ISCSI) { 10337 /* iSCSI (no version claimed) */ 10338 scsi_ulto2b(0x0960, inq_ptr->version3); 10339 } else if (port_type == CTL_PORT_SAS) { 10340 /* SAS (no version claimed) */ 10341 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10342 } else if (port_type == CTL_PORT_UMASS) { 10343 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10344 scsi_ulto2b(0x1730, inq_ptr->version3); 10345 } 10346 10347 if (lun == NULL) { 10348 /* SBC-4 (no version claimed) */ 10349 scsi_ulto2b(0x0600, inq_ptr->version4); 10350 } else { 10351 switch (lun->be_lun->lun_type) { 10352 case T_DIRECT: 10353 /* SBC-4 (no version claimed) */ 10354 scsi_ulto2b(0x0600, inq_ptr->version4); 10355 break; 10356 case T_PROCESSOR: 10357 break; 10358 case T_CDROM: 10359 /* MMC-6 (no version claimed) */ 10360 scsi_ulto2b(0x04E0, inq_ptr->version4); 10361 break; 10362 default: 10363 break; 10364 } 10365 } 10366 10367 ctl_set_success(ctsio); 10368 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10369 ctsio->be_move_done = ctl_config_move_done; 10370 ctl_datamove((union ctl_io *)ctsio); 10371 return (CTL_RETVAL_COMPLETE); 10372 } 10373 10374 int 10375 ctl_inquiry(struct ctl_scsiio *ctsio) 10376 { 10377 struct scsi_inquiry *cdb; 10378 int retval; 10379 10380 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10381 10382 cdb = (struct scsi_inquiry *)ctsio->cdb; 10383 if (cdb->byte2 & SI_EVPD) 10384 retval = ctl_inquiry_evpd(ctsio); 10385 else if (cdb->page_code == 0) 10386 retval = ctl_inquiry_std(ctsio); 10387 else { 10388 ctl_set_invalid_field(ctsio, 10389 /*sks_valid*/ 1, 10390 /*command*/ 1, 10391 /*field*/ 2, 10392 /*bit_valid*/ 0, 10393 /*bit*/ 0); 10394 ctl_done((union ctl_io *)ctsio); 10395 return (CTL_RETVAL_COMPLETE); 10396 } 10397 10398 return (retval); 10399 } 10400 10401 int 10402 ctl_get_config(struct ctl_scsiio *ctsio) 10403 { 10404 struct ctl_lun *lun = CTL_LUN(ctsio); 10405 struct scsi_get_config_header *hdr; 10406 struct scsi_get_config_feature *feature; 10407 struct scsi_get_config *cdb; 10408 uint32_t alloc_len, data_len; 10409 int rt, starting; 10410 10411 cdb = (struct scsi_get_config *)ctsio->cdb; 10412 rt = (cdb->rt & SGC_RT_MASK); 10413 starting = scsi_2btoul(cdb->starting_feature); 10414 alloc_len = scsi_2btoul(cdb->length); 10415 10416 data_len = sizeof(struct scsi_get_config_header) + 10417 sizeof(struct scsi_get_config_feature) + 8 + 10418 sizeof(struct scsi_get_config_feature) + 8 + 10419 sizeof(struct scsi_get_config_feature) + 4 + 10420 sizeof(struct scsi_get_config_feature) + 4 + 10421 sizeof(struct scsi_get_config_feature) + 8 + 10422 sizeof(struct scsi_get_config_feature) + 10423 sizeof(struct scsi_get_config_feature) + 4 + 10424 sizeof(struct scsi_get_config_feature) + 4 + 10425 sizeof(struct scsi_get_config_feature) + 4 + 10426 sizeof(struct scsi_get_config_feature) + 4 + 10427 sizeof(struct scsi_get_config_feature) + 4 + 10428 sizeof(struct scsi_get_config_feature) + 4; 10429 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10430 ctsio->kern_sg_entries = 0; 10431 ctsio->kern_rel_offset = 0; 10432 10433 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10434 if (lun->flags & CTL_LUN_NO_MEDIA) 10435 scsi_ulto2b(0x0000, hdr->current_profile); 10436 else 10437 scsi_ulto2b(0x0010, hdr->current_profile); 10438 feature = (struct scsi_get_config_feature *)(hdr + 1); 10439 10440 if (starting > 0x003b) 10441 goto done; 10442 if (starting > 0x003a) 10443 goto f3b; 10444 if (starting > 0x002b) 10445 goto f3a; 10446 if (starting > 0x002a) 10447 goto f2b; 10448 if (starting > 0x001f) 10449 goto f2a; 10450 if (starting > 0x001e) 10451 goto f1f; 10452 if (starting > 0x001d) 10453 goto f1e; 10454 if (starting > 0x0010) 10455 goto f1d; 10456 if (starting > 0x0003) 10457 goto f10; 10458 if (starting > 0x0002) 10459 goto f3; 10460 if (starting > 0x0001) 10461 goto f2; 10462 if (starting > 0x0000) 10463 goto f1; 10464 10465 /* Profile List */ 10466 scsi_ulto2b(0x0000, feature->feature_code); 10467 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10468 feature->add_length = 8; 10469 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10470 feature->feature_data[2] = 0x00; 10471 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10472 feature->feature_data[6] = 0x01; 10473 feature = (struct scsi_get_config_feature *) 10474 &feature->feature_data[feature->add_length]; 10475 10476 f1: /* Core */ 10477 scsi_ulto2b(0x0001, feature->feature_code); 10478 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10479 feature->add_length = 8; 10480 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10481 feature->feature_data[4] = 0x03; 10482 feature = (struct scsi_get_config_feature *) 10483 &feature->feature_data[feature->add_length]; 10484 10485 f2: /* Morphing */ 10486 scsi_ulto2b(0x0002, feature->feature_code); 10487 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10488 feature->add_length = 4; 10489 feature->feature_data[0] = 0x02; 10490 feature = (struct scsi_get_config_feature *) 10491 &feature->feature_data[feature->add_length]; 10492 10493 f3: /* Removable Medium */ 10494 scsi_ulto2b(0x0003, feature->feature_code); 10495 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10496 feature->add_length = 4; 10497 feature->feature_data[0] = 0x39; 10498 feature = (struct scsi_get_config_feature *) 10499 &feature->feature_data[feature->add_length]; 10500 10501 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10502 goto done; 10503 10504 f10: /* Random Read */ 10505 scsi_ulto2b(0x0010, feature->feature_code); 10506 feature->flags = 0x00; 10507 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10508 feature->flags |= SGC_F_CURRENT; 10509 feature->add_length = 8; 10510 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10511 scsi_ulto2b(1, &feature->feature_data[4]); 10512 feature->feature_data[6] = 0x00; 10513 feature = (struct scsi_get_config_feature *) 10514 &feature->feature_data[feature->add_length]; 10515 10516 f1d: /* Multi-Read */ 10517 scsi_ulto2b(0x001D, feature->feature_code); 10518 feature->flags = 0x00; 10519 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10520 feature->flags |= SGC_F_CURRENT; 10521 feature->add_length = 0; 10522 feature = (struct scsi_get_config_feature *) 10523 &feature->feature_data[feature->add_length]; 10524 10525 f1e: /* CD Read */ 10526 scsi_ulto2b(0x001E, feature->feature_code); 10527 feature->flags = 0x00; 10528 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10529 feature->flags |= SGC_F_CURRENT; 10530 feature->add_length = 
4; 10531 feature->feature_data[0] = 0x00; 10532 feature = (struct scsi_get_config_feature *) 10533 &feature->feature_data[feature->add_length]; 10534 10535 f1f: /* DVD Read */ 10536 scsi_ulto2b(0x001F, feature->feature_code); 10537 feature->flags = 0x08; 10538 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10539 feature->flags |= SGC_F_CURRENT; 10540 feature->add_length = 4; 10541 feature->feature_data[0] = 0x01; 10542 feature->feature_data[2] = 0x03; 10543 feature = (struct scsi_get_config_feature *) 10544 &feature->feature_data[feature->add_length]; 10545 10546 f2a: /* DVD+RW */ 10547 scsi_ulto2b(0x002A, feature->feature_code); 10548 feature->flags = 0x04; 10549 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10550 feature->flags |= SGC_F_CURRENT; 10551 feature->add_length = 4; 10552 feature->feature_data[0] = 0x00; 10553 feature->feature_data[1] = 0x00; 10554 feature = (struct scsi_get_config_feature *) 10555 &feature->feature_data[feature->add_length]; 10556 10557 f2b: /* DVD+R */ 10558 scsi_ulto2b(0x002B, feature->feature_code); 10559 feature->flags = 0x00; 10560 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10561 feature->flags |= SGC_F_CURRENT; 10562 feature->add_length = 4; 10563 feature->feature_data[0] = 0x00; 10564 feature = (struct scsi_get_config_feature *) 10565 &feature->feature_data[feature->add_length]; 10566 10567 f3a: /* DVD+RW Dual Layer */ 10568 scsi_ulto2b(0x003A, feature->feature_code); 10569 feature->flags = 0x00; 10570 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10571 feature->flags |= SGC_F_CURRENT; 10572 feature->add_length = 4; 10573 feature->feature_data[0] = 0x00; 10574 feature->feature_data[1] = 0x00; 10575 feature = (struct scsi_get_config_feature *) 10576 &feature->feature_data[feature->add_length]; 10577 10578 f3b: /* DVD+R Dual Layer */ 10579 scsi_ulto2b(0x003B, feature->feature_code); 10580 feature->flags = 0x00; 10581 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10582 feature->flags |= SGC_F_CURRENT; 10583 feature->add_length = 4; 10584 feature->feature_data[0] = 0x00; 10585 feature = (struct scsi_get_config_feature *) 10586 &feature->feature_data[feature->add_length]; 10587 10588 done: 10589 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10590 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10591 feature = (struct scsi_get_config_feature *)(hdr + 1); 10592 if (scsi_2btoul(feature->feature_code) == starting) 10593 feature = (struct scsi_get_config_feature *) 10594 &feature->feature_data[feature->add_length]; 10595 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10596 } 10597 scsi_ulto4b(data_len - 4, hdr->data_length); 10598 ctsio->kern_data_len = min(data_len, alloc_len); 10599 ctsio->kern_total_len = ctsio->kern_data_len; 10600 10601 ctl_set_success(ctsio); 10602 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10603 ctsio->be_move_done = ctl_config_move_done; 10604 ctl_datamove((union ctl_io *)ctsio); 10605 return (CTL_RETVAL_COMPLETE); 10606 } 10607 10608 int 10609 ctl_get_event_status(struct ctl_scsiio *ctsio) 10610 { 10611 struct scsi_get_event_status_header *hdr; 10612 struct scsi_get_event_status *cdb; 10613 uint32_t alloc_len, data_len; 10614 10615 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10616 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10617 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10618 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10619 ctl_done((union ctl_io *)ctsio); 10620 return (CTL_RETVAL_COMPLETE); 10621 } 10622 alloc_len = scsi_2btoul(cdb->length); 10623 10624 data_len = sizeof(struct scsi_get_event_status_header); 10625 
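/* Only the header is returned, with the NEA (no event available) bit set below, since no event descriptors are ever queued. */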
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10626 ctsio->kern_sg_entries = 0; 10627 ctsio->kern_rel_offset = 0; 10628 ctsio->kern_data_len = min(data_len, alloc_len); 10629 ctsio->kern_total_len = ctsio->kern_data_len; 10630 10631 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10632 scsi_ulto2b(0, hdr->descr_length); 10633 hdr->nea_class = SGESN_NEA; 10634 hdr->supported_class = 0; 10635 10636 ctl_set_success(ctsio); 10637 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10638 ctsio->be_move_done = ctl_config_move_done; 10639 ctl_datamove((union ctl_io *)ctsio); 10640 return (CTL_RETVAL_COMPLETE); 10641 } 10642 10643 int 10644 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10645 { 10646 struct scsi_mechanism_status_header *hdr; 10647 struct scsi_mechanism_status *cdb; 10648 uint32_t alloc_len, data_len; 10649 10650 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10651 alloc_len = scsi_2btoul(cdb->length); 10652 10653 data_len = sizeof(struct scsi_mechanism_status_header); 10654 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10655 ctsio->kern_sg_entries = 0; 10656 ctsio->kern_rel_offset = 0; 10657 ctsio->kern_data_len = min(data_len, alloc_len); 10658 ctsio->kern_total_len = ctsio->kern_data_len; 10659 10660 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10661 hdr->state1 = 0x00; 10662 hdr->state2 = 0xe0; 10663 scsi_ulto3b(0, hdr->lba); 10664 hdr->slots_num = 0; 10665 scsi_ulto2b(0, hdr->slots_length); 10666 10667 ctl_set_success(ctsio); 10668 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10669 ctsio->be_move_done = ctl_config_move_done; 10670 ctl_datamove((union ctl_io *)ctsio); 10671 return (CTL_RETVAL_COMPLETE); 10672 } 10673 10674 static void 10675 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10676 { 10677 10678 lba += 150; 10679 buf[0] = 0; 10680 buf[1] = bin2bcd((lba / 75) / 60); 10681 buf[2] = bin2bcd((lba / 75) % 60); 10682 buf[3] = bin2bcd(lba % 75); 10683 } 10684 10685 int 10686 ctl_read_toc(struct ctl_scsiio *ctsio) 10687 { 10688 struct ctl_lun *lun = CTL_LUN(ctsio); 10689 struct scsi_read_toc_hdr *hdr; 10690 struct scsi_read_toc_type01_descr *descr; 10691 struct scsi_read_toc *cdb; 10692 uint32_t alloc_len, data_len; 10693 int format, msf; 10694 10695 cdb = (struct scsi_read_toc *)ctsio->cdb; 10696 msf = (cdb->byte2 & CD_MSF) != 0; 10697 format = cdb->format; 10698 alloc_len = scsi_2btoul(cdb->data_len); 10699 10700 data_len = sizeof(struct scsi_read_toc_hdr); 10701 if (format == 0) 10702 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10703 else 10704 data_len += sizeof(struct scsi_read_toc_type01_descr); 10705 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10706 ctsio->kern_sg_entries = 0; 10707 ctsio->kern_rel_offset = 0; 10708 ctsio->kern_data_len = min(data_len, alloc_len); 10709 ctsio->kern_total_len = ctsio->kern_data_len; 10710 10711 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10712 if (format == 0) { 10713 scsi_ulto2b(0x12, hdr->data_length); 10714 hdr->first = 1; 10715 hdr->last = 1; 10716 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10717 descr->addr_ctl = 0x14; 10718 descr->track_number = 1; 10719 if (msf) 10720 ctl_ultomsf(0, descr->track_start); 10721 else 10722 scsi_ulto4b(0, descr->track_start); 10723 descr++; 10724 descr->addr_ctl = 0x14; 10725 descr->track_number = 0xaa; 10726 if (msf) 10727 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10728 else 10729 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10730 } else { 10731 
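/* For any other format, return just the header and a single descriptor for track 1. */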
scsi_ulto2b(0x0a, hdr->data_length); 10732 hdr->first = 1; 10733 hdr->last = 1; 10734 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10735 descr->addr_ctl = 0x14; 10736 descr->track_number = 1; 10737 if (msf) 10738 ctl_ultomsf(0, descr->track_start); 10739 else 10740 scsi_ulto4b(0, descr->track_start); 10741 } 10742 10743 ctl_set_success(ctsio); 10744 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10745 ctsio->be_move_done = ctl_config_move_done; 10746 ctl_datamove((union ctl_io *)ctsio); 10747 return (CTL_RETVAL_COMPLETE); 10748 } 10749 10750 /* 10751 * For known CDB types, parse the LBA and length. 10752 */ 10753 static int 10754 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10755 { 10756 10757 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 10758 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 10759 10760 switch (io->scsiio.cdb[0]) { 10761 case COMPARE_AND_WRITE: { 10762 struct scsi_compare_and_write *cdb; 10763 10764 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10765 10766 *lba = scsi_8btou64(cdb->addr); 10767 *len = cdb->length; 10768 break; 10769 } 10770 case READ_6: 10771 case WRITE_6: { 10772 struct scsi_rw_6 *cdb; 10773 10774 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10775 10776 *lba = scsi_3btoul(cdb->addr); 10777 /* only 5 bits are valid in the most significant address byte */ 10778 *lba &= 0x1fffff; 10779 *len = cdb->length; 10780 break; 10781 } 10782 case READ_10: 10783 case WRITE_10: { 10784 struct scsi_rw_10 *cdb; 10785 10786 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10787 10788 *lba = scsi_4btoul(cdb->addr); 10789 *len = scsi_2btoul(cdb->length); 10790 break; 10791 } 10792 case WRITE_VERIFY_10: { 10793 struct scsi_write_verify_10 *cdb; 10794 10795 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10796 10797 *lba = scsi_4btoul(cdb->addr); 10798 *len = scsi_2btoul(cdb->length); 10799 break; 10800 } 10801 case READ_12: 10802 case WRITE_12: { 10803 struct scsi_rw_12 *cdb; 10804 10805 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10806 10807 *lba = scsi_4btoul(cdb->addr); 10808 *len = scsi_4btoul(cdb->length); 10809 break; 10810 } 10811 case WRITE_VERIFY_12: { 10812 struct scsi_write_verify_12 *cdb; 10813 10814 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10815 10816 *lba = scsi_4btoul(cdb->addr); 10817 *len = scsi_4btoul(cdb->length); 10818 break; 10819 } 10820 case READ_16: 10821 case WRITE_16: { 10822 struct scsi_rw_16 *cdb; 10823 10824 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10825 10826 *lba = scsi_8btou64(cdb->addr); 10827 *len = scsi_4btoul(cdb->length); 10828 break; 10829 } 10830 case WRITE_ATOMIC_16: { 10831 struct scsi_write_atomic_16 *cdb; 10832 10833 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10834 10835 *lba = scsi_8btou64(cdb->addr); 10836 *len = scsi_2btoul(cdb->length); 10837 break; 10838 } 10839 case WRITE_VERIFY_16: { 10840 struct scsi_write_verify_16 *cdb; 10841 10842 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10843 10844 *lba = scsi_8btou64(cdb->addr); 10845 *len = scsi_4btoul(cdb->length); 10846 break; 10847 } 10848 case WRITE_SAME_10: { 10849 struct scsi_write_same_10 *cdb; 10850 10851 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10852 10853 *lba = scsi_4btoul(cdb->addr); 10854 *len = scsi_2btoul(cdb->length); 10855 break; 10856 } 10857 case WRITE_SAME_16: { 10858 struct scsi_write_same_16 *cdb; 10859 10860 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10861 10862 *lba = scsi_8btou64(cdb->addr); 10863 *len = scsi_4btoul(cdb->length); 10864 break; 10865 } 10866 case 
VERIFY_10: { 10867 struct scsi_verify_10 *cdb; 10868 10869 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10870 10871 *lba = scsi_4btoul(cdb->addr); 10872 *len = scsi_2btoul(cdb->length); 10873 break; 10874 } 10875 case VERIFY_12: { 10876 struct scsi_verify_12 *cdb; 10877 10878 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10879 10880 *lba = scsi_4btoul(cdb->addr); 10881 *len = scsi_4btoul(cdb->length); 10882 break; 10883 } 10884 case VERIFY_16: { 10885 struct scsi_verify_16 *cdb; 10886 10887 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10888 10889 *lba = scsi_8btou64(cdb->addr); 10890 *len = scsi_4btoul(cdb->length); 10891 break; 10892 } 10893 case UNMAP: { 10894 *lba = 0; 10895 *len = UINT64_MAX; 10896 break; 10897 } 10898 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10899 struct scsi_get_lba_status *cdb; 10900 10901 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10902 *lba = scsi_8btou64(cdb->addr); 10903 *len = UINT32_MAX; 10904 break; 10905 } 10906 default: 10907 *lba = 0; 10908 *len = UINT64_MAX; 10909 return (1); 10910 } 10911 10912 return (0); 10913 } 10914 10915 static ctl_action 10916 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10917 bool seq) 10918 { 10919 uint64_t endlba1, endlba2; 10920 10921 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10922 endlba2 = lba2 + len2 - 1; 10923 10924 if ((endlba1 < lba2) || (endlba2 < lba1)) 10925 return (CTL_ACTION_PASS); 10926 else 10927 return (CTL_ACTION_BLOCK); 10928 } 10929 10930 static int 10931 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10932 { 10933 struct ctl_ptr_len_flags *ptrlen; 10934 struct scsi_unmap_desc *buf, *end, *range; 10935 uint64_t lba; 10936 uint32_t len; 10937 10938 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 10939 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 10940 10941 /* If not UNMAP -- go other way. */ 10942 if (io->scsiio.cdb[0] != UNMAP) 10943 return (CTL_ACTION_SKIP); 10944 10945 /* If UNMAP without data -- block and wait for data. */ 10946 ptrlen = (struct ctl_ptr_len_flags *) 10947 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10948 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10949 ptrlen->ptr == NULL) 10950 return (CTL_ACTION_BLOCK); 10951 10952 /* UNMAP with data -- check for collision. 
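 * A descriptor range [lba, lba + len) collides with [lba2, lba2 + len2) when lba < lba2 + len2 and lba + len > lba2, i.e. when the two intervals intersect.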
*/ 10953 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10954 end = buf + ptrlen->len / sizeof(*buf); 10955 for (range = buf; range < end; range++) { 10956 lba = scsi_8btou64(range->lba); 10957 len = scsi_4btoul(range->length); 10958 if ((lba < lba2 + len2) && (lba + len > lba2)) 10959 return (CTL_ACTION_BLOCK); 10960 } 10961 return (CTL_ACTION_PASS); 10962 } 10963 10964 static ctl_action 10965 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10966 { 10967 uint64_t lba1, lba2; 10968 uint64_t len1, len2; 10969 int retval; 10970 10971 retval = ctl_get_lba_len(io2, &lba2, &len2); 10972 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10973 10974 retval = ctl_extent_check_unmap(io1, lba2, len2); 10975 if (retval != CTL_ACTION_SKIP) 10976 return (retval); 10977 10978 retval = ctl_get_lba_len(io1, &lba1, &len1); 10979 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10980 10981 if (seq && (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)) 10982 seq = FALSE; 10983 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10984 } 10985 10986 static ctl_action 10987 ctl_seq_check(union ctl_io *io1, union ctl_io *io2) 10988 { 10989 uint64_t lba1, lba2; 10990 uint64_t len1, len2; 10991 int retval __diagused; 10992 10993 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10994 return (CTL_ACTION_PASS); 10995 retval = ctl_get_lba_len(io1, &lba1, &len1); 10996 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10997 retval = ctl_get_lba_len(io2, &lba2, &len2); 10998 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10999 11000 if (lba1 + len1 == lba2) 11001 return (CTL_ACTION_BLOCK); 11002 return (CTL_ACTION_PASS); 11003 } 11004 11005 static ctl_action 11006 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 11007 const uint8_t *serialize_row, union ctl_io *ooa_io) 11008 { 11009 11010 /* 11011 * The initiator attempted multiple untagged commands at the same 11012 * time. Can't do that. 11013 */ 11014 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11015 && __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11016 && ((pending_io->io_hdr.nexus.targ_port == 11017 ooa_io->io_hdr.nexus.targ_port) 11018 && (pending_io->io_hdr.nexus.initid == 11019 ooa_io->io_hdr.nexus.initid)) 11020 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11021 CTL_FLAG_STATUS_SENT)) == 0)) 11022 return (CTL_ACTION_OVERLAP); 11023 11024 /* 11025 * The initiator attempted to send multiple tagged commands with 11026 * the same ID. (It's fine if different initiators have the same 11027 * tag ID.) 11028 * 11029 * Even if all of those conditions are true, we don't kill the I/O 11030 * if the command ahead of us has been aborted. We won't end up 11031 * sending it to the FETD, and it's perfectly legal to resend a 11032 * command with the same tag number as long as the previous 11033 * instance of this tag number has been aborted somehow. 11034 */ 11035 if (__predict_true(pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11036 && __predict_true(ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11037 && __predict_false(pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 11038 && ((pending_io->io_hdr.nexus.targ_port == 11039 ooa_io->io_hdr.nexus.targ_port) 11040 && (pending_io->io_hdr.nexus.initid == 11041 ooa_io->io_hdr.nexus.initid)) 11042 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11043 CTL_FLAG_STATUS_SENT)) == 0)) 11044 return (CTL_ACTION_OVERLAP_TAG); 11045 11046 /* 11047 * If we get a head of queue tag, SAM-3 says that we should 11048 * immediately execute it. 
11049 * 11050 * What happens if this command would normally block for some other 11051 * reason? e.g. a request sense with a head of queue tag 11052 * immediately after a write. Normally that would block, but this 11053 * will result in its getting executed immediately... 11054 * 11055 * We currently return "pass" instead of "skip", so we'll end up 11056 * going through the rest of the queue to check for overlapped tags. 11057 * 11058 * XXX KDM check for other types of blockage first?? 11059 */ 11060 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 11061 return (CTL_ACTION_PASS); 11062 11063 /* 11064 * Simple tags get blocked until all head of queue and ordered tags 11065 * ahead of them have completed. I'm lumping untagged commands in 11066 * with simple tags here. XXX KDM is that the right thing to do? 11067 */ 11068 if (__predict_false(ooa_io->scsiio.tag_type == CTL_TAG_ORDERED) || 11069 __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 11070 return (CTL_ACTION_BLOCK); 11071 11072 /* Unsupported command in OOA queue. */ 11073 if (__predict_false(ooa_io->scsiio.seridx == CTL_SERIDX_INVLD)) 11074 return (CTL_ACTION_PASS); 11075 11076 switch (serialize_row[ooa_io->scsiio.seridx]) { 11077 case CTL_SER_SEQ: 11078 if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 11079 return (ctl_seq_check(ooa_io, pending_io)); 11080 /* FALLTHROUGH */ 11081 case CTL_SER_PASS: 11082 return (CTL_ACTION_PASS); 11083 case CTL_SER_EXTENTOPT: 11084 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == 11085 SCP_QUEUE_ALG_UNRESTRICTED) 11086 return (CTL_ACTION_PASS); 11087 /* FALLTHROUGH */ 11088 case CTL_SER_EXTENT: 11089 return (ctl_extent_check(ooa_io, pending_io, 11090 (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11091 case CTL_SER_BLOCKOPT: 11092 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == 11093 SCP_QUEUE_ALG_UNRESTRICTED) 11094 return (CTL_ACTION_PASS); 11095 /* FALLTHROUGH */ 11096 case CTL_SER_BLOCK: 11097 return (CTL_ACTION_BLOCK); 11098 default: 11099 __assert_unreachable(); 11100 } 11101 } 11102 11103 /* 11104 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11105 * Assumptions: 11106 * - pending_io is generally either incoming, or on the blocked queue 11107 * - starting I/O is the I/O we want to start the check with. 11108 */ 11109 static ctl_action 11110 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11111 union ctl_io **starting_io) 11112 { 11113 union ctl_io *ooa_io = *starting_io; 11114 const uint8_t *serialize_row; 11115 ctl_action action; 11116 11117 mtx_assert(&lun->lun_lock, MA_OWNED); 11118 11119 /* 11120 * Aborted commands are not going to be executed and may even 11121 * not report completion, so we don't care about their order. 11122 * Let them complete ASAP to clean the OOA queue. 11123 */ 11124 if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT)) 11125 return (CTL_ACTION_SKIP); 11126 11127 /* 11128 * Ordered tags have to block until all items ahead of them have 11129 * completed. If we get called with an ordered tag, we always 11130 * block, if something else is ahead of us in the queue. 11131 */ 11132 if ((pending_io->scsiio.tag_type == CTL_TAG_ORDERED) && 11133 (ooa_io != NULL)) 11134 return (CTL_ACTION_BLOCK); 11135 11136 serialize_row = ctl_serialize_table[pending_io->scsiio.seridx]; 11137 11138 /* 11139 * Run back along the OOA queue, starting with the current 11140 * blocked I/O and going through every I/O before it on the 11141 * queue. 
If starting_io is NULL, we'll just end up returning 11142 * CTL_ACTION_PASS. 11143 */ 11144 for (; ooa_io != NULL; 11145 ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { 11146 action = ctl_check_for_blockage(lun, pending_io, serialize_row, 11147 ooa_io); 11148 if (action != CTL_ACTION_PASS) { 11149 *starting_io = ooa_io; 11150 return (action); 11151 } 11152 } 11153 11154 *starting_io = NULL; 11155 return (CTL_ACTION_PASS); 11156 } 11157 11158 /* 11159 * Try to unblock the specified I/O. 11160 * 11161 * skip parameter allows explicitly skip present blocker of the I/O, 11162 * starting from the previous one on OOA queue. It can be used when 11163 * we know for sure that the blocker I/O does no longer count. 11164 */ 11165 static void 11166 ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) 11167 { 11168 struct ctl_softc *softc = lun->ctl_softc; 11169 union ctl_io *bio, *obio; 11170 const struct ctl_cmd_entry *entry; 11171 union ctl_ha_msg msg_info; 11172 ctl_action action; 11173 11174 mtx_assert(&lun->lun_lock, MA_OWNED); 11175 11176 if (io->io_hdr.blocker == NULL) 11177 return; 11178 11179 obio = bio = io->io_hdr.blocker; 11180 if (skip) 11181 bio = (union ctl_io *)LIST_NEXT(&bio->io_hdr, ooa_links); 11182 action = ctl_check_ooa(lun, io, &bio); 11183 if (action == CTL_ACTION_BLOCK) { 11184 /* Still blocked, but may be by different I/O now. */ 11185 if (bio != obio) { 11186 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, 11187 &io->io_hdr, blocked_links); 11188 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, 11189 &io->io_hdr, blocked_links); 11190 io->io_hdr.blocker = bio; 11191 } 11192 return; 11193 } 11194 11195 /* No longer blocked, one way or another. */ 11196 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); 11197 io->io_hdr.blocker = NULL; 11198 11199 switch (action) { 11200 case CTL_ACTION_PASS: 11201 case CTL_ACTION_SKIP: 11202 11203 /* Serializing commands from the other SC retire there. */ 11204 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11205 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11206 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11207 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11208 msg_info.hdr.serializing_sc = io; 11209 msg_info.hdr.msg_type = CTL_MSG_R2R; 11210 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11211 sizeof(msg_info.hdr), M_NOWAIT); 11212 break; 11213 } 11214 11215 /* 11216 * Check this I/O for LUN state changes that may have happened 11217 * while this command was blocked. The LUN state may have been 11218 * changed by a command ahead of us in the queue. 11219 */ 11220 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11221 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 11222 ctl_done(io); 11223 break; 11224 } 11225 11226 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11227 ctl_enqueue_rtr(io); 11228 break; 11229 default: 11230 __assert_unreachable(); 11231 case CTL_ACTION_OVERLAP: 11232 ctl_set_overlapped_cmd(&io->scsiio); 11233 goto error; 11234 case CTL_ACTION_OVERLAP_TAG: 11235 ctl_set_overlapped_tag(&io->scsiio, 11236 io->scsiio.tag_num & 0xff); 11237 error: 11238 /* Serializing commands from the other SC are done here. 
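 * In the non-XFER HA modes the peer controller still owns the real
 * command; the copy here essentially just held a slot in the OOA queue
 * for serialization, so on an overlap we ship the sense data back in a
 * CTL_MSG_BAD_JUJU message and free the copy instead of completing it
 * locally.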
*/
11239 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
11240 (softc->ha_mode != CTL_HA_MODE_XFER)) {
11241 ctl_try_unblock_others(lun, io, TRUE);
11242 LIST_REMOVE(&io->io_hdr, ooa_links);
11243
11244 ctl_copy_sense_data_back(io, &msg_info);
11245 msg_info.hdr.original_sc = io->io_hdr.remote_io;
11246 msg_info.hdr.serializing_sc = NULL;
11247 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
11248 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11249 sizeof(msg_info.scsi), M_WAITOK);
11250 ctl_free_io(io);
11251 break;
11252 }
11253
11254 ctl_done(io);
11255 break;
11256 }
11257 }
11258
11259 /*
11260 * Try to unblock I/Os blocked by the specified I/O.
11261 *
11262 * The skip parameter allows us to explicitly skip the specified I/O as a
11263 * blocker, starting from the one before it on the OOA queue. It can be
11264 * used when we know for sure that the specified I/O no longer counts (it
11265 * is done). It still has to be on the OOA queue, though, so that we know where to start.
11266 */
11267 static void
11268 ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip)
11269 {
11270 union ctl_io *io, *next_io;
11271
11272 mtx_assert(&lun->lun_lock, MA_OWNED);
11273
11274 for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue);
11275 io != NULL; io = next_io) {
11276 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links);
11277
11278 KASSERT(io->io_hdr.blocker != NULL,
11279 ("I/O %p on blocked list without blocker", io));
11280 ctl_try_unblock_io(lun, io, skip);
11281 }
11282 KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue),
11283 ("blocked_queue is not empty after skipping %p", bio));
11284 }
11285
11286 /*
11287 * This routine (with one exception) checks LUN flags that can be set by
11288 * commands ahead of us in the OOA queue. These flags have to be checked
11289 * when a command initially comes in, and when we pull a command off the
11290 * blocked queue and are preparing to execute it. The reason we have to
11291 * check these flags for commands on the blocked queue is that the LUN
11292 * state may have been changed by a command ahead of us while we're on the
11293 * blocked queue.
11294 *
11295 * Ordering is somewhat important with these checks, so please pay
11296 * careful attention to the placement of any new checks.
11297 */
11298 static int
11299 ctl_scsiio_lun_check(struct ctl_lun *lun,
11300 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
11301 {
11302 struct ctl_softc *softc = lun->ctl_softc;
11303 int retval;
11304 uint32_t residx;
11305
11306 retval = 0;
11307
11308 mtx_assert(&lun->lun_lock, MA_OWNED);
11309
11310 /*
11311 * If this shelf is a secondary shelf controller, we may have to
11312 * reject some commands disallowed by HA mode and link state.
11313 */ 11314 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11315 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11316 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11317 ctl_set_lun_unavail(ctsio); 11318 retval = 1; 11319 goto bailout; 11320 } 11321 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11322 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11323 ctl_set_lun_transit(ctsio); 11324 retval = 1; 11325 goto bailout; 11326 } 11327 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11328 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11329 ctl_set_lun_standby(ctsio); 11330 retval = 1; 11331 goto bailout; 11332 } 11333 11334 /* The rest of checks are only done on executing side */ 11335 if (softc->ha_mode == CTL_HA_MODE_XFER) 11336 goto bailout; 11337 } 11338 11339 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11340 if (lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11341 ctl_set_hw_write_protected(ctsio); 11342 retval = 1; 11343 goto bailout; 11344 } 11345 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11346 ctl_set_sense(ctsio, /*current_error*/ 1, 11347 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11348 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11349 retval = 1; 11350 goto bailout; 11351 } 11352 } 11353 11354 /* 11355 * Check for a reservation conflict. If this command isn't allowed 11356 * even on reserved LUNs, and if this initiator isn't the one who 11357 * reserved us, reject the command with a reservation conflict. 11358 */ 11359 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11360 if ((lun->flags & CTL_LUN_RESERVED) 11361 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11362 if (lun->res_idx != residx) { 11363 ctl_set_reservation_conflict(ctsio); 11364 retval = 1; 11365 goto bailout; 11366 } 11367 } 11368 11369 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11370 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11371 /* No reservation or command is allowed. */; 11372 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11373 (lun->pr_res_type == SPR_TYPE_WR_EX || 11374 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11375 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11376 /* The command is allowed for Write Exclusive resv. */; 11377 } else { 11378 /* 11379 * if we aren't registered or it's a res holder type 11380 * reservation and this isn't the res holder then set a 11381 * conflict. 
11382 */ 11383 if (ctl_get_prkey(lun, residx) == 0 || 11384 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11385 ctl_set_reservation_conflict(ctsio); 11386 retval = 1; 11387 goto bailout; 11388 } 11389 } 11390 11391 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11392 if (lun->flags & CTL_LUN_EJECTED) 11393 ctl_set_lun_ejected(ctsio); 11394 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11395 if (lun->flags & CTL_LUN_REMOVABLE) 11396 ctl_set_lun_no_media(ctsio); 11397 else 11398 ctl_set_lun_int_reqd(ctsio); 11399 } else if (lun->flags & CTL_LUN_STOPPED) 11400 ctl_set_lun_stopped(ctsio); 11401 else 11402 goto bailout; 11403 retval = 1; 11404 goto bailout; 11405 } 11406 11407 bailout: 11408 return (retval); 11409 } 11410 11411 static void 11412 ctl_failover_io(union ctl_io *io, int have_lock) 11413 { 11414 ctl_set_busy(&io->scsiio); 11415 ctl_done(io); 11416 } 11417 11418 static void 11419 ctl_failover_lun(union ctl_io *rio) 11420 { 11421 struct ctl_softc *softc = CTL_SOFTC(rio); 11422 struct ctl_lun *lun; 11423 struct ctl_io_hdr *io, *next_io; 11424 uint32_t targ_lun; 11425 11426 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11427 CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); 11428 11429 /* Find and lock the LUN. */ 11430 mtx_lock(&softc->ctl_lock); 11431 if (targ_lun > ctl_max_luns || 11432 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11433 mtx_unlock(&softc->ctl_lock); 11434 return; 11435 } 11436 mtx_lock(&lun->lun_lock); 11437 mtx_unlock(&softc->ctl_lock); 11438 if (lun->flags & CTL_LUN_DISABLED) { 11439 mtx_unlock(&lun->lun_lock); 11440 return; 11441 } 11442 11443 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11444 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11445 /* We are master */ 11446 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11447 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11448 io->flags |= CTL_FLAG_ABORT | 11449 CTL_FLAG_FAILOVER; 11450 ctl_try_unblock_io(lun, 11451 (union ctl_io *)io, FALSE); 11452 } else { /* This can be only due to DATAMOVE */ 11453 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11454 io->flags &= ~CTL_FLAG_DMA_INPROG; 11455 io->flags |= CTL_FLAG_IO_ACTIVE; 11456 io->port_status = 31340; 11457 ctl_enqueue_isc((union ctl_io *)io); 11458 } 11459 } else 11460 /* We are slave */ 11461 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11462 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11463 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11464 io->flags |= CTL_FLAG_FAILOVER; 11465 } else { 11466 ctl_set_busy(&((union ctl_io *)io)-> 11467 scsiio); 11468 ctl_done((union ctl_io *)io); 11469 } 11470 } 11471 } 11472 } else { /* SERIALIZE modes */ 11473 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11474 /* We are master */ 11475 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11476 if (io->blocker != NULL) { 11477 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, 11478 io, blocked_links); 11479 io->blocker = NULL; 11480 } 11481 ctl_try_unblock_others(lun, (union ctl_io *)io, 11482 TRUE); 11483 LIST_REMOVE(io, ooa_links); 11484 ctl_free_io((union ctl_io *)io); 11485 } else 11486 /* We are slave */ 11487 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11488 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11489 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11490 ctl_set_busy(&((union ctl_io *)io)-> 11491 scsiio); 11492 ctl_done((union ctl_io *)io); 11493 } 11494 } 11495 } 11496 } 11497 mtx_unlock(&lun->lun_lock); 11498 } 11499 11500 static void 11501 ctl_scsiio_precheck(struct ctl_scsiio *ctsio) 11502 { 11503 struct ctl_softc *softc = CTL_SOFTC(ctsio); 11504 struct ctl_lun *lun; 11505 
const struct ctl_cmd_entry *entry;
11506 union ctl_io *bio;
11507 uint32_t initidx, targ_lun;
11508
11509 lun = NULL;
11510 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11511 if (targ_lun < ctl_max_luns)
11512 lun = softc->ctl_luns[targ_lun];
11513 if (lun) {
11514 /*
11515 * If the LUN is invalid, pretend that it doesn't exist.
11516 * It will go away as soon as all pending I/O has been
11517 * completed.
11518 */
11519 mtx_lock(&lun->lun_lock);
11520 if (lun->flags & CTL_LUN_DISABLED) {
11521 mtx_unlock(&lun->lun_lock);
11522 lun = NULL;
11523 }
11524 }
11525 CTL_LUN(ctsio) = lun;
11526 if (lun) {
11527 CTL_BACKEND_LUN(ctsio) = lun->be_lun;
11528
11529 /*
11530 * Every I/O goes into the OOA queue for a particular LUN,
11531 * and stays there until completion.
11532 */
11533 #ifdef CTL_TIME_IO
11534 if (LIST_EMPTY(&lun->ooa_queue))
11535 lun->idle_time += getsbinuptime() - lun->last_busy;
11536 #endif
11537 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
11538 }
11539
11540 /* Get command entry and return error if it is unsupported. */
11541 entry = ctl_validate_command(ctsio);
11542 if (entry == NULL) {
11543 if (lun)
11544 mtx_unlock(&lun->lun_lock);
11545 return;
11546 }
11547
11548 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11549 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11550
11551 /*
11552 * Check to see whether we can send this command to LUNs that don't
11553 * exist. This should pretty much only be the case for inquiry
11554 * and request sense. Further checks, below, really require having
11555 * a LUN, so we can't really check the command anymore. Just put
11556 * it on the rtr queue.
11557 */
11558 if (lun == NULL) {
11559 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
11560 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11561 ctl_enqueue_rtr((union ctl_io *)ctsio);
11562 return;
11563 }
11564
11565 ctl_set_unsupported_lun(ctsio);
11566 ctl_done((union ctl_io *)ctsio);
11567 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11568 return;
11569 } else {
11570 /*
11571 * Make sure we support this particular command on this LUN.
11572 * e.g., we don't support writes to the control LUN.
11573 */
11574 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11575 mtx_unlock(&lun->lun_lock);
11576 ctl_set_invalid_opcode(ctsio);
11577 ctl_done((union ctl_io *)ctsio);
11578 return;
11579 }
11580 }
11581
11582 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11583
11584 /*
11585 * If we've got a request sense, it'll clear the contingent
11586 * allegiance condition. Otherwise, if we have a CA condition for
11587 * this initiator, clear it, because it sent down a command other
11588 * than request sense.
11589 */
11590 if (ctsio->cdb[0] != REQUEST_SENSE) {
11591 struct scsi_sense_data *ps;
11592
11593 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
11594 if (ps != NULL)
11595 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
11596 }
11597
11598 /*
11599 * If the command has this flag set, it handles its own unit
11600 * attention reporting, so we shouldn't do anything. Otherwise we
11601 * check for any pending unit attentions, and send them back to the
11602 * initiator. We only do this when a command initially comes in,
11603 * not when we pull it off the blocked queue.
11604 * 11605 * According to SAM-3, section 5.3.2, the order that things get 11606 * presented back to the host is basically unit attentions caused 11607 * by some sort of reset event, busy status, reservation conflicts 11608 * or task set full, and finally any other status. 11609 * 11610 * One issue here is that some of the unit attentions we report 11611 * don't fall into the "reset" category (e.g. "reported luns data 11612 * has changed"). So reporting it here, before the reservation 11613 * check, may be technically wrong. I guess the only thing to do 11614 * would be to check for and report the reset events here, and then 11615 * check for the other unit attention types after we check for a 11616 * reservation conflict. 11617 * 11618 * XXX KDM need to fix this 11619 */ 11620 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11621 ctl_ua_type ua_type; 11622 u_int sense_len = 0; 11623 11624 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11625 &sense_len, SSD_TYPE_NONE); 11626 if (ua_type != CTL_UA_NONE) { 11627 mtx_unlock(&lun->lun_lock); 11628 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11629 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11630 ctsio->sense_len = sense_len; 11631 ctl_done((union ctl_io *)ctsio); 11632 return; 11633 } 11634 } 11635 11636 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11637 mtx_unlock(&lun->lun_lock); 11638 ctl_done((union ctl_io *)ctsio); 11639 return; 11640 } 11641 11642 /* 11643 * XXX CHD this is where we want to send IO to other side if 11644 * this LUN is secondary on this SC. We will need to make a copy 11645 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11646 * the copy we send as FROM_OTHER. 11647 * We also need to stuff the address of the original IO so we can 11648 * find it easily. Something similar will need be done on the other 11649 * side so when we are done we can find the copy. 
11650 */ 11651 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11652 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11653 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11654 union ctl_ha_msg msg_info; 11655 int isc_retval; 11656 11657 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11658 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11659 mtx_unlock(&lun->lun_lock); 11660 11661 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11662 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11663 msg_info.hdr.serializing_sc = NULL; 11664 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11665 msg_info.scsi.tag_num = ctsio->tag_num; 11666 msg_info.scsi.tag_type = ctsio->tag_type; 11667 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11668 msg_info.scsi.cdb_len = ctsio->cdb_len; 11669 msg_info.scsi.priority = ctsio->priority; 11670 11671 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11672 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11673 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11674 ctsio->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11675 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 11676 ctl_set_busy(ctsio); 11677 ctl_done((union ctl_io *)ctsio); 11678 return; 11679 } 11680 return; 11681 } 11682 11683 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); 11684 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 11685 case CTL_ACTION_PASS: 11686 case CTL_ACTION_SKIP: 11687 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11688 mtx_unlock(&lun->lun_lock); 11689 ctl_enqueue_rtr((union ctl_io *)ctsio); 11690 break; 11691 case CTL_ACTION_BLOCK: 11692 ctsio->io_hdr.blocker = bio; 11693 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 11694 blocked_links); 11695 mtx_unlock(&lun->lun_lock); 11696 break; 11697 case CTL_ACTION_OVERLAP: 11698 mtx_unlock(&lun->lun_lock); 11699 ctl_set_overlapped_cmd(ctsio); 11700 ctl_done((union ctl_io *)ctsio); 11701 break; 11702 case CTL_ACTION_OVERLAP_TAG: 11703 mtx_unlock(&lun->lun_lock); 11704 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11705 ctl_done((union ctl_io *)ctsio); 11706 break; 11707 default: 11708 __assert_unreachable(); 11709 } 11710 } 11711 11712 const struct ctl_cmd_entry * 11713 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11714 { 11715 const struct ctl_cmd_entry *entry; 11716 int service_action; 11717 11718 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11719 if (sa) 11720 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11721 if (entry->flags & CTL_CMD_FLAG_SA5) { 11722 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11723 entry = &((const struct ctl_cmd_entry *) 11724 entry->execute)[service_action]; 11725 } 11726 return (entry); 11727 } 11728 11729 const struct ctl_cmd_entry * 11730 ctl_validate_command(struct ctl_scsiio *ctsio) 11731 { 11732 const struct ctl_cmd_entry *entry; 11733 int i, sa; 11734 uint8_t diff; 11735 11736 entry = ctl_get_cmd_entry(ctsio, &sa); 11737 ctsio->seridx = entry->seridx; 11738 if (entry->execute == NULL) { 11739 if (sa) 11740 ctl_set_invalid_field(ctsio, 11741 /*sks_valid*/ 1, 11742 /*command*/ 1, 11743 /*field*/ 1, 11744 /*bit_valid*/ 1, 11745 /*bit*/ 4); 11746 else 11747 ctl_set_invalid_opcode(ctsio); 11748 ctl_done((union ctl_io *)ctsio); 11749 return (NULL); 11750 } 11751 KASSERT(entry->length > 0, 11752 ("Not defined length for command 0x%02x/0x%02x", 11753 ctsio->cdb[0], ctsio->cdb[1])); 11754 for (i = 1; i < entry->length; i++) { 11755 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11756 if (diff == 0) 11757 continue; 11758 ctl_set_invalid_field(ctsio, 11759 
/*sks_valid*/ 1, 11760 /*command*/ 1, 11761 /*field*/ i, 11762 /*bit_valid*/ 1, 11763 /*bit*/ fls(diff) - 1); 11764 ctl_done((union ctl_io *)ctsio); 11765 return (NULL); 11766 } 11767 return (entry); 11768 } 11769 11770 static int 11771 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11772 { 11773 11774 switch (lun_type) { 11775 case T_DIRECT: 11776 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11777 return (0); 11778 break; 11779 case T_PROCESSOR: 11780 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11781 return (0); 11782 break; 11783 case T_CDROM: 11784 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11785 return (0); 11786 break; 11787 default: 11788 return (0); 11789 } 11790 return (1); 11791 } 11792 11793 static int 11794 ctl_scsiio(struct ctl_scsiio *ctsio) 11795 { 11796 int retval; 11797 const struct ctl_cmd_entry *entry; 11798 11799 retval = CTL_RETVAL_COMPLETE; 11800 11801 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11802 11803 entry = ctl_get_cmd_entry(ctsio, NULL); 11804 11805 /* 11806 * If this I/O has been aborted, just send it straight to 11807 * ctl_done() without executing it. 11808 */ 11809 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11810 ctl_done((union ctl_io *)ctsio); 11811 goto bailout; 11812 } 11813 11814 /* 11815 * All the checks should have been handled by ctl_scsiio_precheck(). 11816 * We should be clear now to just execute the I/O. 11817 */ 11818 retval = entry->execute(ctsio); 11819 11820 bailout: 11821 return (retval); 11822 } 11823 11824 static int 11825 ctl_target_reset(union ctl_io *io) 11826 { 11827 struct ctl_softc *softc = CTL_SOFTC(io); 11828 struct ctl_port *port = CTL_PORT(io); 11829 struct ctl_lun *lun; 11830 uint32_t initidx; 11831 ctl_ua_type ua_type; 11832 11833 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11834 union ctl_ha_msg msg_info; 11835 11836 msg_info.hdr.nexus = io->io_hdr.nexus; 11837 msg_info.task.task_action = io->taskio.task_action; 11838 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11839 msg_info.hdr.original_sc = NULL; 11840 msg_info.hdr.serializing_sc = NULL; 11841 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11842 sizeof(msg_info.task), M_WAITOK); 11843 } 11844 11845 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11846 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11847 ua_type = CTL_UA_TARG_RESET; 11848 else 11849 ua_type = CTL_UA_BUS_RESET; 11850 mtx_lock(&softc->ctl_lock); 11851 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11852 if (port != NULL && 11853 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11854 continue; 11855 ctl_do_lun_reset(lun, initidx, ua_type); 11856 } 11857 mtx_unlock(&softc->ctl_lock); 11858 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11859 return (0); 11860 } 11861 11862 /* 11863 * The LUN should always be set. The I/O is optional, and is used to 11864 * distinguish between I/Os sent by this initiator, and by other 11865 * initiators. We set unit attention for initiators other than this one. 11866 * SAM-3 is vague on this point. It does say that a unit attention should 11867 * be established for other initiators when a LUN is reset (see section 11868 * 5.7.3), but it doesn't specifically say that the unit attention should 11869 * be established for this particular initiator when a LUN is reset. 
Here 11870 * is the relevant text, from SAM-3 rev 8: 11871 * 11872 * 5.7.2 When a SCSI initiator port aborts its own tasks 11873 * 11874 * When a SCSI initiator port causes its own task(s) to be aborted, no 11875 * notification that the task(s) have been aborted shall be returned to 11876 * the SCSI initiator port other than the completion response for the 11877 * command or task management function action that caused the task(s) to 11878 * be aborted and notification(s) associated with related effects of the 11879 * action (e.g., a reset unit attention condition). 11880 * 11881 * XXX KDM for now, we're setting unit attention for all initiators. 11882 */ 11883 static void 11884 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11885 { 11886 struct ctl_io_hdr *xioh; 11887 int i; 11888 11889 mtx_lock(&lun->lun_lock); 11890 /* Abort tasks. */ 11891 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 11892 xioh->flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11893 ctl_try_unblock_io(lun, (union ctl_io *)xioh, FALSE); 11894 } 11895 /* Clear CA. */ 11896 for (i = 0; i < ctl_max_ports; i++) { 11897 free(lun->pending_sense[i], M_CTL); 11898 lun->pending_sense[i] = NULL; 11899 } 11900 /* Clear reservation. */ 11901 lun->flags &= ~CTL_LUN_RESERVED; 11902 /* Clear prevent media removal. */ 11903 if (lun->prevent) { 11904 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11905 ctl_clear_mask(lun->prevent, i); 11906 lun->prevent_count = 0; 11907 } 11908 /* Clear TPC status */ 11909 ctl_tpc_lun_clear(lun, -1); 11910 /* Establish UA. */ 11911 #if 0 11912 ctl_est_ua_all(lun, initidx, ua_type); 11913 #else 11914 ctl_est_ua_all(lun, -1, ua_type); 11915 #endif 11916 mtx_unlock(&lun->lun_lock); 11917 } 11918 11919 static int 11920 ctl_lun_reset(union ctl_io *io) 11921 { 11922 struct ctl_softc *softc = CTL_SOFTC(io); 11923 struct ctl_lun *lun; 11924 uint32_t targ_lun, initidx; 11925 11926 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11927 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11928 mtx_lock(&softc->ctl_lock); 11929 if (targ_lun >= ctl_max_luns || 11930 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11931 mtx_unlock(&softc->ctl_lock); 11932 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11933 return (1); 11934 } 11935 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11936 mtx_unlock(&softc->ctl_lock); 11937 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11938 11939 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11940 union ctl_ha_msg msg_info; 11941 11942 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11943 msg_info.hdr.nexus = io->io_hdr.nexus; 11944 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11945 msg_info.hdr.original_sc = NULL; 11946 msg_info.hdr.serializing_sc = NULL; 11947 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11948 sizeof(msg_info.task), M_WAITOK); 11949 } 11950 return (0); 11951 } 11952 11953 static void 11954 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11955 int other_sc) 11956 { 11957 struct ctl_io_hdr *xioh; 11958 11959 mtx_assert(&lun->lun_lock, MA_OWNED); 11960 11961 /* 11962 * Run through the OOA queue and attempt to find the given I/O. 11963 * The target port, initiator ID, tag type and tag number have to 11964 * match the values that we got from the initiator. If we have an 11965 * untagged command to abort, simply abort the first untagged command 11966 * we come to. We only allow one untagged command at a time of course. 
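 * A targ_port or init_id of UINT32_MAX acts as a wildcard (CLEAR TASK SET
 * passes both as wildcards), and an I/O aborted on behalf of a different
 * nexus also gets CTL_FLAG_ABORT_STATUS set so that a status is still
 * returned for it.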
11967 */ 11968 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 11969 union ctl_io *xio = (union ctl_io *)xioh; 11970 if ((targ_port == UINT32_MAX || 11971 targ_port == xioh->nexus.targ_port) && 11972 (init_id == UINT32_MAX || 11973 init_id == xioh->nexus.initid)) { 11974 if (targ_port != xioh->nexus.targ_port || 11975 init_id != xioh->nexus.initid) 11976 xioh->flags |= CTL_FLAG_ABORT_STATUS; 11977 xioh->flags |= CTL_FLAG_ABORT; 11978 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11979 union ctl_ha_msg msg_info; 11980 11981 msg_info.hdr.nexus = xioh->nexus; 11982 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11983 msg_info.task.tag_num = xio->scsiio.tag_num; 11984 msg_info.task.tag_type = xio->scsiio.tag_type; 11985 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11986 msg_info.hdr.original_sc = NULL; 11987 msg_info.hdr.serializing_sc = NULL; 11988 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11989 sizeof(msg_info.task), M_NOWAIT); 11990 } 11991 ctl_try_unblock_io(lun, xio, FALSE); 11992 } 11993 } 11994 } 11995 11996 static int 11997 ctl_abort_task_set(union ctl_io *io) 11998 { 11999 struct ctl_softc *softc = CTL_SOFTC(io); 12000 struct ctl_lun *lun; 12001 uint32_t targ_lun; 12002 12003 /* 12004 * Look up the LUN. 12005 */ 12006 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12007 mtx_lock(&softc->ctl_lock); 12008 if (targ_lun >= ctl_max_luns || 12009 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12010 mtx_unlock(&softc->ctl_lock); 12011 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12012 return (1); 12013 } 12014 12015 mtx_lock(&lun->lun_lock); 12016 mtx_unlock(&softc->ctl_lock); 12017 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 12018 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12019 io->io_hdr.nexus.initid, 12020 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12021 } else { /* CTL_TASK_CLEAR_TASK_SET */ 12022 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 12023 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12024 } 12025 mtx_unlock(&lun->lun_lock); 12026 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12027 return (0); 12028 } 12029 12030 static void 12031 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 12032 ctl_ua_type ua_type) 12033 { 12034 struct ctl_lun *lun; 12035 struct scsi_sense_data *ps; 12036 uint32_t p, i; 12037 12038 p = initidx / CTL_MAX_INIT_PER_PORT; 12039 i = initidx % CTL_MAX_INIT_PER_PORT; 12040 mtx_lock(&softc->ctl_lock); 12041 STAILQ_FOREACH(lun, &softc->lun_list, links) { 12042 mtx_lock(&lun->lun_lock); 12043 /* Abort tasks. */ 12044 ctl_abort_tasks_lun(lun, p, i, 1); 12045 /* Clear CA. */ 12046 ps = lun->pending_sense[p]; 12047 if (ps != NULL) 12048 ps[i].error_code = 0; 12049 /* Clear reservation. */ 12050 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 12051 lun->flags &= ~CTL_LUN_RESERVED; 12052 /* Clear prevent media removal. */ 12053 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 12054 ctl_clear_mask(lun->prevent, initidx); 12055 lun->prevent_count--; 12056 } 12057 /* Clear TPC status */ 12058 ctl_tpc_lun_clear(lun, initidx); 12059 /* Establish UA. 
*/ 12060 ctl_est_ua(lun, initidx, ua_type); 12061 mtx_unlock(&lun->lun_lock); 12062 } 12063 mtx_unlock(&softc->ctl_lock); 12064 } 12065 12066 static int 12067 ctl_i_t_nexus_reset(union ctl_io *io) 12068 { 12069 struct ctl_softc *softc = CTL_SOFTC(io); 12070 uint32_t initidx; 12071 12072 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12073 union ctl_ha_msg msg_info; 12074 12075 msg_info.hdr.nexus = io->io_hdr.nexus; 12076 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 12077 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12078 msg_info.hdr.original_sc = NULL; 12079 msg_info.hdr.serializing_sc = NULL; 12080 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12081 sizeof(msg_info.task), M_WAITOK); 12082 } 12083 12084 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12085 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 12086 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12087 return (0); 12088 } 12089 12090 static int 12091 ctl_abort_task(union ctl_io *io) 12092 { 12093 struct ctl_softc *softc = CTL_SOFTC(io); 12094 struct ctl_io_hdr *xioh; 12095 struct ctl_lun *lun; 12096 uint32_t targ_lun; 12097 12098 /* 12099 * Look up the LUN. 12100 */ 12101 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12102 mtx_lock(&softc->ctl_lock); 12103 if (targ_lun >= ctl_max_luns || 12104 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12105 mtx_unlock(&softc->ctl_lock); 12106 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12107 return (1); 12108 } 12109 12110 mtx_lock(&lun->lun_lock); 12111 mtx_unlock(&softc->ctl_lock); 12112 /* 12113 * Run through the OOA queue and attempt to find the given I/O. 12114 * The target port, initiator ID, tag type and tag number have to 12115 * match the values that we got from the initiator. If we have an 12116 * untagged command to abort, simply abort the first untagged command 12117 * we come to. We only allow one untagged command at a time of course. 12118 */ 12119 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 12120 union ctl_io *xio = (union ctl_io *)xioh; 12121 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) 12122 || (xioh->nexus.initid != io->io_hdr.nexus.initid) 12123 || (xioh->flags & CTL_FLAG_ABORT)) 12124 continue; 12125 12126 /* 12127 * If the abort says that the task is untagged, the 12128 * task in the queue must be untagged. Otherwise, 12129 * we just check to see whether the tag numbers 12130 * match. This is because the QLogic firmware 12131 * doesn't pass back the tag type in an abort 12132 * request. 12133 */ 12134 #if 0 12135 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12136 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12137 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12138 #else 12139 /* 12140 * XXX KDM we've got problems with FC, because it 12141 * doesn't send down a tag type with aborts. So we 12142 * can only really go by the tag number... 12143 * This may cause problems with parallel SCSI. 12144 * Need to figure that out!! 
12145 */
12146 if (xio->scsiio.tag_num == io->taskio.tag_num) {
12147 #endif
12148 xioh->flags |= CTL_FLAG_ABORT;
12149 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
12150 !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12151 union ctl_ha_msg msg_info;
12152
12153 msg_info.hdr.nexus = io->io_hdr.nexus;
12154 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12155 msg_info.task.tag_num = io->taskio.tag_num;
12156 msg_info.task.tag_type = io->taskio.tag_type;
12157 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12158 msg_info.hdr.original_sc = NULL;
12159 msg_info.hdr.serializing_sc = NULL;
12160 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12161 sizeof(msg_info.task), M_NOWAIT);
12162 }
12163 ctl_try_unblock_io(lun, xio, FALSE);
12164 }
12165 }
12166 mtx_unlock(&lun->lun_lock);
12167 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12168 return (0);
12169 }
12170
12171 static int
12172 ctl_query_task(union ctl_io *io, int task_set)
12173 {
12174 struct ctl_softc *softc = CTL_SOFTC(io);
12175 struct ctl_io_hdr *xioh;
12176 struct ctl_lun *lun;
12177 int found = 0;
12178 uint32_t targ_lun;
12179
12180 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12181 mtx_lock(&softc->ctl_lock);
12182 if (targ_lun >= ctl_max_luns ||
12183 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12184 mtx_unlock(&softc->ctl_lock);
12185 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12186 return (1);
12187 }
12188 mtx_lock(&lun->lun_lock);
12189 mtx_unlock(&softc->ctl_lock);
12190 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) {
12191 union ctl_io *xio = (union ctl_io *)xioh;
12192 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port)
12193 || (xioh->nexus.initid != io->io_hdr.nexus.initid)
12194 || (xioh->flags & CTL_FLAG_ABORT))
12195 continue;
12196
12197 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
12198 found = 1;
12199 break;
12200 }
12201 }
12202 mtx_unlock(&lun->lun_lock);
12203 if (found)
12204 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12205 else
12206 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12207 return (0);
12208 }
12209
12210 static int
12211 ctl_query_async_event(union ctl_io *io)
12212 {
12213 struct ctl_softc *softc = CTL_SOFTC(io);
12214 struct ctl_lun *lun;
12215 ctl_ua_type ua;
12216 uint32_t targ_lun, initidx;
12217
12218 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12219 mtx_lock(&softc->ctl_lock);
12220 if (targ_lun >= ctl_max_luns ||
12221 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12222 mtx_unlock(&softc->ctl_lock);
12223 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12224 return (1);
12225 }
12226 mtx_lock(&lun->lun_lock);
12227 mtx_unlock(&softc->ctl_lock);
12228 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12229 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
12230 mtx_unlock(&lun->lun_lock);
12231 if (ua != CTL_UA_NONE)
12232 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12233 else
12234 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12235 return (0);
12236 }
12237
12238 static void
12239 ctl_run_task(union ctl_io *io)
12240 {
12241 int retval = 1;
12242
12243 CTL_DEBUG_PRINT(("ctl_run_task\n"));
12244 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12245 ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type));
12246 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
12247 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
12248 switch (io->taskio.task_action) {
12249 case CTL_TASK_ABORT_TASK:
12250 retval = ctl_abort_task(io);
12251 break;
12252 case CTL_TASK_ABORT_TASK_SET:
12253 case
CTL_TASK_CLEAR_TASK_SET: 12254 retval = ctl_abort_task_set(io); 12255 break; 12256 case CTL_TASK_CLEAR_ACA: 12257 break; 12258 case CTL_TASK_I_T_NEXUS_RESET: 12259 retval = ctl_i_t_nexus_reset(io); 12260 break; 12261 case CTL_TASK_LUN_RESET: 12262 retval = ctl_lun_reset(io); 12263 break; 12264 case CTL_TASK_TARGET_RESET: 12265 case CTL_TASK_BUS_RESET: 12266 retval = ctl_target_reset(io); 12267 break; 12268 case CTL_TASK_PORT_LOGIN: 12269 break; 12270 case CTL_TASK_PORT_LOGOUT: 12271 break; 12272 case CTL_TASK_QUERY_TASK: 12273 retval = ctl_query_task(io, 0); 12274 break; 12275 case CTL_TASK_QUERY_TASK_SET: 12276 retval = ctl_query_task(io, 1); 12277 break; 12278 case CTL_TASK_QUERY_ASYNC_EVENT: 12279 retval = ctl_query_async_event(io); 12280 break; 12281 default: 12282 printf("%s: got unknown task management event %d\n", 12283 __func__, io->taskio.task_action); 12284 break; 12285 } 12286 if (retval == 0) 12287 io->io_hdr.status = CTL_SUCCESS; 12288 else 12289 io->io_hdr.status = CTL_ERROR; 12290 ctl_done(io); 12291 } 12292 12293 /* 12294 * For HA operation. Handle commands that come in from the other 12295 * controller. 12296 */ 12297 static void 12298 ctl_handle_isc(union ctl_io *io) 12299 { 12300 struct ctl_softc *softc = CTL_SOFTC(io); 12301 struct ctl_lun *lun; 12302 const struct ctl_cmd_entry *entry; 12303 uint32_t targ_lun; 12304 12305 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12306 switch (io->io_hdr.msg_type) { 12307 case CTL_MSG_SERIALIZE: 12308 ctl_serialize_other_sc_cmd(&io->scsiio); 12309 break; 12310 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12311 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12312 if (targ_lun >= ctl_max_luns || 12313 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12314 ctl_done(io); 12315 break; 12316 } 12317 mtx_lock(&lun->lun_lock); 12318 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12319 mtx_unlock(&lun->lun_lock); 12320 ctl_done(io); 12321 break; 12322 } 12323 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12324 mtx_unlock(&lun->lun_lock); 12325 ctl_enqueue_rtr(io); 12326 break; 12327 case CTL_MSG_FINISH_IO: 12328 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12329 ctl_done(io); 12330 break; 12331 } 12332 if (targ_lun >= ctl_max_luns || 12333 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12334 ctl_free_io(io); 12335 break; 12336 } 12337 mtx_lock(&lun->lun_lock); 12338 ctl_try_unblock_others(lun, io, TRUE); 12339 LIST_REMOVE(&io->io_hdr, ooa_links); 12340 mtx_unlock(&lun->lun_lock); 12341 ctl_free_io(io); 12342 break; 12343 case CTL_MSG_PERS_ACTION: 12344 ctl_hndl_per_res_out_on_other_sc(io); 12345 ctl_free_io(io); 12346 break; 12347 case CTL_MSG_BAD_JUJU: 12348 ctl_done(io); 12349 break; 12350 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12351 ctl_datamove_remote(io); 12352 break; 12353 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12354 ctl_datamove_done(io, false); 12355 break; 12356 case CTL_MSG_FAILOVER: 12357 ctl_failover_lun(io); 12358 ctl_free_io(io); 12359 break; 12360 default: 12361 printf("%s: Invalid message type %d\n", 12362 __func__, io->io_hdr.msg_type); 12363 ctl_free_io(io); 12364 break; 12365 } 12366 12367 } 12368 12369 /* 12370 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12371 * there is no match. 
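 * This is used by the error injection machinery: ctl_inject_error() calls
 * it for every queued ctl_error_desc to decide whether that descriptor
 * applies to the command being completed.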
12372 */ 12373 static ctl_lun_error_pattern 12374 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12375 { 12376 const struct ctl_cmd_entry *entry; 12377 ctl_lun_error_pattern filtered_pattern, pattern; 12378 12379 pattern = desc->error_pattern; 12380 12381 /* 12382 * XXX KDM we need more data passed into this function to match a 12383 * custom pattern, and we actually need to implement custom pattern 12384 * matching. 12385 */ 12386 if (pattern & CTL_LUN_PAT_CMD) 12387 return (CTL_LUN_PAT_CMD); 12388 12389 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12390 return (CTL_LUN_PAT_ANY); 12391 12392 entry = ctl_get_cmd_entry(ctsio, NULL); 12393 12394 filtered_pattern = entry->pattern & pattern; 12395 12396 /* 12397 * If the user requested specific flags in the pattern (e.g. 12398 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12399 * flags. 12400 * 12401 * If the user did not specify any flags, it doesn't matter whether 12402 * or not the command supports the flags. 12403 */ 12404 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12405 (pattern & ~CTL_LUN_PAT_MASK)) 12406 return (CTL_LUN_PAT_NONE); 12407 12408 /* 12409 * If the user asked for a range check, see if the requested LBA 12410 * range overlaps with this command's LBA range. 12411 */ 12412 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12413 uint64_t lba1; 12414 uint64_t len1; 12415 ctl_action action; 12416 int retval; 12417 12418 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12419 if (retval != 0) 12420 return (CTL_LUN_PAT_NONE); 12421 12422 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12423 desc->lba_range.len, FALSE); 12424 /* 12425 * A "pass" means that the LBA ranges don't overlap, so 12426 * this doesn't match the user's range criteria. 12427 */ 12428 if (action == CTL_ACTION_PASS) 12429 return (CTL_LUN_PAT_NONE); 12430 } 12431 12432 return (filtered_pattern); 12433 } 12434 12435 static void 12436 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12437 { 12438 struct ctl_error_desc *desc, *desc2; 12439 12440 mtx_assert(&lun->lun_lock, MA_OWNED); 12441 12442 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12443 ctl_lun_error_pattern pattern; 12444 /* 12445 * Check to see whether this particular command matches 12446 * the pattern in the descriptor. 12447 */ 12448 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12449 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12450 continue; 12451 12452 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12453 case CTL_LUN_INJ_ABORTED: 12454 ctl_set_aborted(&io->scsiio); 12455 break; 12456 case CTL_LUN_INJ_MEDIUM_ERR: 12457 ctl_set_medium_error(&io->scsiio, 12458 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12459 CTL_FLAG_DATA_OUT); 12460 break; 12461 case CTL_LUN_INJ_UA: 12462 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12463 * OCCURRED */ 12464 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12465 break; 12466 case CTL_LUN_INJ_CUSTOM: 12467 /* 12468 * We're assuming the user knows what he is doing. 12469 * Just copy the sense information without doing 12470 * checks. 
12471 */ 12472 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12473 MIN(sizeof(desc->custom_sense), 12474 sizeof(io->scsiio.sense_data))); 12475 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12476 io->scsiio.sense_len = SSD_FULL_SIZE; 12477 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12478 break; 12479 case CTL_LUN_INJ_NONE: 12480 default: 12481 /* 12482 * If this is an error injection type we don't know 12483 * about, clear the continuous flag (if it is set) 12484 * so it will get deleted below. 12485 */ 12486 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12487 break; 12488 } 12489 /* 12490 * By default, each error injection action is a one-shot 12491 */ 12492 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12493 continue; 12494 12495 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12496 12497 free(desc, M_CTL); 12498 } 12499 } 12500 12501 #ifdef CTL_IO_DELAY 12502 static void 12503 ctl_datamove_timer_wakeup(void *arg) 12504 { 12505 union ctl_io *io; 12506 12507 io = (union ctl_io *)arg; 12508 12509 ctl_datamove(io); 12510 } 12511 #endif /* CTL_IO_DELAY */ 12512 12513 static void 12514 ctl_datamove_done_process(union ctl_io *io) 12515 { 12516 #ifdef CTL_TIME_IO 12517 struct bintime cur_bt; 12518 #endif 12519 12520 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 12521 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 12522 12523 #ifdef CTL_TIME_IO 12524 getbinuptime(&cur_bt); 12525 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12526 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12527 #endif 12528 io->io_hdr.num_dmas++; 12529 12530 if ((io->io_hdr.port_status != 0) && 12531 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 12532 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 12533 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 12534 /*retry_count*/ io->io_hdr.port_status); 12535 } else if (io->scsiio.kern_data_resid != 0 && 12536 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 12537 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 12538 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 12539 ctl_set_invalid_field_ciu(&io->scsiio); 12540 } else if (ctl_debug & CTL_DEBUG_CDB_DATA) 12541 ctl_data_print(io); 12542 } 12543 12544 void 12545 ctl_datamove_done(union ctl_io *io, bool samethr) 12546 { 12547 12548 ctl_datamove_done_process(io); 12549 io->scsiio.be_move_done(io, samethr); 12550 } 12551 12552 void 12553 ctl_datamove(union ctl_io *io) 12554 { 12555 void (*fe_datamove)(union ctl_io *io); 12556 12557 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12558 12559 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12560 12561 /* No data transferred yet. Frontend must update this when done. 
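 * kern_data_resid starts out equal to kern_data_len; the frontend is
 * expected to subtract whatever it actually moves, so a leftover residual
 * on a write is caught in ctl_datamove_done_process() above.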
*/ 12562 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12563 12564 #ifdef CTL_TIME_IO 12565 getbinuptime(&io->io_hdr.dma_start_bt); 12566 #endif /* CTL_TIME_IO */ 12567 12568 #ifdef CTL_IO_DELAY 12569 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12570 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12571 } else { 12572 struct ctl_lun *lun; 12573 12574 lun = CTL_LUN(io); 12575 if ((lun != NULL) 12576 && (lun->delay_info.datamove_delay > 0)) { 12577 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12578 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12579 callout_reset(&io->io_hdr.delay_callout, 12580 lun->delay_info.datamove_delay * hz, 12581 ctl_datamove_timer_wakeup, io); 12582 if (lun->delay_info.datamove_type == 12583 CTL_DELAY_TYPE_ONESHOT) 12584 lun->delay_info.datamove_delay = 0; 12585 return; 12586 } 12587 } 12588 #endif 12589 12590 /* 12591 * This command has been aborted. Set the port status, so we fail 12592 * the data move. 12593 */ 12594 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12595 printf("ctl_datamove: tag 0x%jx on (%u:%u:%u) aborted\n", 12596 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12597 io->io_hdr.nexus.targ_port, 12598 io->io_hdr.nexus.targ_lun); 12599 io->io_hdr.port_status = 31337; 12600 ctl_datamove_done_process(io); 12601 io->scsiio.be_move_done(io, true); 12602 return; 12603 } 12604 12605 /* Don't confuse frontend with zero length data move. */ 12606 if (io->scsiio.kern_data_len == 0) { 12607 ctl_datamove_done_process(io); 12608 io->scsiio.be_move_done(io, true); 12609 return; 12610 } 12611 12612 fe_datamove = CTL_PORT(io)->fe_datamove; 12613 fe_datamove(io); 12614 } 12615 12616 static void 12617 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12618 { 12619 union ctl_ha_msg msg; 12620 #ifdef CTL_TIME_IO 12621 struct bintime cur_bt; 12622 #endif 12623 12624 memset(&msg, 0, sizeof(msg)); 12625 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12626 msg.hdr.original_sc = io; 12627 msg.hdr.serializing_sc = io->io_hdr.remote_io; 12628 msg.hdr.nexus = io->io_hdr.nexus; 12629 msg.hdr.status = io->io_hdr.status; 12630 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12631 msg.scsi.tag_num = io->scsiio.tag_num; 12632 msg.scsi.tag_type = io->scsiio.tag_type; 12633 msg.scsi.scsi_status = io->scsiio.scsi_status; 12634 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12635 io->scsiio.sense_len); 12636 msg.scsi.sense_len = io->scsiio.sense_len; 12637 msg.scsi.port_status = io->io_hdr.port_status; 12638 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12639 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12640 ctl_failover_io(io, /*have_lock*/ have_lock); 12641 return; 12642 } 12643 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12644 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12645 msg.scsi.sense_len, M_WAITOK); 12646 12647 #ifdef CTL_TIME_IO 12648 getbinuptime(&cur_bt); 12649 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12650 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12651 #endif 12652 io->io_hdr.num_dmas++; 12653 } 12654 12655 /* 12656 * The DMA to the remote side is done, now we need to tell the other side 12657 * we're done so it can continue with its data movement. 
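 * Whether the HA DMA succeeded or not, the callback below frees the local
 * S/G list built by ctl_datamove_remote_sgl_setup() and then sends
 * CTL_MSG_DATAMOVE_DONE back to the originating controller, recording any
 * failure as an internal failure status first.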
*/
12659 static void
12660 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12661 {
12662 union ctl_io *io;
12663 uint32_t i;
12664
12665 io = rq->context;
12666
12667 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12668 printf("%s: ISC DMA write failed with error %d\n", __func__,
12669 rq->ret);
12670 ctl_set_internal_failure(&io->scsiio,
12671 /*sks_valid*/ 1,
12672 /*retry_count*/ rq->ret);
12673 }
12674
12675 ctl_dt_req_free(rq);
12676
12677 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12678 free(CTL_LSGLT(io)[i].addr, M_CTL);
12679 free(CTL_RSGL(io), M_CTL);
12680 CTL_RSGL(io) = NULL;
12681 CTL_LSGL(io) = NULL;
12682
12683 /*
12684 * The data is in local and remote memory, so now we need to send
12685 * status (good or bad) back to the other side.
12686 */
12687 ctl_send_datamove_done(io, /*have_lock*/ 0);
12688 }
12689
12690 /*
12691 * We've moved the data from the host/controller into local memory. Now we
12692 * need to push it over to the remote controller's memory.
12693 */
12694 static int
12695 ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr)
12696 {
12697 int retval;
12698
12699 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
12700 ctl_datamove_remote_write_cb);
12701 return (retval);
12702 }
12703
12704 static void
12705 ctl_datamove_remote_write(union ctl_io *io)
12706 {
12707 int retval;
12708 void (*fe_datamove)(union ctl_io *io);
12709
12710 /*
12711 * - Get the data from the host/HBA into local memory.
12712 * - DMA memory from the local controller to the remote controller.
12713 * - Send status back to the remote controller.
12714 */
12715
12716 retval = ctl_datamove_remote_sgl_setup(io);
12717 if (retval != 0)
12718 return;
12719
12720 /* Switch the pointer over so the FETD knows what to do */
12721 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);
12722
12723 /*
12724 * Use a custom move done callback, since we need to send completion
12725 * back to the other controller, not to the backend on this side.
12726 */
12727 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12728
12729 fe_datamove = CTL_PORT(io)->fe_datamove;
12730 fe_datamove(io);
12731 }
12732
12733 static int
12734 ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr)
12735 {
12736 uint32_t i;
12737
12738 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12739 free(CTL_LSGLT(io)[i].addr, M_CTL);
12740 free(CTL_RSGL(io), M_CTL);
12741 CTL_RSGL(io) = NULL;
12742 CTL_LSGL(io) = NULL;
12743
12744 /*
12745 * The read is done, now we need to send status (good or bad) back
12746 * to the other side.
12747 */
12748 ctl_send_datamove_done(io, /*have_lock*/ 0);
12749
12750 return (0);
12751 }
12752
12753 static void
12754 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
12755 {
12756 union ctl_io *io;
12757 void (*fe_datamove)(union ctl_io *io);
12758
12759 io = rq->context;
12760
12761 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12762 printf("%s: ISC DMA read failed with error %d\n", __func__,
12763 rq->ret);
12764 ctl_set_internal_failure(&io->scsiio,
12765 /*sks_valid*/ 1,
12766 /*retry_count*/ rq->ret);
12767 }
12768
12769 ctl_dt_req_free(rq);
12770
12771 /* Switch the pointer over so the FETD knows what to do */
12772 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);
12773
12774 /*
12775 * Use a custom move done callback, since we need to send completion
12776 * back to the other controller, not to the backend on this side.
12777 */ 12778 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12779 12780 /* XXX KDM add checks like the ones in ctl_datamove? */ 12781 12782 fe_datamove = CTL_PORT(io)->fe_datamove; 12783 fe_datamove(io); 12784 } 12785 12786 static int 12787 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12788 { 12789 struct ctl_sg_entry *local_sglist; 12790 uint32_t len_to_go; 12791 int retval; 12792 int i; 12793 12794 retval = 0; 12795 local_sglist = CTL_LSGL(io); 12796 len_to_go = io->scsiio.kern_data_len; 12797 12798 /* 12799 * The difficult thing here is that the size of the various 12800 * S/G segments may be different than the size from the 12801 * remote controller. That'll make it harder when DMAing 12802 * the data back to the other side. 12803 */ 12804 for (i = 0; len_to_go > 0; i++) { 12805 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12806 local_sglist[i].addr = 12807 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12808 12809 len_to_go -= local_sglist[i].len; 12810 } 12811 /* 12812 * Reset the number of S/G entries accordingly. The original 12813 * number of S/G entries is available in rem_sg_entries. 12814 */ 12815 io->scsiio.kern_sg_entries = i; 12816 12817 return (retval); 12818 } 12819 12820 static int 12821 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12822 ctl_ha_dt_cb callback) 12823 { 12824 struct ctl_ha_dt_req *rq; 12825 struct ctl_sg_entry *remote_sglist, *local_sglist; 12826 uint32_t local_used, remote_used, total_used; 12827 int i, j, isc_ret; 12828 12829 rq = ctl_dt_req_alloc(); 12830 12831 /* 12832 * If we failed to allocate the request, and if the DMA didn't fail 12833 * anyway, set busy status. This is just a resource allocation 12834 * failure. 12835 */ 12836 if ((rq == NULL) 12837 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12838 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12839 ctl_set_busy(&io->scsiio); 12840 12841 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12842 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12843 if (rq != NULL) 12844 ctl_dt_req_free(rq); 12845 12846 /* 12847 * The data move failed. We need to return status back 12848 * to the other controller. No point in trying to DMA 12849 * data to the remote controller. 12850 */ 12851 12852 ctl_send_datamove_done(io, /*have_lock*/ 0); 12853 12854 return (1); 12855 } 12856 12857 local_sglist = CTL_LSGL(io); 12858 remote_sglist = CTL_RSGL(io); 12859 local_used = 0; 12860 remote_used = 0; 12861 total_used = 0; 12862 12863 /* 12864 * Pull/push the data over the wire from/to the other controller. 12865 * This takes into account the possibility that the local and 12866 * remote sglists may not be identical in terms of the size of 12867 * the elements and the number of elements. 12868 * 12869 * One fundamental assumption here is that the length allocated for 12870 * both the local and remote sglists is identical. Otherwise, we've 12871 * essentially got a coding error of some sort. 12872 */ 12873 isc_ret = CTL_HA_STATUS_SUCCESS; 12874 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12875 uint32_t cur_len; 12876 uint8_t *tmp_ptr; 12877 12878 rq->command = command; 12879 rq->context = io; 12880 12881 /* 12882 * Both pointers should be aligned. But it is possible 12883 * that the allocation length is not. They should both 12884 * also have enough slack left over at the end, though, 12885 * to round up to the next 8 byte boundary. 
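 * Each pass moves min(space left in the current local segment, space left
 * in the current remote segment) bytes and then advances whichever
 * cursor(s) hit the end of their segment, which is how mismatched local
 * and remote S/G geometries are reconciled one chunk at a time.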
12886 */ 12887 cur_len = MIN(local_sglist[i].len - local_used, 12888 remote_sglist[j].len - remote_used); 12889 rq->size = cur_len; 12890 12891 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12892 tmp_ptr += local_used; 12893 12894 #if 0 12895 /* Use physical addresses when talking to ISC hardware */ 12896 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12897 /* XXX KDM use busdma */ 12898 rq->local = vtophys(tmp_ptr); 12899 } else 12900 rq->local = tmp_ptr; 12901 #else 12902 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12903 ("HA does not support BUS_ADDR")); 12904 rq->local = tmp_ptr; 12905 #endif 12906 12907 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12908 tmp_ptr += remote_used; 12909 rq->remote = tmp_ptr; 12910 12911 rq->callback = NULL; 12912 12913 local_used += cur_len; 12914 if (local_used >= local_sglist[i].len) { 12915 i++; 12916 local_used = 0; 12917 } 12918 12919 remote_used += cur_len; 12920 if (remote_used >= remote_sglist[j].len) { 12921 j++; 12922 remote_used = 0; 12923 } 12924 total_used += cur_len; 12925 12926 if (total_used >= io->scsiio.kern_data_len) 12927 rq->callback = callback; 12928 12929 isc_ret = ctl_dt_single(rq); 12930 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12931 break; 12932 } 12933 if (isc_ret != CTL_HA_STATUS_WAIT) { 12934 rq->ret = isc_ret; 12935 callback(rq); 12936 } 12937 12938 return (0); 12939 } 12940 12941 static void 12942 ctl_datamove_remote_read(union ctl_io *io) 12943 { 12944 int retval; 12945 uint32_t i; 12946 12947 /* 12948 * This will send an error to the other controller in the case of a 12949 * failure. 12950 */ 12951 retval = ctl_datamove_remote_sgl_setup(io); 12952 if (retval != 0) 12953 return; 12954 12955 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12956 ctl_datamove_remote_read_cb); 12957 if (retval != 0) { 12958 /* 12959 * Make sure we free memory if there was an error.. The 12960 * ctl_datamove_remote_xfer() function will send the 12961 * datamove done message, or call the callback with an 12962 * error if there is a problem. 12963 */ 12964 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12965 free(CTL_LSGLT(io)[i].addr, M_CTL); 12966 free(CTL_RSGL(io), M_CTL); 12967 CTL_RSGL(io) = NULL; 12968 CTL_LSGL(io) = NULL; 12969 } 12970 } 12971 12972 /* 12973 * Process a datamove request from the other controller. This is used for 12974 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12975 * first. Once that is complete, the data gets DMAed into the remote 12976 * controller's memory. For reads, we DMA from the remote controller's 12977 * memory into our memory first, and then move it out to the FETD. 12978 */ 12979 static void 12980 ctl_datamove_remote(union ctl_io *io) 12981 { 12982 12983 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12984 12985 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12986 ctl_failover_io(io, /*have_lock*/ 0); 12987 return; 12988 } 12989 12990 /* 12991 * Note that we look for an aborted I/O here, but don't do some of 12992 * the other checks that ctl_datamove() normally does. 12993 * We don't need to run the datamove delay code, since that should 12994 * have been done if need be on the other controller. 
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%jx on (%u:%u:%u) aborted\n", __func__,
		    io->scsiio.tag_num, io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun);
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
		ctl_datamove_remote_write(io);
	else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		ctl_datamove_remote_read(io);
	else {
		io->io_hdr.port_status = 31339;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
	}
}

static void
ctl_process_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun = CTL_LUN(io);
	void (*fe_done)(union ctl_io *io);
	union ctl_ha_msg msg;

	CTL_DEBUG_PRINT(("ctl_process_done\n"));
	fe_done = port->fe_done;

#ifdef CTL_TIME_IO
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		ctl_io_sbuf(io, &sb);
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
		    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_INFO)
			ctl_io_error_print(io, NULL);
		fe_done(io);
		return;
	default:
		panic("%s: Invalid CTL I/O type %d\n",
		    __func__, io->io_hdr.io_type);
	}

	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
		    io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have an informational exception to report,
	 * and whether the status of this command can be modified to report
	 * it as either RECOVERED ERROR or NO SENSE, depending on the MRIE
	 * mode page field.
	 */
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			     SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
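	/*
	 * Entries on lun->error_list are typically queued from userland
	 * via the CTL error injection ioctl; ctl_inject_error() matches
	 * them against this command and, if one applies, overrides the
	 * completion status accordingly.
	 */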
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type]++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type]++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}

	/*
	 * Run through the blocked queue of this I/O and see if anything
	 * can be unblocked, now that this I/O is done and will be removed.
	 * We need to do this before removal so that we still have this
	 * I/O's OOA position to start from.
	 */
	ctl_try_unblock_others(lun, io, TRUE);

	/*
	 * Remove this from the OOA queue.
	 */
	LIST_REMOVE(&io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (LIST_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && LIST_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
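	/*
	 * When not in XFER mode, a copy of this command may have been sent
	 * to the peer controller for serialization only; CTL_MSG_FINISH_IO
	 * lets the peer release that serialized copy of this I/O.
	 */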
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * Front end should call this if it doesn't do autosense.  When a REQUEST
 * SENSE comes in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	p = initidx / CTL_MAX_INIT_PER_PORT;
	if (lun->pending_sense[p] == NULL) {
		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
		    M_CTL, M_NOWAIT | M_ZERO);
	}
	if ((ps = lun->pending_sense[p]) != NULL) {
		ps += initidx % CTL_MAX_INIT_PER_PORT;
		memset(ps, 0, sizeof(*ps));
		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
	}
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_run(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_run cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_scsiio_precheck(&io->scsiio);
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_run_task(io);
		break;
	default:
		printf("ctl_run: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	/* This is racy, but should not be a problem. */
	if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) {
		mtx_lock(&lun->lun_lock);
		io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
		ctl_try_unblock_others(lun, io, FALSE);
		mtx_unlock(&lun->lun_lock);
	} else
		io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%u:%u:%u tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    io->io_hdr.io_type,
		    io->io_hdr.msg_type,
		    io->scsiio.cdb[0],
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    (io->io_hdr.io_type == CTL_IO_TASK) ?
		     io->taskio.tag_num : io->scsiio.tag_num,
		    io->io_hdr.flags,
		    io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(&io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
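		/*
		 * PDROP means queue_lock is released for the sleep and not
		 * retaken on wakeup; the ctl_enqueue_*() functions call
		 * wakeup(thr) after queueing new work, and the lock is
		 * reacquired at the top of the next loop iteration.
		 */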
		mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
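				/*
				 * ctl_lock is dropped around the M_WAITOK
				 * HA send below, as it is around the
				 * lun_attr() call above, because both may
				 * sleep.
				 */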
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}

static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */