1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 5 * Copyright (c) 2012 The FreeBSD Foundation 6 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org> 7 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org> 8 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org> 9 * All rights reserved. 10 * 11 * Portions of this software were developed by Edward Tomasz Napierala 12 * under sponsorship from the FreeBSD Foundation. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions, and the following disclaimer, 19 * without modification. 20 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 21 * substantially similar to the "NO WARRANTY" disclaimer below 22 * ("Disclaimer") and any redistribution must be conditioned upon 23 * including a substantially similar Disclaimer requirement for further 24 * binary redistribution. 25 * 26 * NO WARRANTY 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGES. 38 * 39 * $Id$ 40 */ 41 /* 42 * CAM Target Layer, a SCSI device emulation subsystem. 43 * 44 * Author: Ken Merry <ken@FreeBSD.org> 45 */ 46 47 #include <sys/cdefs.h> 48 __FBSDID("$FreeBSD$"); 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/ctype.h> 53 #include <sys/kernel.h> 54 #include <sys/types.h> 55 #include <sys/kthread.h> 56 #include <sys/bio.h> 57 #include <sys/fcntl.h> 58 #include <sys/lock.h> 59 #include <sys/module.h> 60 #include <sys/mutex.h> 61 #include <sys/condvar.h> 62 #include <sys/malloc.h> 63 #include <sys/conf.h> 64 #include <sys/ioccom.h> 65 #include <sys/queue.h> 66 #include <sys/sbuf.h> 67 #include <sys/smp.h> 68 #include <sys/endian.h> 69 #include <sys/sysctl.h> 70 #include <sys/nv.h> 71 #include <sys/dnv.h> 72 #include <vm/uma.h> 73 74 #include <cam/cam.h> 75 #include <cam/scsi/scsi_all.h> 76 #include <cam/scsi/scsi_cd.h> 77 #include <cam/scsi/scsi_da.h> 78 #include <cam/ctl/ctl_io.h> 79 #include <cam/ctl/ctl.h> 80 #include <cam/ctl/ctl_frontend.h> 81 #include <cam/ctl/ctl_util.h> 82 #include <cam/ctl/ctl_backend.h> 83 #include <cam/ctl/ctl_ioctl.h> 84 #include <cam/ctl/ctl_ha.h> 85 #include <cam/ctl/ctl_private.h> 86 #include <cam/ctl/ctl_debug.h> 87 #include <cam/ctl/ctl_scsi_all.h> 88 #include <cam/ctl/ctl_error.h> 89 90 struct ctl_softc *control_softc = NULL; 91 92 /* 93 * Template mode pages. 94 */ 95 96 /* 97 * Note that these are default values only. The actual values will be 98 * filled in when the user does a mode sense. 
99 */ 100 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 101 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 102 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 103 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 104 /*read_retry_count*/0, 105 /*correction_span*/0, 106 /*head_offset_count*/0, 107 /*data_strobe_offset_cnt*/0, 108 /*byte8*/SMS_RWER_LBPERE, 109 /*write_retry_count*/0, 110 /*reserved2*/0, 111 /*recovery_time_limit*/{0, 0}, 112 }; 113 114 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 115 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 116 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 117 /*byte3*/SMS_RWER_PER, 118 /*read_retry_count*/0, 119 /*correction_span*/0, 120 /*head_offset_count*/0, 121 /*data_strobe_offset_cnt*/0, 122 /*byte8*/SMS_RWER_LBPERE, 123 /*write_retry_count*/0, 124 /*reserved2*/0, 125 /*recovery_time_limit*/{0, 0}, 126 }; 127 128 const static struct scsi_format_page format_page_default = { 129 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 130 /*page_length*/sizeof(struct scsi_format_page) - 2, 131 /*tracks_per_zone*/ {0, 0}, 132 /*alt_sectors_per_zone*/ {0, 0}, 133 /*alt_tracks_per_zone*/ {0, 0}, 134 /*alt_tracks_per_lun*/ {0, 0}, 135 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 136 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 137 /*bytes_per_sector*/ {0, 0}, 138 /*interleave*/ {0, 0}, 139 /*track_skew*/ {0, 0}, 140 /*cylinder_skew*/ {0, 0}, 141 /*flags*/ SFP_HSEC, 142 /*reserved*/ {0, 0, 0} 143 }; 144 145 const static struct scsi_format_page format_page_changeable = { 146 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 147 /*page_length*/sizeof(struct scsi_format_page) - 2, 148 /*tracks_per_zone*/ {0, 0}, 149 /*alt_sectors_per_zone*/ {0, 0}, 150 /*alt_tracks_per_zone*/ {0, 0}, 151 /*alt_tracks_per_lun*/ {0, 0}, 152 /*sectors_per_track*/ {0, 0}, 153 /*bytes_per_sector*/ {0, 0}, 154 /*interleave*/ {0, 0}, 155 /*track_skew*/ {0, 0}, 156 /*cylinder_skew*/ {0, 0}, 157 /*flags*/ 0, 158 /*reserved*/ {0, 0, 0} 159 }; 160 161 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 162 /*page_code*/SMS_RIGID_DISK_PAGE, 163 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 164 /*cylinders*/ {0, 0, 0}, 165 /*heads*/ CTL_DEFAULT_HEADS, 166 /*start_write_precomp*/ {0, 0, 0}, 167 /*start_reduced_current*/ {0, 0, 0}, 168 /*step_rate*/ {0, 0}, 169 /*landing_zone_cylinder*/ {0, 0, 0}, 170 /*rpl*/ SRDP_RPL_DISABLED, 171 /*rotational_offset*/ 0, 172 /*reserved1*/ 0, 173 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 174 CTL_DEFAULT_ROTATION_RATE & 0xff}, 175 /*reserved2*/ {0, 0} 176 }; 177 178 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 179 /*page_code*/SMS_RIGID_DISK_PAGE, 180 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 181 /*cylinders*/ {0, 0, 0}, 182 /*heads*/ 0, 183 /*start_write_precomp*/ {0, 0, 0}, 184 /*start_reduced_current*/ {0, 0, 0}, 185 /*step_rate*/ {0, 0}, 186 /*landing_zone_cylinder*/ {0, 0, 0}, 187 /*rpl*/ 0, 188 /*rotational_offset*/ 0, 189 /*reserved1*/ 0, 190 /*rotation_rate*/ {0, 0}, 191 /*reserved2*/ {0, 0} 192 }; 193 194 const static struct scsi_da_verify_recovery_page verify_er_page_default = { 195 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 196 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 197 /*byte3*/0, 198 /*read_retry_count*/0, 199 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 200 /*recovery_time_limit*/{0, 0}, 201 }; 202 203 const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { 204 
/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 205 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 206 /*byte3*/SMS_VER_PER, 207 /*read_retry_count*/0, 208 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 209 /*recovery_time_limit*/{0, 0}, 210 }; 211 212 const static struct scsi_caching_page caching_page_default = { 213 /*page_code*/SMS_CACHING_PAGE, 214 /*page_length*/sizeof(struct scsi_caching_page) - 2, 215 /*flags1*/ SCP_DISC | SCP_WCE, 216 /*ret_priority*/ 0, 217 /*disable_pf_transfer_len*/ {0xff, 0xff}, 218 /*min_prefetch*/ {0, 0}, 219 /*max_prefetch*/ {0xff, 0xff}, 220 /*max_pf_ceiling*/ {0xff, 0xff}, 221 /*flags2*/ 0, 222 /*cache_segments*/ 0, 223 /*cache_seg_size*/ {0, 0}, 224 /*reserved*/ 0, 225 /*non_cache_seg_size*/ {0, 0, 0} 226 }; 227 228 const static struct scsi_caching_page caching_page_changeable = { 229 /*page_code*/SMS_CACHING_PAGE, 230 /*page_length*/sizeof(struct scsi_caching_page) - 2, 231 /*flags1*/ SCP_WCE | SCP_RCD, 232 /*ret_priority*/ 0, 233 /*disable_pf_transfer_len*/ {0, 0}, 234 /*min_prefetch*/ {0, 0}, 235 /*max_prefetch*/ {0, 0}, 236 /*max_pf_ceiling*/ {0, 0}, 237 /*flags2*/ 0, 238 /*cache_segments*/ 0, 239 /*cache_seg_size*/ {0, 0}, 240 /*reserved*/ 0, 241 /*non_cache_seg_size*/ {0, 0, 0} 242 }; 243 244 const static struct scsi_control_page control_page_default = { 245 /*page_code*/SMS_CONTROL_MODE_PAGE, 246 /*page_length*/sizeof(struct scsi_control_page) - 2, 247 /*rlec*/0, 248 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 249 /*eca_and_aen*/0, 250 /*flags4*/SCP_TAS, 251 /*aen_holdoff_period*/{0, 0}, 252 /*busy_timeout_period*/{0, 0}, 253 /*extended_selftest_completion_time*/{0, 0} 254 }; 255 256 const static struct scsi_control_page control_page_changeable = { 257 /*page_code*/SMS_CONTROL_MODE_PAGE, 258 /*page_length*/sizeof(struct scsi_control_page) - 2, 259 /*rlec*/SCP_DSENSE, 260 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, 261 /*eca_and_aen*/SCP_SWP, 262 /*flags4*/0, 263 /*aen_holdoff_period*/{0, 0}, 264 /*busy_timeout_period*/{0, 0}, 265 /*extended_selftest_completion_time*/{0, 0} 266 }; 267 268 #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) 269 270 const static struct scsi_control_ext_page control_ext_page_default = { 271 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 272 /*subpage_code*/0x01, 273 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 274 /*flags*/0, 275 /*prio*/0, 276 /*max_sense*/0 277 }; 278 279 const static struct scsi_control_ext_page control_ext_page_changeable = { 280 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 281 /*subpage_code*/0x01, 282 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 283 /*flags*/0, 284 /*prio*/0, 285 /*max_sense*/0xff 286 }; 287 288 const static struct scsi_info_exceptions_page ie_page_default = { 289 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 290 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 291 /*info_flags*/SIEP_FLAGS_EWASC, 292 /*mrie*/SIEP_MRIE_NO, 293 /*interval_timer*/{0, 0, 0, 0}, 294 /*report_count*/{0, 0, 0, 1} 295 }; 296 297 const static struct scsi_info_exceptions_page ie_page_changeable = { 298 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 299 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 300 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | 301 SIEP_FLAGS_LOGERR, 302 /*mrie*/0x0f, 303 /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, 304 /*report_count*/{0xff, 0xff, 0xff, 0xff} 305 }; 306 307 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 308 309 const static struct ctl_logical_block_provisioning_page lbp_page_default = 
{{ 310 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 311 /*subpage_code*/0x02, 312 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 313 /*flags*/0, 314 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 315 /*descr*/{}}, 316 {{/*flags*/0, 317 /*resource*/0x01, 318 /*reserved*/{0, 0}, 319 /*count*/{0, 0, 0, 0}}, 320 {/*flags*/0, 321 /*resource*/0x02, 322 /*reserved*/{0, 0}, 323 /*count*/{0, 0, 0, 0}}, 324 {/*flags*/0, 325 /*resource*/0xf1, 326 /*reserved*/{0, 0}, 327 /*count*/{0, 0, 0, 0}}, 328 {/*flags*/0, 329 /*resource*/0xf2, 330 /*reserved*/{0, 0}, 331 /*count*/{0, 0, 0, 0}} 332 } 333 }; 334 335 const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 336 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 337 /*subpage_code*/0x02, 338 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 339 /*flags*/SLBPP_SITUA, 340 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 341 /*descr*/{}}, 342 {{/*flags*/0, 343 /*resource*/0, 344 /*reserved*/{0, 0}, 345 /*count*/{0, 0, 0, 0}}, 346 {/*flags*/0, 347 /*resource*/0, 348 /*reserved*/{0, 0}, 349 /*count*/{0, 0, 0, 0}}, 350 {/*flags*/0, 351 /*resource*/0, 352 /*reserved*/{0, 0}, 353 /*count*/{0, 0, 0, 0}}, 354 {/*flags*/0, 355 /*resource*/0, 356 /*reserved*/{0, 0}, 357 /*count*/{0, 0, 0, 0}} 358 } 359 }; 360 361 const static struct scsi_cddvd_capabilities_page cddvd_page_default = { 362 /*page_code*/SMS_CDDVD_CAPS_PAGE, 363 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 364 /*caps1*/0x3f, 365 /*caps2*/0x00, 366 /*caps3*/0xf0, 367 /*caps4*/0x00, 368 /*caps5*/0x29, 369 /*caps6*/0x00, 370 /*obsolete*/{0, 0}, 371 /*nvol_levels*/{0, 0}, 372 /*buffer_size*/{8, 0}, 373 /*obsolete2*/{0, 0}, 374 /*reserved*/0, 375 /*digital*/0, 376 /*obsolete3*/0, 377 /*copy_management*/0, 378 /*reserved2*/0, 379 /*rotation_control*/0, 380 /*cur_write_speed*/0, 381 /*num_speed_descr*/0, 382 }; 383 384 const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { 385 /*page_code*/SMS_CDDVD_CAPS_PAGE, 386 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 387 /*caps1*/0, 388 /*caps2*/0, 389 /*caps3*/0, 390 /*caps4*/0, 391 /*caps5*/0, 392 /*caps6*/0, 393 /*obsolete*/{0, 0}, 394 /*nvol_levels*/{0, 0}, 395 /*buffer_size*/{0, 0}, 396 /*obsolete2*/{0, 0}, 397 /*reserved*/0, 398 /*digital*/0, 399 /*obsolete3*/0, 400 /*copy_management*/0, 401 /*reserved2*/0, 402 /*rotation_control*/0, 403 /*cur_write_speed*/0, 404 /*num_speed_descr*/0, 405 }; 406 407 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 408 static int worker_threads = -1; 409 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 410 &worker_threads, 1, "Number of worker threads"); 411 static int ctl_debug = CTL_DEBUG_NONE; 412 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 413 &ctl_debug, 0, "Enabled debug flags"); 414 static int ctl_lun_map_size = 1024; 415 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, 416 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); 417 #ifdef CTL_TIME_IO 418 static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; 419 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, 420 &ctl_time_io_secs, 0, "Log requests taking more seconds"); 421 #endif 422 423 /* 424 * Maximum number of LUNs we support. MUST be a power of 2. 
425 */ 426 #define CTL_DEFAULT_MAX_LUNS 1024 427 static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 428 TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns); 429 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN, 430 &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs"); 431 432 /* 433 * Maximum number of ports registered at one time. 434 */ 435 #define CTL_DEFAULT_MAX_PORTS 256 436 static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 437 TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports); 438 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN, 439 &ctl_max_ports, CTL_DEFAULT_MAX_LUNS, "Maximum number of ports"); 440 441 /* 442 * Maximum number of initiators we support. 443 */ 444 #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports) 445 446 /* 447 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 448 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 449 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 450 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 451 */ 452 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 453 454 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 455 int param); 456 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 457 static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 458 static int ctl_init(void); 459 static int ctl_shutdown(void); 460 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 461 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 462 static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 463 static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 464 struct ctl_ooa *ooa_hdr, 465 struct ctl_ooa_entry *kern_entries); 466 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 467 struct thread *td); 468 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 469 struct ctl_be_lun *be_lun); 470 static int ctl_free_lun(struct ctl_lun *lun); 471 static void ctl_create_lun(struct ctl_be_lun *be_lun); 472 473 static int ctl_do_mode_select(union ctl_io *io); 474 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 475 uint64_t res_key, uint64_t sa_res_key, 476 uint8_t type, uint32_t residx, 477 struct ctl_scsiio *ctsio, 478 struct scsi_per_res_out *cdb, 479 struct scsi_per_res_out_parms* param); 480 static void ctl_pro_preempt_other(struct ctl_lun *lun, 481 union ctl_ha_msg *msg); 482 static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); 483 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 484 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 485 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 486 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 487 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 488 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 489 int alloc_len); 490 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 491 int alloc_len); 492 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 493 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 494 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 495 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 496 static int ctl_get_lba_len(union ctl_io *io, uint64_t 
*lba, uint64_t *len); 497 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 498 bool seq); 499 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 500 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 501 union ctl_io *pending_io, union ctl_io *ooa_io); 502 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 503 union ctl_io *starting_io); 504 static int ctl_check_blocked(struct ctl_lun *lun); 505 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 506 const struct ctl_cmd_entry *entry, 507 struct ctl_scsiio *ctsio); 508 static void ctl_failover_lun(union ctl_io *io); 509 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 510 struct ctl_scsiio *ctsio); 511 static int ctl_scsiio(struct ctl_scsiio *ctsio); 512 513 static int ctl_target_reset(union ctl_io *io); 514 static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, 515 ctl_ua_type ua_type); 516 static int ctl_lun_reset(union ctl_io *io); 517 static int ctl_abort_task(union ctl_io *io); 518 static int ctl_abort_task_set(union ctl_io *io); 519 static int ctl_query_task(union ctl_io *io, int task_set); 520 static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 521 ctl_ua_type ua_type); 522 static int ctl_i_t_nexus_reset(union ctl_io *io); 523 static int ctl_query_async_event(union ctl_io *io); 524 static void ctl_run_task(union ctl_io *io); 525 #ifdef CTL_IO_DELAY 526 static void ctl_datamove_timer_wakeup(void *arg); 527 static void ctl_done_timer_wakeup(void *arg); 528 #endif /* CTL_IO_DELAY */ 529 530 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 531 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 532 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 533 static void ctl_datamove_remote_write(union ctl_io *io); 534 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 535 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 536 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 537 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 538 ctl_ha_dt_cb callback); 539 static void ctl_datamove_remote_read(union ctl_io *io); 540 static void ctl_datamove_remote(union ctl_io *io); 541 static void ctl_process_done(union ctl_io *io); 542 static void ctl_lun_thread(void *arg); 543 static void ctl_thresh_thread(void *arg); 544 static void ctl_work_thread(void *arg); 545 static void ctl_enqueue_incoming(union ctl_io *io); 546 static void ctl_enqueue_rtr(union ctl_io *io); 547 static void ctl_enqueue_done(union ctl_io *io); 548 static void ctl_enqueue_isc(union ctl_io *io); 549 static const struct ctl_cmd_entry * 550 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 551 static const struct ctl_cmd_entry * 552 ctl_validate_command(struct ctl_scsiio *ctsio); 553 static int ctl_cmd_applicable(uint8_t lun_type, 554 const struct ctl_cmd_entry *entry); 555 static int ctl_ha_init(void); 556 static int ctl_ha_shutdown(void); 557 558 static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 559 static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 560 static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 561 static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 562 563 /* 564 * Load the serialization table. This isn't very pretty, but is probably 565 * the easiest way to do it. 
566 */ 567 #include "ctl_ser_table.c" 568 569 /* 570 * We only need to define open, close and ioctl routines for this driver. 571 */ 572 static struct cdevsw ctl_cdevsw = { 573 .d_version = D_VERSION, 574 .d_flags = 0, 575 .d_open = ctl_open, 576 .d_close = ctl_close, 577 .d_ioctl = ctl_ioctl, 578 .d_name = "ctl", 579 }; 580 581 582 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 583 584 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 585 586 static moduledata_t ctl_moduledata = { 587 "ctl", 588 ctl_module_event_handler, 589 NULL 590 }; 591 592 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 593 MODULE_VERSION(ctl, 1); 594 595 static struct ctl_frontend ha_frontend = 596 { 597 .name = "ha", 598 .init = ctl_ha_init, 599 .shutdown = ctl_ha_shutdown, 600 }; 601 602 static int 603 ctl_ha_init(void) 604 { 605 struct ctl_softc *softc = control_softc; 606 607 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 608 &softc->othersc_pool) != 0) 609 return (ENOMEM); 610 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 611 ctl_pool_free(softc->othersc_pool); 612 return (EIO); 613 } 614 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 615 != CTL_HA_STATUS_SUCCESS) { 616 ctl_ha_msg_destroy(softc); 617 ctl_pool_free(softc->othersc_pool); 618 return (EIO); 619 } 620 return (0); 621 }; 622 623 static int 624 ctl_ha_shutdown(void) 625 { 626 struct ctl_softc *softc = control_softc; 627 struct ctl_port *port; 628 629 ctl_ha_msg_shutdown(softc); 630 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) 631 return (EIO); 632 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 633 return (EIO); 634 ctl_pool_free(softc->othersc_pool); 635 while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { 636 ctl_port_deregister(port); 637 free(port->port_name, M_CTL); 638 free(port, M_CTL); 639 } 640 return (0); 641 }; 642 643 static void 644 ctl_ha_datamove(union ctl_io *io) 645 { 646 struct ctl_lun *lun = CTL_LUN(io); 647 struct ctl_sg_entry *sgl; 648 union ctl_ha_msg msg; 649 uint32_t sg_entries_sent; 650 int do_sg_copy, i, j; 651 652 memset(&msg.dt, 0, sizeof(msg.dt)); 653 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 654 msg.hdr.original_sc = io->io_hdr.original_sc; 655 msg.hdr.serializing_sc = io; 656 msg.hdr.nexus = io->io_hdr.nexus; 657 msg.hdr.status = io->io_hdr.status; 658 msg.dt.flags = io->io_hdr.flags; 659 660 /* 661 * We convert everything into a S/G list here. We can't 662 * pass by reference, only by value between controllers. 663 * So we can't pass a pointer to the S/G list, only as many 664 * S/G entries as we can fit in here. If it's possible for 665 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 666 * then we need to break this up into multiple transfers. 667 */ 668 if (io->scsiio.kern_sg_entries == 0) { 669 msg.dt.kern_sg_entries = 1; 670 #if 0 671 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 672 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 673 } else { 674 /* XXX KDM use busdma here! 
*/ 675 msg.dt.sg_list[0].addr = 676 (void *)vtophys(io->scsiio.kern_data_ptr); 677 } 678 #else 679 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 680 ("HA does not support BUS_ADDR")); 681 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 682 #endif 683 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 684 do_sg_copy = 0; 685 } else { 686 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 687 do_sg_copy = 1; 688 } 689 690 msg.dt.kern_data_len = io->scsiio.kern_data_len; 691 msg.dt.kern_total_len = io->scsiio.kern_total_len; 692 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 693 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 694 msg.dt.sg_sequence = 0; 695 696 /* 697 * Loop until we've sent all of the S/G entries. On the 698 * other end, we'll recompose these S/G entries into one 699 * contiguous list before processing. 700 */ 701 for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; 702 msg.dt.sg_sequence++) { 703 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / 704 sizeof(msg.dt.sg_list[0])), 705 msg.dt.kern_sg_entries - sg_entries_sent); 706 if (do_sg_copy != 0) { 707 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 708 for (i = sg_entries_sent, j = 0; 709 i < msg.dt.cur_sg_entries; i++, j++) { 710 #if 0 711 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 712 msg.dt.sg_list[j].addr = sgl[i].addr; 713 } else { 714 /* XXX KDM use busdma here! */ 715 msg.dt.sg_list[j].addr = 716 (void *)vtophys(sgl[i].addr); 717 } 718 #else 719 KASSERT((io->io_hdr.flags & 720 CTL_FLAG_BUS_ADDR) == 0, 721 ("HA does not support BUS_ADDR")); 722 msg.dt.sg_list[j].addr = sgl[i].addr; 723 #endif 724 msg.dt.sg_list[j].len = sgl[i].len; 725 } 726 } 727 728 sg_entries_sent += msg.dt.cur_sg_entries; 729 msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); 730 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 731 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 732 sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, 733 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 734 io->io_hdr.port_status = 31341; 735 io->scsiio.be_move_done(io); 736 return; 737 } 738 msg.dt.sent_sg_entries = sg_entries_sent; 739 } 740 741 /* 742 * Officially handover the request from us to peer. 743 * If failover has just happened, then we must return error. 744 * If failover happen just after, then it is not our problem. 
745 */ 746 if (lun) 747 mtx_lock(&lun->lun_lock); 748 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 749 if (lun) 750 mtx_unlock(&lun->lun_lock); 751 io->io_hdr.port_status = 31342; 752 io->scsiio.be_move_done(io); 753 return; 754 } 755 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 756 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 757 if (lun) 758 mtx_unlock(&lun->lun_lock); 759 } 760 761 static void 762 ctl_ha_done(union ctl_io *io) 763 { 764 union ctl_ha_msg msg; 765 766 if (io->io_hdr.io_type == CTL_IO_SCSI) { 767 memset(&msg, 0, sizeof(msg)); 768 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 769 msg.hdr.original_sc = io->io_hdr.original_sc; 770 msg.hdr.nexus = io->io_hdr.nexus; 771 msg.hdr.status = io->io_hdr.status; 772 msg.scsi.scsi_status = io->scsiio.scsi_status; 773 msg.scsi.tag_num = io->scsiio.tag_num; 774 msg.scsi.tag_type = io->scsiio.tag_type; 775 msg.scsi.sense_len = io->scsiio.sense_len; 776 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 777 io->scsiio.sense_len); 778 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 779 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 780 msg.scsi.sense_len, M_WAITOK); 781 } 782 ctl_free_io(io); 783 } 784 785 static void 786 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 787 union ctl_ha_msg *msg_info) 788 { 789 struct ctl_scsiio *ctsio; 790 791 if (msg_info->hdr.original_sc == NULL) { 792 printf("%s: original_sc == NULL!\n", __func__); 793 /* XXX KDM now what? */ 794 return; 795 } 796 797 ctsio = &msg_info->hdr.original_sc->scsiio; 798 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 799 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 800 ctsio->io_hdr.status = msg_info->hdr.status; 801 ctsio->scsi_status = msg_info->scsi.scsi_status; 802 ctsio->sense_len = msg_info->scsi.sense_len; 803 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 804 msg_info->scsi.sense_len); 805 ctl_enqueue_isc((union ctl_io *)ctsio); 806 } 807 808 static void 809 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 810 union ctl_ha_msg *msg_info) 811 { 812 struct ctl_scsiio *ctsio; 813 814 if (msg_info->hdr.serializing_sc == NULL) { 815 printf("%s: serializing_sc == NULL!\n", __func__); 816 /* XXX KDM now what? 
*/ 817 return; 818 } 819 820 ctsio = &msg_info->hdr.serializing_sc->scsiio; 821 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 822 ctl_enqueue_isc((union ctl_io *)ctsio); 823 } 824 825 void 826 ctl_isc_announce_lun(struct ctl_lun *lun) 827 { 828 struct ctl_softc *softc = lun->ctl_softc; 829 union ctl_ha_msg *msg; 830 struct ctl_ha_msg_lun_pr_key pr_key; 831 int i, k; 832 833 if (softc->ha_link != CTL_HA_LINK_ONLINE) 834 return; 835 mtx_lock(&lun->lun_lock); 836 i = sizeof(msg->lun); 837 if (lun->lun_devid) 838 i += lun->lun_devid->len; 839 i += sizeof(pr_key) * lun->pr_key_count; 840 alloc: 841 mtx_unlock(&lun->lun_lock); 842 msg = malloc(i, M_CTL, M_WAITOK); 843 mtx_lock(&lun->lun_lock); 844 k = sizeof(msg->lun); 845 if (lun->lun_devid) 846 k += lun->lun_devid->len; 847 k += sizeof(pr_key) * lun->pr_key_count; 848 if (i < k) { 849 free(msg, M_CTL); 850 i = k; 851 goto alloc; 852 } 853 bzero(&msg->lun, sizeof(msg->lun)); 854 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 855 msg->hdr.nexus.targ_lun = lun->lun; 856 msg->hdr.nexus.targ_mapped_lun = lun->lun; 857 msg->lun.flags = lun->flags; 858 msg->lun.pr_generation = lun->pr_generation; 859 msg->lun.pr_res_idx = lun->pr_res_idx; 860 msg->lun.pr_res_type = lun->pr_res_type; 861 msg->lun.pr_key_count = lun->pr_key_count; 862 i = 0; 863 if (lun->lun_devid) { 864 msg->lun.lun_devid_len = lun->lun_devid->len; 865 memcpy(&msg->lun.data[i], lun->lun_devid->data, 866 msg->lun.lun_devid_len); 867 i += msg->lun.lun_devid_len; 868 } 869 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 870 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 871 continue; 872 pr_key.pr_iid = k; 873 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 874 i += sizeof(pr_key); 875 } 876 mtx_unlock(&lun->lun_lock); 877 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 878 M_WAITOK); 879 free(msg, M_CTL); 880 881 if (lun->flags & CTL_LUN_PRIMARY_SC) { 882 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 883 ctl_isc_announce_mode(lun, -1, 884 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 885 lun->mode_pages.index[i].subpage); 886 } 887 } 888 } 889 890 void 891 ctl_isc_announce_port(struct ctl_port *port) 892 { 893 struct ctl_softc *softc = port->ctl_softc; 894 union ctl_ha_msg *msg; 895 int i; 896 897 if (port->targ_port < softc->port_min || 898 port->targ_port >= softc->port_max || 899 softc->ha_link != CTL_HA_LINK_ONLINE) 900 return; 901 i = sizeof(msg->port) + strlen(port->port_name) + 1; 902 if (port->lun_map) 903 i += port->lun_map_size * sizeof(uint32_t); 904 if (port->port_devid) 905 i += port->port_devid->len; 906 if (port->target_devid) 907 i += port->target_devid->len; 908 if (port->init_devid) 909 i += port->init_devid->len; 910 msg = malloc(i, M_CTL, M_WAITOK); 911 bzero(&msg->port, sizeof(msg->port)); 912 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 913 msg->hdr.nexus.targ_port = port->targ_port; 914 msg->port.port_type = port->port_type; 915 msg->port.physical_port = port->physical_port; 916 msg->port.virtual_port = port->virtual_port; 917 msg->port.status = port->status; 918 i = 0; 919 msg->port.name_len = sprintf(&msg->port.data[i], 920 "%d:%s", softc->ha_id, port->port_name) + 1; 921 i += msg->port.name_len; 922 if (port->lun_map) { 923 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); 924 memcpy(&msg->port.data[i], port->lun_map, 925 msg->port.lun_map_len); 926 i += msg->port.lun_map_len; 927 } 928 if (port->port_devid) { 929 msg->port.port_devid_len = port->port_devid->len; 930 memcpy(&msg->port.data[i], port->port_devid->data, 931 
msg->port.port_devid_len); 932 i += msg->port.port_devid_len; 933 } 934 if (port->target_devid) { 935 msg->port.target_devid_len = port->target_devid->len; 936 memcpy(&msg->port.data[i], port->target_devid->data, 937 msg->port.target_devid_len); 938 i += msg->port.target_devid_len; 939 } 940 if (port->init_devid) { 941 msg->port.init_devid_len = port->init_devid->len; 942 memcpy(&msg->port.data[i], port->init_devid->data, 943 msg->port.init_devid_len); 944 i += msg->port.init_devid_len; 945 } 946 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 947 M_WAITOK); 948 free(msg, M_CTL); 949 } 950 951 void 952 ctl_isc_announce_iid(struct ctl_port *port, int iid) 953 { 954 struct ctl_softc *softc = port->ctl_softc; 955 union ctl_ha_msg *msg; 956 int i, l; 957 958 if (port->targ_port < softc->port_min || 959 port->targ_port >= softc->port_max || 960 softc->ha_link != CTL_HA_LINK_ONLINE) 961 return; 962 mtx_lock(&softc->ctl_lock); 963 i = sizeof(msg->iid); 964 l = 0; 965 if (port->wwpn_iid[iid].name) 966 l = strlen(port->wwpn_iid[iid].name) + 1; 967 i += l; 968 msg = malloc(i, M_CTL, M_NOWAIT); 969 if (msg == NULL) { 970 mtx_unlock(&softc->ctl_lock); 971 return; 972 } 973 bzero(&msg->iid, sizeof(msg->iid)); 974 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 975 msg->hdr.nexus.targ_port = port->targ_port; 976 msg->hdr.nexus.initid = iid; 977 msg->iid.in_use = port->wwpn_iid[iid].in_use; 978 msg->iid.name_len = l; 979 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 980 if (port->wwpn_iid[iid].name) 981 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 982 mtx_unlock(&softc->ctl_lock); 983 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 984 free(msg, M_CTL); 985 } 986 987 void 988 ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, 989 uint8_t page, uint8_t subpage) 990 { 991 struct ctl_softc *softc = lun->ctl_softc; 992 union ctl_ha_msg msg; 993 u_int i; 994 995 if (softc->ha_link != CTL_HA_LINK_ONLINE) 996 return; 997 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 998 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 999 page && lun->mode_pages.index[i].subpage == subpage) 1000 break; 1001 } 1002 if (i == CTL_NUM_MODE_PAGES) 1003 return; 1004 1005 /* Don't try to replicate pages not present on this device. */ 1006 if (lun->mode_pages.index[i].page_data == NULL) 1007 return; 1008 1009 bzero(&msg.mode, sizeof(msg.mode)); 1010 msg.hdr.msg_type = CTL_MSG_MODE_SYNC; 1011 msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; 1012 msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; 1013 msg.hdr.nexus.targ_lun = lun->lun; 1014 msg.hdr.nexus.targ_mapped_lun = lun->lun; 1015 msg.mode.page_code = page; 1016 msg.mode.subpage = subpage; 1017 msg.mode.page_len = lun->mode_pages.index[i].page_len; 1018 memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, 1019 msg.mode.page_len); 1020 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), 1021 M_WAITOK); 1022 } 1023 1024 static void 1025 ctl_isc_ha_link_up(struct ctl_softc *softc) 1026 { 1027 struct ctl_port *port; 1028 struct ctl_lun *lun; 1029 union ctl_ha_msg msg; 1030 int i; 1031 1032 /* Announce this node parameters to peer for validation. 
*/ 1033 msg.login.msg_type = CTL_MSG_LOGIN; 1034 msg.login.version = CTL_HA_VERSION; 1035 msg.login.ha_mode = softc->ha_mode; 1036 msg.login.ha_id = softc->ha_id; 1037 msg.login.max_luns = ctl_max_luns; 1038 msg.login.max_ports = ctl_max_ports; 1039 msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; 1040 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), 1041 M_WAITOK); 1042 1043 STAILQ_FOREACH(port, &softc->port_list, links) { 1044 ctl_isc_announce_port(port); 1045 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1046 if (port->wwpn_iid[i].in_use) 1047 ctl_isc_announce_iid(port, i); 1048 } 1049 } 1050 STAILQ_FOREACH(lun, &softc->lun_list, links) 1051 ctl_isc_announce_lun(lun); 1052 } 1053 1054 static void 1055 ctl_isc_ha_link_down(struct ctl_softc *softc) 1056 { 1057 struct ctl_port *port; 1058 struct ctl_lun *lun; 1059 union ctl_io *io; 1060 int i; 1061 1062 mtx_lock(&softc->ctl_lock); 1063 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1064 mtx_lock(&lun->lun_lock); 1065 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 1066 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1067 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1068 } 1069 mtx_unlock(&lun->lun_lock); 1070 1071 mtx_unlock(&softc->ctl_lock); 1072 io = ctl_alloc_io(softc->othersc_pool); 1073 mtx_lock(&softc->ctl_lock); 1074 ctl_zero_io(io); 1075 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 1076 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 1077 ctl_enqueue_isc(io); 1078 } 1079 1080 STAILQ_FOREACH(port, &softc->port_list, links) { 1081 if (port->targ_port >= softc->port_min && 1082 port->targ_port < softc->port_max) 1083 continue; 1084 port->status &= ~CTL_PORT_STATUS_ONLINE; 1085 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1086 port->wwpn_iid[i].in_use = 0; 1087 free(port->wwpn_iid[i].name, M_CTL); 1088 port->wwpn_iid[i].name = NULL; 1089 } 1090 } 1091 mtx_unlock(&softc->ctl_lock); 1092 } 1093 1094 static void 1095 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1096 { 1097 struct ctl_lun *lun; 1098 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 1099 1100 mtx_lock(&softc->ctl_lock); 1101 if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || 1102 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { 1103 mtx_unlock(&softc->ctl_lock); 1104 return; 1105 } 1106 mtx_lock(&lun->lun_lock); 1107 mtx_unlock(&softc->ctl_lock); 1108 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) 1109 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); 1110 if (msg->ua.ua_all) { 1111 if (msg->ua.ua_set) 1112 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 1113 else 1114 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 1115 } else { 1116 if (msg->ua.ua_set) 1117 ctl_est_ua(lun, iid, msg->ua.ua_type); 1118 else 1119 ctl_clr_ua(lun, iid, msg->ua.ua_type); 1120 } 1121 mtx_unlock(&lun->lun_lock); 1122 } 1123 1124 static void 1125 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1126 { 1127 struct ctl_lun *lun; 1128 struct ctl_ha_msg_lun_pr_key pr_key; 1129 int i, k; 1130 ctl_lun_flags oflags; 1131 uint32_t targ_lun; 1132 1133 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1134 mtx_lock(&softc->ctl_lock); 1135 if (targ_lun >= ctl_max_luns || 1136 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1137 mtx_unlock(&softc->ctl_lock); 1138 return; 1139 } 1140 mtx_lock(&lun->lun_lock); 1141 mtx_unlock(&softc->ctl_lock); 1142 if (lun->flags & CTL_LUN_DISABLED) { 1143 mtx_unlock(&lun->lun_lock); 1144 return; 1145 } 1146 i = (lun->lun_devid != NULL) ? 
lun->lun_devid->len : 0; 1147 if (msg->lun.lun_devid_len != i || (i > 0 && 1148 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 1149 mtx_unlock(&lun->lun_lock); 1150 printf("%s: Received conflicting HA LUN %d\n", 1151 __func__, targ_lun); 1152 return; 1153 } else { 1154 /* Record whether peer is primary. */ 1155 oflags = lun->flags; 1156 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 1157 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 1158 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 1159 else 1160 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1161 if (oflags != lun->flags) 1162 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1163 1164 /* If peer is primary and we are not -- use data */ 1165 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 1166 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 1167 lun->pr_generation = msg->lun.pr_generation; 1168 lun->pr_res_idx = msg->lun.pr_res_idx; 1169 lun->pr_res_type = msg->lun.pr_res_type; 1170 lun->pr_key_count = msg->lun.pr_key_count; 1171 for (k = 0; k < CTL_MAX_INITIATORS; k++) 1172 ctl_clr_prkey(lun, k); 1173 for (k = 0; k < msg->lun.pr_key_count; k++) { 1174 memcpy(&pr_key, &msg->lun.data[i], 1175 sizeof(pr_key)); 1176 ctl_alloc_prkey(lun, pr_key.pr_iid); 1177 ctl_set_prkey(lun, pr_key.pr_iid, 1178 pr_key.pr_key); 1179 i += sizeof(pr_key); 1180 } 1181 } 1182 1183 mtx_unlock(&lun->lun_lock); 1184 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 1185 __func__, targ_lun, 1186 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 1187 "primary" : "secondary")); 1188 1189 /* If we are primary but peer doesn't know -- notify */ 1190 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 1191 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 1192 ctl_isc_announce_lun(lun); 1193 } 1194 } 1195 1196 static void 1197 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1198 { 1199 struct ctl_port *port; 1200 struct ctl_lun *lun; 1201 int i, new; 1202 1203 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1204 if (port == NULL) { 1205 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 1206 msg->hdr.nexus.targ_port)); 1207 new = 1; 1208 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 1209 port->frontend = &ha_frontend; 1210 port->targ_port = msg->hdr.nexus.targ_port; 1211 port->fe_datamove = ctl_ha_datamove; 1212 port->fe_done = ctl_ha_done; 1213 } else if (port->frontend == &ha_frontend) { 1214 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 1215 msg->hdr.nexus.targ_port)); 1216 new = 0; 1217 } else { 1218 printf("%s: Received conflicting HA port %d\n", 1219 __func__, msg->hdr.nexus.targ_port); 1220 return; 1221 } 1222 port->port_type = msg->port.port_type; 1223 port->physical_port = msg->port.physical_port; 1224 port->virtual_port = msg->port.virtual_port; 1225 port->status = msg->port.status; 1226 i = 0; 1227 free(port->port_name, M_CTL); 1228 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 1229 M_CTL); 1230 i += msg->port.name_len; 1231 if (msg->port.lun_map_len != 0) { 1232 if (port->lun_map == NULL || 1233 port->lun_map_size * sizeof(uint32_t) < 1234 msg->port.lun_map_len) { 1235 port->lun_map_size = 0; 1236 free(port->lun_map, M_CTL); 1237 port->lun_map = malloc(msg->port.lun_map_len, 1238 M_CTL, M_WAITOK); 1239 } 1240 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); 1241 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); 1242 i += msg->port.lun_map_len; 1243 } else { 1244 port->lun_map_size = 0; 1245 free(port->lun_map, M_CTL); 1246 port->lun_map = NULL; 1247 } 1248 if (msg->port.port_devid_len != 0) { 1249 if 
(port->port_devid == NULL || 1250 port->port_devid->len < msg->port.port_devid_len) { 1251 free(port->port_devid, M_CTL); 1252 port->port_devid = malloc(sizeof(struct ctl_devid) + 1253 msg->port.port_devid_len, M_CTL, M_WAITOK); 1254 } 1255 memcpy(port->port_devid->data, &msg->port.data[i], 1256 msg->port.port_devid_len); 1257 port->port_devid->len = msg->port.port_devid_len; 1258 i += msg->port.port_devid_len; 1259 } else { 1260 free(port->port_devid, M_CTL); 1261 port->port_devid = NULL; 1262 } 1263 if (msg->port.target_devid_len != 0) { 1264 if (port->target_devid == NULL || 1265 port->target_devid->len < msg->port.target_devid_len) { 1266 free(port->target_devid, M_CTL); 1267 port->target_devid = malloc(sizeof(struct ctl_devid) + 1268 msg->port.target_devid_len, M_CTL, M_WAITOK); 1269 } 1270 memcpy(port->target_devid->data, &msg->port.data[i], 1271 msg->port.target_devid_len); 1272 port->target_devid->len = msg->port.target_devid_len; 1273 i += msg->port.target_devid_len; 1274 } else { 1275 free(port->target_devid, M_CTL); 1276 port->target_devid = NULL; 1277 } 1278 if (msg->port.init_devid_len != 0) { 1279 if (port->init_devid == NULL || 1280 port->init_devid->len < msg->port.init_devid_len) { 1281 free(port->init_devid, M_CTL); 1282 port->init_devid = malloc(sizeof(struct ctl_devid) + 1283 msg->port.init_devid_len, M_CTL, M_WAITOK); 1284 } 1285 memcpy(port->init_devid->data, &msg->port.data[i], 1286 msg->port.init_devid_len); 1287 port->init_devid->len = msg->port.init_devid_len; 1288 i += msg->port.init_devid_len; 1289 } else { 1290 free(port->init_devid, M_CTL); 1291 port->init_devid = NULL; 1292 } 1293 if (new) { 1294 if (ctl_port_register(port) != 0) { 1295 printf("%s: ctl_port_register() failed with error\n", 1296 __func__); 1297 } 1298 } 1299 mtx_lock(&softc->ctl_lock); 1300 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1301 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 1302 continue; 1303 mtx_lock(&lun->lun_lock); 1304 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); 1305 mtx_unlock(&lun->lun_lock); 1306 } 1307 mtx_unlock(&softc->ctl_lock); 1308 } 1309 1310 static void 1311 ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1312 { 1313 struct ctl_port *port; 1314 int iid; 1315 1316 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1317 if (port == NULL) { 1318 printf("%s: Received IID for unknown port %d\n", 1319 __func__, msg->hdr.nexus.targ_port); 1320 return; 1321 } 1322 iid = msg->hdr.nexus.initid; 1323 if (port->wwpn_iid[iid].in_use != 0 && 1324 msg->iid.in_use == 0) 1325 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 1326 port->wwpn_iid[iid].in_use = msg->iid.in_use; 1327 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; 1328 free(port->wwpn_iid[iid].name, M_CTL); 1329 if (msg->iid.name_len) { 1330 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], 1331 msg->iid.name_len, M_CTL); 1332 } else 1333 port->wwpn_iid[iid].name = NULL; 1334 } 1335 1336 static void 1337 ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1338 { 1339 1340 if (msg->login.version != CTL_HA_VERSION) { 1341 printf("CTL HA peers have different versions %d != %d\n", 1342 msg->login.version, CTL_HA_VERSION); 1343 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1344 return; 1345 } 1346 if (msg->login.ha_mode != softc->ha_mode) { 1347 printf("CTL HA peers have different ha_mode %d != %d\n", 1348 msg->login.ha_mode, softc->ha_mode); 1349 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1350 return; 1351 } 1352 if (msg->login.ha_id == softc->ha_id) { 1353 printf("CTL HA peers have same 
ha_id %d\n", msg->login.ha_id); 1354 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1355 return; 1356 } 1357 if (msg->login.max_luns != ctl_max_luns || 1358 msg->login.max_ports != ctl_max_ports || 1359 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { 1360 printf("CTL HA peers have different limits\n"); 1361 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1362 return; 1363 } 1364 } 1365 1366 static void 1367 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1368 { 1369 struct ctl_lun *lun; 1370 u_int i; 1371 uint32_t initidx, targ_lun; 1372 1373 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1374 mtx_lock(&softc->ctl_lock); 1375 if (targ_lun >= ctl_max_luns || 1376 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1377 mtx_unlock(&softc->ctl_lock); 1378 return; 1379 } 1380 mtx_lock(&lun->lun_lock); 1381 mtx_unlock(&softc->ctl_lock); 1382 if (lun->flags & CTL_LUN_DISABLED) { 1383 mtx_unlock(&lun->lun_lock); 1384 return; 1385 } 1386 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1387 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1388 msg->mode.page_code && 1389 lun->mode_pages.index[i].subpage == msg->mode.subpage) 1390 break; 1391 } 1392 if (i == CTL_NUM_MODE_PAGES) { 1393 mtx_unlock(&lun->lun_lock); 1394 return; 1395 } 1396 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, 1397 lun->mode_pages.index[i].page_len); 1398 initidx = ctl_get_initindex(&msg->hdr.nexus); 1399 if (initidx != -1) 1400 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 1401 mtx_unlock(&lun->lun_lock); 1402 } 1403 1404 /* 1405 * ISC (Inter Shelf Communication) event handler. Events from the HA 1406 * subsystem come in here. 1407 */ 1408 static void 1409 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 1410 { 1411 struct ctl_softc *softc = control_softc; 1412 union ctl_io *io; 1413 struct ctl_prio *presio; 1414 ctl_ha_status isc_status; 1415 1416 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 1417 if (event == CTL_HA_EVT_MSG_RECV) { 1418 union ctl_ha_msg *msg, msgbuf; 1419 1420 if (param > sizeof(msgbuf)) 1421 msg = malloc(param, M_CTL, M_WAITOK); 1422 else 1423 msg = &msgbuf; 1424 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1425 M_WAITOK); 1426 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1427 printf("%s: Error receiving message: %d\n", 1428 __func__, isc_status); 1429 if (msg != &msgbuf) 1430 free(msg, M_CTL); 1431 return; 1432 } 1433 1434 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 1435 switch (msg->hdr.msg_type) { 1436 case CTL_MSG_SERIALIZE: 1437 io = ctl_alloc_io(softc->othersc_pool); 1438 ctl_zero_io(io); 1439 // populate ctsio from msg 1440 io->io_hdr.io_type = CTL_IO_SCSI; 1441 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1442 io->io_hdr.original_sc = msg->hdr.original_sc; 1443 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1444 CTL_FLAG_IO_ACTIVE; 1445 /* 1446 * If we're in serialization-only mode, we don't 1447 * want to go through full done processing. Thus 1448 * the COPY flag. 1449 * 1450 * XXX KDM add another flag that is more specific. 
1451 */ 1452 if (softc->ha_mode != CTL_HA_MODE_XFER) 1453 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1454 io->io_hdr.nexus = msg->hdr.nexus; 1455 #if 0 1456 printf("port %u, iid %u, lun %u\n", 1457 io->io_hdr.nexus.targ_port, 1458 io->io_hdr.nexus.initid, 1459 io->io_hdr.nexus.targ_lun); 1460 #endif 1461 io->scsiio.tag_num = msg->scsi.tag_num; 1462 io->scsiio.tag_type = msg->scsi.tag_type; 1463 #ifdef CTL_TIME_IO 1464 io->io_hdr.start_time = time_uptime; 1465 getbinuptime(&io->io_hdr.start_bt); 1466 #endif /* CTL_TIME_IO */ 1467 io->scsiio.cdb_len = msg->scsi.cdb_len; 1468 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1469 CTL_MAX_CDBLEN); 1470 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1471 const struct ctl_cmd_entry *entry; 1472 1473 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1474 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1475 io->io_hdr.flags |= 1476 entry->flags & CTL_FLAG_DATA_MASK; 1477 } 1478 ctl_enqueue_isc(io); 1479 break; 1480 1481 /* Performed on the Originating SC, XFER mode only */ 1482 case CTL_MSG_DATAMOVE: { 1483 struct ctl_sg_entry *sgl; 1484 int i, j; 1485 1486 io = msg->hdr.original_sc; 1487 if (io == NULL) { 1488 printf("%s: original_sc == NULL!\n", __func__); 1489 /* XXX KDM do something here */ 1490 break; 1491 } 1492 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1493 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1494 /* 1495 * Keep track of this, we need to send it back over 1496 * when the datamove is complete. 1497 */ 1498 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1499 if (msg->hdr.status == CTL_SUCCESS) 1500 io->io_hdr.status = msg->hdr.status; 1501 1502 if (msg->dt.sg_sequence == 0) { 1503 #ifdef CTL_TIME_IO 1504 getbinuptime(&io->io_hdr.dma_start_bt); 1505 #endif 1506 i = msg->dt.kern_sg_entries + 1507 msg->dt.kern_data_len / 1508 CTL_HA_DATAMOVE_SEGMENT + 1; 1509 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1510 M_WAITOK | M_ZERO); 1511 io->io_hdr.remote_sglist = sgl; 1512 io->io_hdr.local_sglist = 1513 &sgl[msg->dt.kern_sg_entries]; 1514 1515 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1516 1517 io->scsiio.kern_sg_entries = 1518 msg->dt.kern_sg_entries; 1519 io->scsiio.rem_sg_entries = 1520 msg->dt.kern_sg_entries; 1521 io->scsiio.kern_data_len = 1522 msg->dt.kern_data_len; 1523 io->scsiio.kern_total_len = 1524 msg->dt.kern_total_len; 1525 io->scsiio.kern_data_resid = 1526 msg->dt.kern_data_resid; 1527 io->scsiio.kern_rel_offset = 1528 msg->dt.kern_rel_offset; 1529 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1530 io->io_hdr.flags |= msg->dt.flags & 1531 CTL_FLAG_BUS_ADDR; 1532 } else 1533 sgl = (struct ctl_sg_entry *) 1534 io->scsiio.kern_data_ptr; 1535 1536 for (i = msg->dt.sent_sg_entries, j = 0; 1537 i < (msg->dt.sent_sg_entries + 1538 msg->dt.cur_sg_entries); i++, j++) { 1539 sgl[i].addr = msg->dt.sg_list[j].addr; 1540 sgl[i].len = msg->dt.sg_list[j].len; 1541 1542 #if 0 1543 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n", 1544 __func__, sgl[i].addr, sgl[i].len, j, i); 1545 #endif 1546 } 1547 1548 /* 1549 * If this is the last piece of the I/O, we've got 1550 * the full S/G list. Queue processing in the thread. 1551 * Otherwise wait for the next piece. 1552 */ 1553 if (msg->dt.sg_last != 0) 1554 ctl_enqueue_isc(io); 1555 break; 1556 } 1557 /* Performed on the Serializing (primary) SC, XFER mode only */ 1558 case CTL_MSG_DATAMOVE_DONE: { 1559 if (msg->hdr.serializing_sc == NULL) { 1560 printf("%s: serializing_sc == NULL!\n", 1561 __func__); 1562 /* XXX KDM now what? 
*/ 1563 break; 1564 } 1565 /* 1566 * We grab the sense information here in case 1567 * there was a failure, so we can return status 1568 * back to the initiator. 1569 */ 1570 io = msg->hdr.serializing_sc; 1571 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1572 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1573 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1574 io->io_hdr.port_status = msg->scsi.port_status; 1575 io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; 1576 if (msg->hdr.status != CTL_STATUS_NONE) { 1577 io->io_hdr.status = msg->hdr.status; 1578 io->scsiio.scsi_status = msg->scsi.scsi_status; 1579 io->scsiio.sense_len = msg->scsi.sense_len; 1580 memcpy(&io->scsiio.sense_data, 1581 &msg->scsi.sense_data, 1582 msg->scsi.sense_len); 1583 if (msg->hdr.status == CTL_SUCCESS) 1584 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1585 } 1586 ctl_enqueue_isc(io); 1587 break; 1588 } 1589 1590 /* Preformed on Originating SC, SER_ONLY mode */ 1591 case CTL_MSG_R2R: 1592 io = msg->hdr.original_sc; 1593 if (io == NULL) { 1594 printf("%s: original_sc == NULL!\n", 1595 __func__); 1596 break; 1597 } 1598 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1599 io->io_hdr.msg_type = CTL_MSG_R2R; 1600 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1601 ctl_enqueue_isc(io); 1602 break; 1603 1604 /* 1605 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1606 * mode. 1607 * Performed on the Originating (i.e. secondary) SC in XFER 1608 * mode 1609 */ 1610 case CTL_MSG_FINISH_IO: 1611 if (softc->ha_mode == CTL_HA_MODE_XFER) 1612 ctl_isc_handler_finish_xfer(softc, msg); 1613 else 1614 ctl_isc_handler_finish_ser_only(softc, msg); 1615 break; 1616 1617 /* Preformed on Originating SC */ 1618 case CTL_MSG_BAD_JUJU: 1619 io = msg->hdr.original_sc; 1620 if (io == NULL) { 1621 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1622 __func__); 1623 break; 1624 } 1625 ctl_copy_sense_data(msg, io); 1626 /* 1627 * IO should have already been cleaned up on other 1628 * SC so clear this flag so we won't send a message 1629 * back to finish the IO there. 
1630 */ 1631 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1632 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1633 1634 /* io = msg->hdr.serializing_sc; */ 1635 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1636 ctl_enqueue_isc(io); 1637 break; 1638 1639 /* Handle resets sent from the other side */ 1640 case CTL_MSG_MANAGE_TASKS: { 1641 struct ctl_taskio *taskio; 1642 taskio = (struct ctl_taskio *)ctl_alloc_io( 1643 softc->othersc_pool); 1644 ctl_zero_io((union ctl_io *)taskio); 1645 taskio->io_hdr.io_type = CTL_IO_TASK; 1646 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1647 taskio->io_hdr.nexus = msg->hdr.nexus; 1648 taskio->task_action = msg->task.task_action; 1649 taskio->tag_num = msg->task.tag_num; 1650 taskio->tag_type = msg->task.tag_type; 1651 #ifdef CTL_TIME_IO 1652 taskio->io_hdr.start_time = time_uptime; 1653 getbinuptime(&taskio->io_hdr.start_bt); 1654 #endif /* CTL_TIME_IO */ 1655 ctl_run_task((union ctl_io *)taskio); 1656 break; 1657 } 1658 /* Persistent Reserve action which needs attention */ 1659 case CTL_MSG_PERS_ACTION: 1660 presio = (struct ctl_prio *)ctl_alloc_io( 1661 softc->othersc_pool); 1662 ctl_zero_io((union ctl_io *)presio); 1663 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1664 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1665 presio->io_hdr.nexus = msg->hdr.nexus; 1666 presio->pr_msg = msg->pr; 1667 ctl_enqueue_isc((union ctl_io *)presio); 1668 break; 1669 case CTL_MSG_UA: 1670 ctl_isc_ua(softc, msg, param); 1671 break; 1672 case CTL_MSG_PORT_SYNC: 1673 ctl_isc_port_sync(softc, msg, param); 1674 break; 1675 case CTL_MSG_LUN_SYNC: 1676 ctl_isc_lun_sync(softc, msg, param); 1677 break; 1678 case CTL_MSG_IID_SYNC: 1679 ctl_isc_iid_sync(softc, msg, param); 1680 break; 1681 case CTL_MSG_LOGIN: 1682 ctl_isc_login(softc, msg, param); 1683 break; 1684 case CTL_MSG_MODE_SYNC: 1685 ctl_isc_mode_sync(softc, msg, param); 1686 break; 1687 default: 1688 printf("Received HA message of unknown type %d\n", 1689 msg->hdr.msg_type); 1690 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1691 break; 1692 } 1693 if (msg != &msgbuf) 1694 free(msg, M_CTL); 1695 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1696 printf("CTL: HA link status changed from %d to %d\n", 1697 softc->ha_link, param); 1698 if (param == softc->ha_link) 1699 return; 1700 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1701 softc->ha_link = param; 1702 ctl_isc_ha_link_down(softc); 1703 } else { 1704 softc->ha_link = param; 1705 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1706 ctl_isc_ha_link_up(softc); 1707 } 1708 return; 1709 } else { 1710 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1711 return; 1712 } 1713 } 1714 1715 static void 1716 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1717 { 1718 1719 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1720 src->scsi.sense_len); 1721 dest->scsiio.scsi_status = src->scsi.scsi_status; 1722 dest->scsiio.sense_len = src->scsi.sense_len; 1723 dest->io_hdr.status = src->hdr.status; 1724 } 1725 1726 static void 1727 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1728 { 1729 1730 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1731 src->scsiio.sense_len); 1732 dest->scsi.scsi_status = src->scsiio.scsi_status; 1733 dest->scsi.sense_len = src->scsiio.sense_len; 1734 dest->hdr.status = src->io_hdr.status; 1735 } 1736 1737 void 1738 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1739 { 1740 struct ctl_softc *softc = lun->ctl_softc; 1741 ctl_ua_type *pu; 1742 1743 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1744 return; 1745 mtx_assert(&lun->lun_lock, MA_OWNED); 1746 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1747 if (pu == NULL) 1748 return; 1749 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1750 } 1751 1752 void 1753 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1754 { 1755 int i; 1756 1757 mtx_assert(&lun->lun_lock, MA_OWNED); 1758 if (lun->pending_ua[port] == NULL) 1759 return; 1760 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1761 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1762 continue; 1763 lun->pending_ua[port][i] |= ua; 1764 } 1765 } 1766 1767 void 1768 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1769 { 1770 struct ctl_softc *softc = lun->ctl_softc; 1771 int i; 1772 1773 mtx_assert(&lun->lun_lock, MA_OWNED); 1774 for (i = softc->port_min; i < softc->port_max; i++) 1775 ctl_est_ua_port(lun, i, except, ua); 1776 } 1777 1778 void 1779 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1780 { 1781 struct ctl_softc *softc = lun->ctl_softc; 1782 ctl_ua_type *pu; 1783 1784 if (initidx < softc->init_min || initidx >= softc->init_max) 1785 return; 1786 mtx_assert(&lun->lun_lock, MA_OWNED); 1787 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1788 if (pu == NULL) 1789 return; 1790 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1791 } 1792 1793 void 1794 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1795 { 1796 struct ctl_softc *softc = lun->ctl_softc; 1797 int i, j; 1798 1799 mtx_assert(&lun->lun_lock, MA_OWNED); 1800 for (i = softc->port_min; i < softc->port_max; i++) { 1801 if (lun->pending_ua[i] == NULL) 1802 continue; 1803 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1804 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1805 continue; 1806 lun->pending_ua[i][j] &= ~ua; 1807 } 1808 } 1809 } 1810 1811 void 1812 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1813 ctl_ua_type ua_type) 1814 { 1815 struct ctl_lun *lun; 1816 1817 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1818 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1819 mtx_lock(&lun->lun_lock); 1820 ctl_clr_ua(lun, initidx, ua_type); 1821 mtx_unlock(&lun->lun_lock); 1822 } 1823 } 1824 1825 static int 1826 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1827 { 1828 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1829 struct ctl_lun *lun; 1830 struct ctl_lun_req ireq; 1831 int error, value; 1832 1833 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1834 error = sysctl_handle_int(oidp, &value, 0, req); 1835 if ((error != 0) || (req->newptr == NULL)) 1836 return (error); 1837 1838 mtx_lock(&softc->ctl_lock); 1839 if (value == 0) 1840 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1841 else 1842 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1843 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1844 mtx_unlock(&softc->ctl_lock); 1845 bzero(&ireq, sizeof(ireq)); 1846 ireq.reqtype = CTL_LUNREQ_MODIFY; 1847 ireq.reqdata.modify.lun_id = lun->lun; 1848 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1849 curthread); 1850 if (ireq.status != CTL_LUN_OK) { 1851 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1852 __func__, ireq.status, ireq.error_str); 1853 } 1854 mtx_lock(&softc->ctl_lock); 1855 } 1856 mtx_unlock(&softc->ctl_lock); 1857 return (0); 1858 } 1859 1860 static int 1861 ctl_init(void) 1862 { 1863 struct make_dev_args args; 1864 struct ctl_softc *softc; 1865 int i, error; 1866 1867 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1868 M_WAITOK | M_ZERO); 1869 1870 make_dev_args_init(&args); 1871 args.mda_devsw = &ctl_cdevsw; 1872 args.mda_uid = UID_ROOT; 1873 args.mda_gid = GID_OPERATOR; 1874 args.mda_mode = 0600; 1875 args.mda_si_drv1 = softc; 1876 args.mda_si_drv2 = NULL; 1877 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1878 if (error != 0) { 1879 free(softc, M_DEVBUF); 1880 control_softc = NULL; 1881 return (error); 1882 } 1883 1884 sysctl_ctx_init(&softc->sysctl_ctx); 1885 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1886 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1887 CTLFLAG_RD, 0, "CAM Target Layer"); 1888 1889 if (softc->sysctl_tree == NULL) { 1890 printf("%s: unable to allocate sysctl tree\n", __func__); 1891 destroy_dev(softc->dev); 1892 free(softc, M_DEVBUF); 1893 control_softc = NULL; 1894 return (ENOMEM); 1895 } 1896 1897 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1898 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1899 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1900 softc->flags = 0; 1901 1902 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1903 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1904 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1905 1906 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1907 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1908 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1909 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1910 } 1911 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1912 M_DEVBUF, M_WAITOK | M_ZERO); 1913 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1914 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1915 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1916 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1917 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1918 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1919 } 1920 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1921 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1922 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1923 M_DEVBUF, M_WAITOK | M_ZERO); 1924 1925 1926 /* 1927 * In Copan's HA scheme, the "master" and "slave" roles are 1928 * figured out through the slot the controller is in. Although it 1929 * is an active/active system, someone has to be in charge. 
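	 * The ha_id tunable below selects this head's slot; a value of 0 (or
	 * anything above NUM_HA_SHELVES) disables HA and leaves this head as
	 * the single active shelf owning all ports.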
1930 */ 1931 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1932 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1933 "HA head ID (0 - no HA)"); 1934 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1935 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1936 softc->is_single = 1; 1937 softc->port_cnt = ctl_max_ports; 1938 softc->port_min = 0; 1939 } else { 1940 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 1941 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1942 } 1943 softc->port_max = softc->port_min + softc->port_cnt; 1944 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1945 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1946 1947 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1948 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1949 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1950 1951 STAILQ_INIT(&softc->lun_list); 1952 STAILQ_INIT(&softc->pending_lun_queue); 1953 STAILQ_INIT(&softc->fe_list); 1954 STAILQ_INIT(&softc->port_list); 1955 STAILQ_INIT(&softc->be_list); 1956 ctl_tpc_init(softc); 1957 1958 if (worker_threads <= 0) 1959 worker_threads = max(1, mp_ncpus / 4); 1960 if (worker_threads > CTL_MAX_THREADS) 1961 worker_threads = CTL_MAX_THREADS; 1962 1963 for (i = 0; i < worker_threads; i++) { 1964 struct ctl_thread *thr = &softc->threads[i]; 1965 1966 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1967 thr->ctl_softc = softc; 1968 STAILQ_INIT(&thr->incoming_queue); 1969 STAILQ_INIT(&thr->rtr_queue); 1970 STAILQ_INIT(&thr->done_queue); 1971 STAILQ_INIT(&thr->isc_queue); 1972 1973 error = kproc_kthread_add(ctl_work_thread, thr, 1974 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1975 if (error != 0) { 1976 printf("error creating CTL work thread!\n"); 1977 return (error); 1978 } 1979 } 1980 error = kproc_kthread_add(ctl_lun_thread, softc, 1981 &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun"); 1982 if (error != 0) { 1983 printf("error creating CTL lun thread!\n"); 1984 return (error); 1985 } 1986 error = kproc_kthread_add(ctl_thresh_thread, softc, 1987 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1988 if (error != 0) { 1989 printf("error creating CTL threshold thread!\n"); 1990 return (error); 1991 } 1992 1993 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1994 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1995 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1996 1997 if (softc->is_single == 0) { 1998 if (ctl_frontend_register(&ha_frontend) != 0) 1999 softc->is_single = 1; 2000 } 2001 return (0); 2002 } 2003 2004 static int 2005 ctl_shutdown(void) 2006 { 2007 struct ctl_softc *softc = control_softc; 2008 int i; 2009 2010 if (softc->is_single == 0) 2011 ctl_frontend_deregister(&ha_frontend); 2012 2013 destroy_dev(softc->dev); 2014 2015 /* Shutdown CTL threads. 
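	   Wake each thread and wait for it to clear its thread pointer;
	   only then is its queue lock torn down.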
*/ 2016 softc->shutdown = 1; 2017 for (i = 0; i < worker_threads; i++) { 2018 struct ctl_thread *thr = &softc->threads[i]; 2019 while (thr->thread != NULL) { 2020 wakeup(thr); 2021 if (thr->thread != NULL) 2022 pause("CTL thr shutdown", 1); 2023 } 2024 mtx_destroy(&thr->queue_lock); 2025 } 2026 while (softc->lun_thread != NULL) { 2027 wakeup(&softc->pending_lun_queue); 2028 if (softc->lun_thread != NULL) 2029 pause("CTL thr shutdown", 1); 2030 } 2031 while (softc->thresh_thread != NULL) { 2032 wakeup(softc->thresh_thread); 2033 if (softc->thresh_thread != NULL) 2034 pause("CTL thr shutdown", 1); 2035 } 2036 2037 ctl_tpc_shutdown(softc); 2038 uma_zdestroy(softc->io_zone); 2039 mtx_destroy(&softc->ctl_lock); 2040 2041 free(softc->ctl_luns, M_DEVBUF); 2042 free(softc->ctl_lun_mask, M_DEVBUF); 2043 free(softc->ctl_port_mask, M_DEVBUF); 2044 free(softc->ctl_ports, M_DEVBUF); 2045 2046 sysctl_ctx_free(&softc->sysctl_ctx); 2047 2048 free(softc, M_DEVBUF); 2049 control_softc = NULL; 2050 return (0); 2051 } 2052 2053 static int 2054 ctl_module_event_handler(module_t mod, int what, void *arg) 2055 { 2056 2057 switch (what) { 2058 case MOD_LOAD: 2059 return (ctl_init()); 2060 case MOD_UNLOAD: 2061 return (ctl_shutdown()); 2062 default: 2063 return (EOPNOTSUPP); 2064 } 2065 } 2066 2067 /* 2068 * XXX KDM should we do some access checks here? Bump a reference count to 2069 * prevent a CTL module from being unloaded while someone has it open? 2070 */ 2071 static int 2072 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2073 { 2074 return (0); 2075 } 2076 2077 static int 2078 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2079 { 2080 return (0); 2081 } 2082 2083 /* 2084 * Remove an initiator by port number and initiator ID. 2085 * Returns 0 for success, -1 for failure. 2086 */ 2087 int 2088 ctl_remove_initiator(struct ctl_port *port, int iid) 2089 { 2090 struct ctl_softc *softc = port->ctl_softc; 2091 int last; 2092 2093 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2094 2095 if (iid > CTL_MAX_INIT_PER_PORT) { 2096 printf("%s: initiator ID %u > maximun %u!\n", 2097 __func__, iid, CTL_MAX_INIT_PER_PORT); 2098 return (-1); 2099 } 2100 2101 mtx_lock(&softc->ctl_lock); 2102 last = (--port->wwpn_iid[iid].in_use == 0); 2103 port->wwpn_iid[iid].last_use = time_uptime; 2104 mtx_unlock(&softc->ctl_lock); 2105 if (last) 2106 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2107 ctl_isc_announce_iid(port, iid); 2108 2109 return (0); 2110 } 2111 2112 /* 2113 * Add an initiator to the initiator map. 2114 * Returns iid for success, < 0 for failure. 
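 * (-1 if the requested iid is out of range, -2 if no free slot could be
 * found for it.)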
2115 */ 2116 int 2117 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2118 { 2119 struct ctl_softc *softc = port->ctl_softc; 2120 time_t best_time; 2121 int i, best; 2122 2123 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2124 2125 if (iid >= CTL_MAX_INIT_PER_PORT) { 2126 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2127 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2128 free(name, M_CTL); 2129 return (-1); 2130 } 2131 2132 mtx_lock(&softc->ctl_lock); 2133 2134 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2135 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2136 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2137 iid = i; 2138 break; 2139 } 2140 if (name != NULL && port->wwpn_iid[i].name != NULL && 2141 strcmp(name, port->wwpn_iid[i].name) == 0) { 2142 iid = i; 2143 break; 2144 } 2145 } 2146 } 2147 2148 if (iid < 0) { 2149 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2150 if (port->wwpn_iid[i].in_use == 0 && 2151 port->wwpn_iid[i].wwpn == 0 && 2152 port->wwpn_iid[i].name == NULL) { 2153 iid = i; 2154 break; 2155 } 2156 } 2157 } 2158 2159 if (iid < 0) { 2160 best = -1; 2161 best_time = INT32_MAX; 2162 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2163 if (port->wwpn_iid[i].in_use == 0) { 2164 if (port->wwpn_iid[i].last_use < best_time) { 2165 best = i; 2166 best_time = port->wwpn_iid[i].last_use; 2167 } 2168 } 2169 } 2170 iid = best; 2171 } 2172 2173 if (iid < 0) { 2174 mtx_unlock(&softc->ctl_lock); 2175 free(name, M_CTL); 2176 return (-2); 2177 } 2178 2179 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2180 /* 2181 * This is not an error yet. 2182 */ 2183 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2184 #if 0 2185 printf("%s: port %d iid %u WWPN %#jx arrived" 2186 " again\n", __func__, port->targ_port, 2187 iid, (uintmax_t)wwpn); 2188 #endif 2189 goto take; 2190 } 2191 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2192 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2193 #if 0 2194 printf("%s: port %d iid %u name '%s' arrived" 2195 " again\n", __func__, port->targ_port, 2196 iid, name); 2197 #endif 2198 goto take; 2199 } 2200 2201 /* 2202 * This is an error, but what do we do about it? The 2203 * driver is telling us we have a new WWPN for this 2204 * initiator ID, so we pretty much need to use it. 
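		 * Log the collision below and then take over the slot anyway.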
2205 */ 2206 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2207 " but WWPN %#jx '%s' is still at that address\n", 2208 __func__, port->targ_port, iid, wwpn, name, 2209 (uintmax_t)port->wwpn_iid[iid].wwpn, 2210 port->wwpn_iid[iid].name); 2211 } 2212 take: 2213 free(port->wwpn_iid[iid].name, M_CTL); 2214 port->wwpn_iid[iid].name = name; 2215 port->wwpn_iid[iid].wwpn = wwpn; 2216 port->wwpn_iid[iid].in_use++; 2217 mtx_unlock(&softc->ctl_lock); 2218 ctl_isc_announce_iid(port, iid); 2219 2220 return (iid); 2221 } 2222 2223 static int 2224 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2225 { 2226 int len; 2227 2228 switch (port->port_type) { 2229 case CTL_PORT_FC: 2230 { 2231 struct scsi_transportid_fcp *id = 2232 (struct scsi_transportid_fcp *)buf; 2233 if (port->wwpn_iid[iid].wwpn == 0) 2234 return (0); 2235 memset(id, 0, sizeof(*id)); 2236 id->format_protocol = SCSI_PROTO_FC; 2237 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2238 return (sizeof(*id)); 2239 } 2240 case CTL_PORT_ISCSI: 2241 { 2242 struct scsi_transportid_iscsi_port *id = 2243 (struct scsi_transportid_iscsi_port *)buf; 2244 if (port->wwpn_iid[iid].name == NULL) 2245 return (0); 2246 memset(id, 0, 256); 2247 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2248 SCSI_PROTO_ISCSI; 2249 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2250 len = roundup2(min(len, 252), 4); 2251 scsi_ulto2b(len, id->additional_length); 2252 return (sizeof(*id) + len); 2253 } 2254 case CTL_PORT_SAS: 2255 { 2256 struct scsi_transportid_sas *id = 2257 (struct scsi_transportid_sas *)buf; 2258 if (port->wwpn_iid[iid].wwpn == 0) 2259 return (0); 2260 memset(id, 0, sizeof(*id)); 2261 id->format_protocol = SCSI_PROTO_SAS; 2262 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2263 return (sizeof(*id)); 2264 } 2265 default: 2266 { 2267 struct scsi_transportid_spi *id = 2268 (struct scsi_transportid_spi *)buf; 2269 memset(id, 0, sizeof(*id)); 2270 id->format_protocol = SCSI_PROTO_SPI; 2271 scsi_ulto2b(iid, id->scsi_addr); 2272 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2273 return (sizeof(*id)); 2274 } 2275 } 2276 } 2277 2278 /* 2279 * Serialize a command that went down the "wrong" side, and so was sent to 2280 * this controller for execution. The logic is a little different than the 2281 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2282 * sent back to the other side, but in the success case, we execute the 2283 * command on this side (XFER mode) or tell the other side to execute it 2284 * (SER_ONLY mode). 2285 */ 2286 static void 2287 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2288 { 2289 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2290 struct ctl_port *port = CTL_PORT(ctsio); 2291 union ctl_ha_msg msg_info; 2292 struct ctl_lun *lun; 2293 const struct ctl_cmd_entry *entry; 2294 uint32_t targ_lun; 2295 2296 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2297 2298 /* Make sure that we know about this port. */ 2299 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2300 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2301 /*retry_count*/ 1); 2302 goto badjuju; 2303 } 2304 2305 /* Make sure that we know about this LUN. */ 2306 mtx_lock(&softc->ctl_lock); 2307 if (targ_lun >= ctl_max_luns || 2308 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2309 mtx_unlock(&softc->ctl_lock); 2310 2311 /* 2312 * The other node would not send this request to us unless 2313 * received announce that we are primary node for this LUN. 
2314 * If this LUN does not exist now, it is probably result of 2315 * a race, so respond to initiator in the most opaque way. 2316 */ 2317 ctl_set_busy(ctsio); 2318 goto badjuju; 2319 } 2320 mtx_lock(&lun->lun_lock); 2321 mtx_unlock(&softc->ctl_lock); 2322 2323 /* 2324 * If the LUN is invalid, pretend that it doesn't exist. 2325 * It will go away as soon as all pending I/Os completed. 2326 */ 2327 if (lun->flags & CTL_LUN_DISABLED) { 2328 mtx_unlock(&lun->lun_lock); 2329 ctl_set_busy(ctsio); 2330 goto badjuju; 2331 } 2332 2333 entry = ctl_get_cmd_entry(ctsio, NULL); 2334 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2335 mtx_unlock(&lun->lun_lock); 2336 goto badjuju; 2337 } 2338 2339 CTL_LUN(ctsio) = lun; 2340 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2341 2342 /* 2343 * Every I/O goes into the OOA queue for a 2344 * particular LUN, and stays there until completion. 2345 */ 2346 #ifdef CTL_TIME_IO 2347 if (TAILQ_EMPTY(&lun->ooa_queue)) 2348 lun->idle_time += getsbinuptime() - lun->last_busy; 2349 #endif 2350 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2351 2352 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 2353 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 2354 ooa_links))) { 2355 case CTL_ACTION_BLOCK: 2356 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 2357 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 2358 blocked_links); 2359 mtx_unlock(&lun->lun_lock); 2360 break; 2361 case CTL_ACTION_PASS: 2362 case CTL_ACTION_SKIP: 2363 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2364 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2365 ctl_enqueue_rtr((union ctl_io *)ctsio); 2366 mtx_unlock(&lun->lun_lock); 2367 } else { 2368 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2369 mtx_unlock(&lun->lun_lock); 2370 2371 /* send msg back to other side */ 2372 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2373 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2374 msg_info.hdr.msg_type = CTL_MSG_R2R; 2375 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2376 sizeof(msg_info.hdr), M_WAITOK); 2377 } 2378 break; 2379 case CTL_ACTION_OVERLAP: 2380 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2381 mtx_unlock(&lun->lun_lock); 2382 ctl_set_overlapped_cmd(ctsio); 2383 goto badjuju; 2384 case CTL_ACTION_OVERLAP_TAG: 2385 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2386 mtx_unlock(&lun->lun_lock); 2387 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2388 goto badjuju; 2389 case CTL_ACTION_ERROR: 2390 default: 2391 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2392 mtx_unlock(&lun->lun_lock); 2393 2394 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2395 /*retry_count*/ 0); 2396 badjuju: 2397 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2398 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2399 msg_info.hdr.serializing_sc = NULL; 2400 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2401 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2402 sizeof(msg_info.scsi), M_WAITOK); 2403 ctl_free_io((union ctl_io *)ctsio); 2404 break; 2405 } 2406 } 2407 2408 /* 2409 * Returns 0 for success, errno for failure. 
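 * ctl_ioctl_fill_ooa() copies one LUN's outstanding I/Os into the caller's
 * entry array; entries that do not fit are still counted so the caller can
 * report how many were dropped.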
2410 */ 2411 static void 2412 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2413 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2414 { 2415 union ctl_io *io; 2416 2417 mtx_lock(&lun->lun_lock); 2418 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2419 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2420 ooa_links)) { 2421 struct ctl_ooa_entry *entry; 2422 2423 /* 2424 * If we've got more than we can fit, just count the 2425 * remaining entries. 2426 */ 2427 if (*cur_fill_num >= ooa_hdr->alloc_num) 2428 continue; 2429 2430 entry = &kern_entries[*cur_fill_num]; 2431 2432 entry->tag_num = io->scsiio.tag_num; 2433 entry->lun_num = lun->lun; 2434 #ifdef CTL_TIME_IO 2435 entry->start_bt = io->io_hdr.start_bt; 2436 #endif 2437 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2438 entry->cdb_len = io->scsiio.cdb_len; 2439 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2440 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2441 2442 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2443 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2444 2445 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2446 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2447 2448 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2449 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2450 2451 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2452 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2453 } 2454 mtx_unlock(&lun->lun_lock); 2455 } 2456 2457 static void * 2458 ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, 2459 size_t error_str_len) 2460 { 2461 void *kptr; 2462 2463 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2464 2465 if (copyin(user_addr, kptr, len) != 0) { 2466 snprintf(error_str, error_str_len, "Error copying %d bytes " 2467 "from user address %p to kernel address %p", len, 2468 user_addr, kptr); 2469 free(kptr, M_CTL); 2470 return (NULL); 2471 } 2472 2473 return (kptr); 2474 } 2475 2476 /* 2477 * Escape characters that are illegal or not recommended in XML. 
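 * Returns 0 on success, or the sbuf error once the destination buffer
 * fills up.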
2478 */ 2479 int 2480 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2481 { 2482 char *end = str + size; 2483 int retval; 2484 2485 retval = 0; 2486 2487 for (; *str && str < end; str++) { 2488 switch (*str) { 2489 case '&': 2490 retval = sbuf_printf(sb, "&"); 2491 break; 2492 case '>': 2493 retval = sbuf_printf(sb, ">"); 2494 break; 2495 case '<': 2496 retval = sbuf_printf(sb, "<"); 2497 break; 2498 default: 2499 retval = sbuf_putc(sb, *str); 2500 break; 2501 } 2502 2503 if (retval != 0) 2504 break; 2505 2506 } 2507 2508 return (retval); 2509 } 2510 2511 static void 2512 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2513 { 2514 struct scsi_vpd_id_descriptor *desc; 2515 int i; 2516 2517 if (id == NULL || id->len < 4) 2518 return; 2519 desc = (struct scsi_vpd_id_descriptor *)id->data; 2520 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2521 case SVPD_ID_TYPE_T10: 2522 sbuf_printf(sb, "t10."); 2523 break; 2524 case SVPD_ID_TYPE_EUI64: 2525 sbuf_printf(sb, "eui."); 2526 break; 2527 case SVPD_ID_TYPE_NAA: 2528 sbuf_printf(sb, "naa."); 2529 break; 2530 case SVPD_ID_TYPE_SCSI_NAME: 2531 break; 2532 } 2533 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2534 case SVPD_ID_CODESET_BINARY: 2535 for (i = 0; i < desc->length; i++) 2536 sbuf_printf(sb, "%02x", desc->identifier[i]); 2537 break; 2538 case SVPD_ID_CODESET_ASCII: 2539 sbuf_printf(sb, "%.*s", (int)desc->length, 2540 (char *)desc->identifier); 2541 break; 2542 case SVPD_ID_CODESET_UTF8: 2543 sbuf_printf(sb, "%s", (char *)desc->identifier); 2544 break; 2545 } 2546 } 2547 2548 static int 2549 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2550 struct thread *td) 2551 { 2552 struct ctl_softc *softc = dev->si_drv1; 2553 struct ctl_port *port; 2554 struct ctl_lun *lun; 2555 int retval; 2556 2557 retval = 0; 2558 2559 switch (cmd) { 2560 case CTL_IO: 2561 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2562 break; 2563 case CTL_ENABLE_PORT: 2564 case CTL_DISABLE_PORT: 2565 case CTL_SET_PORT_WWNS: { 2566 struct ctl_port *port; 2567 struct ctl_port_entry *entry; 2568 2569 entry = (struct ctl_port_entry *)addr; 2570 2571 mtx_lock(&softc->ctl_lock); 2572 STAILQ_FOREACH(port, &softc->port_list, links) { 2573 int action, done; 2574 2575 if (port->targ_port < softc->port_min || 2576 port->targ_port >= softc->port_max) 2577 continue; 2578 2579 action = 0; 2580 done = 0; 2581 if ((entry->port_type == CTL_PORT_NONE) 2582 && (entry->targ_port == port->targ_port)) { 2583 /* 2584 * If the user only wants to enable or 2585 * disable or set WWNs on a specific port, 2586 * do the operation and we're done. 2587 */ 2588 action = 1; 2589 done = 1; 2590 } else if (entry->port_type & port->port_type) { 2591 /* 2592 * Compare the user's type mask with the 2593 * particular frontend type to see if we 2594 * have a match. 2595 */ 2596 action = 1; 2597 done = 0; 2598 2599 /* 2600 * Make sure the user isn't trying to set 2601 * WWNs on multiple ports at the same time. 2602 */ 2603 if (cmd == CTL_SET_PORT_WWNS) { 2604 printf("%s: Can't set WWNs on " 2605 "multiple ports\n", __func__); 2606 retval = EINVAL; 2607 break; 2608 } 2609 } 2610 if (action == 0) 2611 continue; 2612 2613 /* 2614 * XXX KDM we have to drop the lock here, because 2615 * the online/offline operations can potentially 2616 * block. 
We need to reference count the frontends 2617 * so they can't go away, 2618 */ 2619 if (cmd == CTL_ENABLE_PORT) { 2620 mtx_unlock(&softc->ctl_lock); 2621 ctl_port_online(port); 2622 mtx_lock(&softc->ctl_lock); 2623 } else if (cmd == CTL_DISABLE_PORT) { 2624 mtx_unlock(&softc->ctl_lock); 2625 ctl_port_offline(port); 2626 mtx_lock(&softc->ctl_lock); 2627 } else if (cmd == CTL_SET_PORT_WWNS) { 2628 ctl_port_set_wwns(port, 2629 (entry->flags & CTL_PORT_WWNN_VALID) ? 2630 1 : 0, entry->wwnn, 2631 (entry->flags & CTL_PORT_WWPN_VALID) ? 2632 1 : 0, entry->wwpn); 2633 } 2634 if (done != 0) 2635 break; 2636 } 2637 mtx_unlock(&softc->ctl_lock); 2638 break; 2639 } 2640 case CTL_GET_OOA: { 2641 struct ctl_ooa *ooa_hdr; 2642 struct ctl_ooa_entry *entries; 2643 uint32_t cur_fill_num; 2644 2645 ooa_hdr = (struct ctl_ooa *)addr; 2646 2647 if ((ooa_hdr->alloc_len == 0) 2648 || (ooa_hdr->alloc_num == 0)) { 2649 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2650 "must be non-zero\n", __func__, 2651 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2652 retval = EINVAL; 2653 break; 2654 } 2655 2656 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2657 sizeof(struct ctl_ooa_entry))) { 2658 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2659 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2660 __func__, ooa_hdr->alloc_len, 2661 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2662 retval = EINVAL; 2663 break; 2664 } 2665 2666 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2667 if (entries == NULL) { 2668 printf("%s: could not allocate %d bytes for OOA " 2669 "dump\n", __func__, ooa_hdr->alloc_len); 2670 retval = ENOMEM; 2671 break; 2672 } 2673 2674 mtx_lock(&softc->ctl_lock); 2675 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2676 (ooa_hdr->lun_num >= ctl_max_luns || 2677 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2678 mtx_unlock(&softc->ctl_lock); 2679 free(entries, M_CTL); 2680 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2681 __func__, (uintmax_t)ooa_hdr->lun_num); 2682 retval = EINVAL; 2683 break; 2684 } 2685 2686 cur_fill_num = 0; 2687 2688 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2689 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2690 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2691 ooa_hdr, entries); 2692 } 2693 } else { 2694 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2695 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2696 entries); 2697 } 2698 mtx_unlock(&softc->ctl_lock); 2699 2700 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2701 ooa_hdr->fill_len = ooa_hdr->fill_num * 2702 sizeof(struct ctl_ooa_entry); 2703 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2704 if (retval != 0) { 2705 printf("%s: error copying out %d bytes for OOA dump\n", 2706 __func__, ooa_hdr->fill_len); 2707 } 2708 2709 getbinuptime(&ooa_hdr->cur_bt); 2710 2711 if (cur_fill_num > ooa_hdr->alloc_num) { 2712 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2713 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2714 } else { 2715 ooa_hdr->dropped_num = 0; 2716 ooa_hdr->status = CTL_OOA_OK; 2717 } 2718 2719 free(entries, M_CTL); 2720 break; 2721 } 2722 case CTL_DELAY_IO: { 2723 struct ctl_io_delay_info *delay_info; 2724 2725 delay_info = (struct ctl_io_delay_info *)addr; 2726 2727 #ifdef CTL_IO_DELAY 2728 mtx_lock(&softc->ctl_lock); 2729 if (delay_info->lun_id >= ctl_max_luns || 2730 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2731 mtx_unlock(&softc->ctl_lock); 2732 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2733 break; 2734 } 2735 
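		/*
		 * Hand off from the global CTL lock to this LUN's lock so the
		 * LUN can't go away while we update its delay settings.
		 */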
mtx_lock(&lun->lun_lock); 2736 mtx_unlock(&softc->ctl_lock); 2737 delay_info->status = CTL_DELAY_STATUS_OK; 2738 switch (delay_info->delay_type) { 2739 case CTL_DELAY_TYPE_CONT: 2740 case CTL_DELAY_TYPE_ONESHOT: 2741 break; 2742 default: 2743 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2744 break; 2745 } 2746 switch (delay_info->delay_loc) { 2747 case CTL_DELAY_LOC_DATAMOVE: 2748 lun->delay_info.datamove_type = delay_info->delay_type; 2749 lun->delay_info.datamove_delay = delay_info->delay_secs; 2750 break; 2751 case CTL_DELAY_LOC_DONE: 2752 lun->delay_info.done_type = delay_info->delay_type; 2753 lun->delay_info.done_delay = delay_info->delay_secs; 2754 break; 2755 default: 2756 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2757 break; 2758 } 2759 mtx_unlock(&lun->lun_lock); 2760 #else 2761 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2762 #endif /* CTL_IO_DELAY */ 2763 break; 2764 } 2765 #ifdef CTL_LEGACY_STATS 2766 case CTL_GETSTATS: { 2767 struct ctl_stats *stats = (struct ctl_stats *)addr; 2768 int i; 2769 2770 /* 2771 * XXX KDM no locking here. If the LUN list changes, 2772 * things can blow up. 2773 */ 2774 i = 0; 2775 stats->status = CTL_SS_OK; 2776 stats->fill_len = 0; 2777 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2778 if (stats->fill_len + sizeof(lun->legacy_stats) > 2779 stats->alloc_len) { 2780 stats->status = CTL_SS_NEED_MORE_SPACE; 2781 break; 2782 } 2783 retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], 2784 sizeof(lun->legacy_stats)); 2785 if (retval != 0) 2786 break; 2787 stats->fill_len += sizeof(lun->legacy_stats); 2788 } 2789 stats->num_luns = softc->num_luns; 2790 stats->flags = CTL_STATS_FLAG_NONE; 2791 #ifdef CTL_TIME_IO 2792 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 2793 #endif 2794 getnanouptime(&stats->timestamp); 2795 break; 2796 } 2797 #endif /* CTL_LEGACY_STATS */ 2798 case CTL_ERROR_INJECT: { 2799 struct ctl_error_desc *err_desc, *new_err_desc; 2800 2801 err_desc = (struct ctl_error_desc *)addr; 2802 2803 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2804 M_WAITOK | M_ZERO); 2805 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2806 2807 mtx_lock(&softc->ctl_lock); 2808 if (err_desc->lun_id >= ctl_max_luns || 2809 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2810 mtx_unlock(&softc->ctl_lock); 2811 free(new_err_desc, M_CTL); 2812 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2813 __func__, (uintmax_t)err_desc->lun_id); 2814 retval = EINVAL; 2815 break; 2816 } 2817 mtx_lock(&lun->lun_lock); 2818 mtx_unlock(&softc->ctl_lock); 2819 2820 /* 2821 * We could do some checking here to verify the validity 2822 * of the request, but given the complexity of error 2823 * injection requests, the checking logic would be fairly 2824 * complex. 2825 * 2826 * For now, if the request is invalid, it just won't get 2827 * executed and might get deleted. 2828 */ 2829 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2830 2831 /* 2832 * XXX KDM check to make sure the serial number is unique, 2833 * in case we somehow manage to wrap. That shouldn't 2834 * happen for a very long time, but it's the right thing to 2835 * do. 
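		 * For now the serial is just a per-LUN counter: it is copied
		 * into both the kernel and user copies of the descriptor and
		 * then incremented.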
2836 */ 2837 new_err_desc->serial = lun->error_serial; 2838 err_desc->serial = lun->error_serial; 2839 lun->error_serial++; 2840 2841 mtx_unlock(&lun->lun_lock); 2842 break; 2843 } 2844 case CTL_ERROR_INJECT_DELETE: { 2845 struct ctl_error_desc *delete_desc, *desc, *desc2; 2846 int delete_done; 2847 2848 delete_desc = (struct ctl_error_desc *)addr; 2849 delete_done = 0; 2850 2851 mtx_lock(&softc->ctl_lock); 2852 if (delete_desc->lun_id >= ctl_max_luns || 2853 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2854 mtx_unlock(&softc->ctl_lock); 2855 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2856 __func__, (uintmax_t)delete_desc->lun_id); 2857 retval = EINVAL; 2858 break; 2859 } 2860 mtx_lock(&lun->lun_lock); 2861 mtx_unlock(&softc->ctl_lock); 2862 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2863 if (desc->serial != delete_desc->serial) 2864 continue; 2865 2866 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2867 links); 2868 free(desc, M_CTL); 2869 delete_done = 1; 2870 } 2871 mtx_unlock(&lun->lun_lock); 2872 if (delete_done == 0) { 2873 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2874 "error serial %ju on LUN %u\n", __func__, 2875 delete_desc->serial, delete_desc->lun_id); 2876 retval = EINVAL; 2877 break; 2878 } 2879 break; 2880 } 2881 case CTL_DUMP_STRUCTS: { 2882 int j, k; 2883 struct ctl_port *port; 2884 struct ctl_frontend *fe; 2885 2886 mtx_lock(&softc->ctl_lock); 2887 printf("CTL Persistent Reservation information start:\n"); 2888 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2889 mtx_lock(&lun->lun_lock); 2890 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2891 mtx_unlock(&lun->lun_lock); 2892 continue; 2893 } 2894 2895 for (j = 0; j < ctl_max_ports; j++) { 2896 if (lun->pr_keys[j] == NULL) 2897 continue; 2898 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2899 if (lun->pr_keys[j][k] == 0) 2900 continue; 2901 printf(" LUN %ju port %d iid %d key " 2902 "%#jx\n", lun->lun, j, k, 2903 (uintmax_t)lun->pr_keys[j][k]); 2904 } 2905 } 2906 mtx_unlock(&lun->lun_lock); 2907 } 2908 printf("CTL Persistent Reservation information end\n"); 2909 printf("CTL Ports:\n"); 2910 STAILQ_FOREACH(port, &softc->port_list, links) { 2911 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2912 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2913 port->frontend->name, port->port_type, 2914 port->physical_port, port->virtual_port, 2915 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2916 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2917 if (port->wwpn_iid[j].in_use == 0 && 2918 port->wwpn_iid[j].wwpn == 0 && 2919 port->wwpn_iid[j].name == NULL) 2920 continue; 2921 2922 printf(" iid %u use %d WWPN %#jx '%s'\n", 2923 j, port->wwpn_iid[j].in_use, 2924 (uintmax_t)port->wwpn_iid[j].wwpn, 2925 port->wwpn_iid[j].name); 2926 } 2927 } 2928 printf("CTL Port information end\n"); 2929 mtx_unlock(&softc->ctl_lock); 2930 /* 2931 * XXX KDM calling this without a lock. We'd likely want 2932 * to drop the lock before calling the frontend's dump 2933 * routine anyway. 
2934 */ 2935 printf("CTL Frontends:\n"); 2936 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2937 printf(" Frontend '%s'\n", fe->name); 2938 if (fe->fe_dump != NULL) 2939 fe->fe_dump(); 2940 } 2941 printf("CTL Frontend information end\n"); 2942 break; 2943 } 2944 case CTL_LUN_REQ: { 2945 struct ctl_lun_req *lun_req; 2946 struct ctl_backend_driver *backend; 2947 void *packed; 2948 nvlist_t *tmp_args_nvl; 2949 size_t packed_len; 2950 2951 lun_req = (struct ctl_lun_req *)addr; 2952 tmp_args_nvl = lun_req->args_nvl; 2953 2954 backend = ctl_backend_find(lun_req->backend); 2955 if (backend == NULL) { 2956 lun_req->status = CTL_LUN_ERROR; 2957 snprintf(lun_req->error_str, 2958 sizeof(lun_req->error_str), 2959 "Backend \"%s\" not found.", 2960 lun_req->backend); 2961 break; 2962 } 2963 2964 if (lun_req->args != NULL) { 2965 lun_req->args_nvl = nvlist_unpack(lun_req->args, 2966 lun_req->args_len, 0); 2967 2968 if (lun_req->args_nvl == NULL) { 2969 lun_req->status = CTL_LUN_ERROR; 2970 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2971 "Cannot unpack args nvlist."); 2972 break; 2973 } 2974 } else 2975 lun_req->args_nvl = nvlist_create(0); 2976 2977 retval = backend->ioctl(dev, cmd, addr, flag, td); 2978 nvlist_destroy(lun_req->args_nvl); 2979 lun_req->args_nvl = tmp_args_nvl; 2980 2981 if (lun_req->result_nvl != NULL) { 2982 if (lun_req->result != NULL) { 2983 packed = nvlist_pack(lun_req->result_nvl, 2984 &packed_len); 2985 if (packed == NULL) { 2986 lun_req->status = CTL_LUN_ERROR; 2987 snprintf(lun_req->error_str, 2988 sizeof(lun_req->error_str), 2989 "Cannot pack result nvlist."); 2990 break; 2991 } 2992 2993 if (packed_len > lun_req->result_len) { 2994 lun_req->status = CTL_LUN_ERROR; 2995 snprintf(lun_req->error_str, 2996 sizeof(lun_req->error_str), 2997 "Result nvlist too large."); 2998 free(packed, M_NVLIST); 2999 break; 3000 } 3001 3002 if (copyout(packed, lun_req->result, packed_len)) { 3003 lun_req->status = CTL_LUN_ERROR; 3004 snprintf(lun_req->error_str, 3005 sizeof(lun_req->error_str), 3006 "Cannot copyout() the result."); 3007 free(packed, M_NVLIST); 3008 break; 3009 } 3010 3011 lun_req->result_len = packed_len; 3012 free(packed, M_NVLIST); 3013 } 3014 3015 nvlist_destroy(lun_req->result_nvl); 3016 } 3017 break; 3018 } 3019 case CTL_LUN_LIST: { 3020 struct sbuf *sb; 3021 struct ctl_lun_list *list; 3022 const char *name, *value; 3023 void *cookie; 3024 int type; 3025 3026 list = (struct ctl_lun_list *)addr; 3027 3028 /* 3029 * Allocate a fixed length sbuf here, based on the length 3030 * of the user's buffer. We could allocate an auto-extending 3031 * buffer, and then tell the user how much larger our 3032 * amount of data is than his buffer, but that presents 3033 * some problems: 3034 * 3035 * 1. The sbuf(9) routines use a blocking malloc, and so 3036 * we can't hold a lock while calling them with an 3037 * auto-extending buffer. 3038 * 3039 * 2. There is not currently a LUN reference counting 3040 * mechanism, outside of outstanding transactions on 3041 * the LUN's OOA queue. So a LUN could go away on us 3042 * while we're getting the LUN number, backend-specific 3043 * information, etc. Thus, given the way things 3044 * currently work, we need to hold the CTL lock while 3045 * grabbing LUN information. 3046 * 3047 * So, from the user's standpoint, the best thing to do is 3048 * allocate what he thinks is a reasonable buffer length, 3049 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3050 * double the buffer length and try again. 
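		 * A userland consumer might do that roughly as follows (a
		 * sketch only; error handling and the other request fields
		 * are elided, and "fd" is an open /dev/cam/ctl descriptor):
		 *
		 *	do {
		 *		list.alloc_len = len;
		 *		list.lun_xml = realloc(buf, len);
		 *		buf = list.lun_xml;
		 *		error = ioctl(fd, CTL_LUN_LIST, &list);
		 *		len *= 2;
		 *	} while (error == 0 &&
		 *	    list.status == CTL_LUN_LIST_NEED_MORE_SPACE);
		 *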
(And repeat 3051 * that until he succeeds.) 3052 */ 3053 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3054 if (sb == NULL) { 3055 list->status = CTL_LUN_LIST_ERROR; 3056 snprintf(list->error_str, sizeof(list->error_str), 3057 "Unable to allocate %d bytes for LUN list", 3058 list->alloc_len); 3059 break; 3060 } 3061 3062 sbuf_printf(sb, "<ctllunlist>\n"); 3063 3064 mtx_lock(&softc->ctl_lock); 3065 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3066 mtx_lock(&lun->lun_lock); 3067 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3068 (uintmax_t)lun->lun); 3069 3070 /* 3071 * Bail out as soon as we see that we've overfilled 3072 * the buffer. 3073 */ 3074 if (retval != 0) 3075 break; 3076 3077 retval = sbuf_printf(sb, "\t<backend_type>%s" 3078 "</backend_type>\n", 3079 (lun->backend == NULL) ? "none" : 3080 lun->backend->name); 3081 3082 if (retval != 0) 3083 break; 3084 3085 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3086 lun->be_lun->lun_type); 3087 3088 if (retval != 0) 3089 break; 3090 3091 if (lun->backend == NULL) { 3092 retval = sbuf_printf(sb, "</lun>\n"); 3093 if (retval != 0) 3094 break; 3095 continue; 3096 } 3097 3098 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3099 (lun->be_lun->maxlba > 0) ? 3100 lun->be_lun->maxlba + 1 : 0); 3101 3102 if (retval != 0) 3103 break; 3104 3105 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3106 lun->be_lun->blocksize); 3107 3108 if (retval != 0) 3109 break; 3110 3111 retval = sbuf_printf(sb, "\t<serial_number>"); 3112 3113 if (retval != 0) 3114 break; 3115 3116 retval = ctl_sbuf_printf_esc(sb, 3117 lun->be_lun->serial_num, 3118 sizeof(lun->be_lun->serial_num)); 3119 3120 if (retval != 0) 3121 break; 3122 3123 retval = sbuf_printf(sb, "</serial_number>\n"); 3124 3125 if (retval != 0) 3126 break; 3127 3128 retval = sbuf_printf(sb, "\t<device_id>"); 3129 3130 if (retval != 0) 3131 break; 3132 3133 retval = ctl_sbuf_printf_esc(sb, 3134 lun->be_lun->device_id, 3135 sizeof(lun->be_lun->device_id)); 3136 3137 if (retval != 0) 3138 break; 3139 3140 retval = sbuf_printf(sb, "</device_id>\n"); 3141 3142 if (retval != 0) 3143 break; 3144 3145 if (lun->backend->lun_info != NULL) { 3146 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3147 if (retval != 0) 3148 break; 3149 } 3150 3151 cookie = NULL; 3152 while ((name = nvlist_next(lun->be_lun->options, &type, 3153 &cookie)) != NULL) { 3154 sbuf_printf(sb, "\t<%s>", name); 3155 3156 if (type == NV_TYPE_STRING) { 3157 value = dnvlist_get_string( 3158 lun->be_lun->options, name, NULL); 3159 if (value != NULL) 3160 sbuf_printf(sb, "%s", value); 3161 } 3162 3163 sbuf_printf(sb, "</%s>\n", name); 3164 } 3165 3166 retval = sbuf_printf(sb, "</lun>\n"); 3167 3168 if (retval != 0) 3169 break; 3170 mtx_unlock(&lun->lun_lock); 3171 } 3172 if (lun != NULL) 3173 mtx_unlock(&lun->lun_lock); 3174 mtx_unlock(&softc->ctl_lock); 3175 3176 if ((retval != 0) 3177 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3178 retval = 0; 3179 sbuf_delete(sb); 3180 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3181 snprintf(list->error_str, sizeof(list->error_str), 3182 "Out of space, %d bytes is too small", 3183 list->alloc_len); 3184 break; 3185 } 3186 3187 sbuf_finish(sb); 3188 3189 retval = copyout(sbuf_data(sb), list->lun_xml, 3190 sbuf_len(sb) + 1); 3191 3192 list->fill_len = sbuf_len(sb) + 1; 3193 list->status = CTL_LUN_LIST_OK; 3194 sbuf_delete(sb); 3195 break; 3196 } 3197 case CTL_ISCSI: { 3198 struct ctl_iscsi *ci; 3199 struct ctl_frontend *fe; 3200 3201 ci = (struct ctl_iscsi 
*)addr; 3202 3203 fe = ctl_frontend_find("iscsi"); 3204 if (fe == NULL) { 3205 ci->status = CTL_ISCSI_ERROR; 3206 snprintf(ci->error_str, sizeof(ci->error_str), 3207 "Frontend \"iscsi\" not found."); 3208 break; 3209 } 3210 3211 retval = fe->ioctl(dev, cmd, addr, flag, td); 3212 break; 3213 } 3214 case CTL_PORT_REQ: { 3215 struct ctl_req *req; 3216 struct ctl_frontend *fe; 3217 void *packed; 3218 nvlist_t *tmp_args_nvl; 3219 size_t packed_len; 3220 3221 req = (struct ctl_req *)addr; 3222 tmp_args_nvl = req->args_nvl; 3223 3224 fe = ctl_frontend_find(req->driver); 3225 if (fe == NULL) { 3226 req->status = CTL_LUN_ERROR; 3227 snprintf(req->error_str, sizeof(req->error_str), 3228 "Frontend \"%s\" not found.", req->driver); 3229 break; 3230 } 3231 3232 if (req->args != NULL) { 3233 req->args_nvl = nvlist_unpack(req->args, 3234 req->args_len, 0); 3235 3236 if (req->args_nvl == NULL) { 3237 req->status = CTL_LUN_ERROR; 3238 snprintf(req->error_str, sizeof(req->error_str), 3239 "Cannot unpack args nvlist."); 3240 break; 3241 } 3242 } else 3243 req->args_nvl = nvlist_create(0); 3244 3245 if (fe->ioctl) 3246 retval = fe->ioctl(dev, cmd, addr, flag, td); 3247 else 3248 retval = ENODEV; 3249 3250 nvlist_destroy(req->args_nvl); 3251 req->args_nvl = tmp_args_nvl; 3252 3253 if (req->result_nvl != NULL) { 3254 if (req->result != NULL) { 3255 packed = nvlist_pack(req->result_nvl, 3256 &packed_len); 3257 if (packed == NULL) { 3258 req->status = CTL_LUN_ERROR; 3259 snprintf(req->error_str, 3260 sizeof(req->error_str), 3261 "Cannot pack result nvlist."); 3262 break; 3263 } 3264 3265 if (packed_len > req->result_len) { 3266 req->status = CTL_LUN_ERROR; 3267 snprintf(req->error_str, 3268 sizeof(req->error_str), 3269 "Result nvlist too large."); 3270 free(packed, M_NVLIST); 3271 break; 3272 } 3273 3274 if (copyout(packed, req->result, packed_len)) { 3275 req->status = CTL_LUN_ERROR; 3276 snprintf(req->error_str, 3277 sizeof(req->error_str), 3278 "Cannot copyout() the result."); 3279 free(packed, M_NVLIST); 3280 break; 3281 } 3282 3283 req->result_len = packed_len; 3284 free(packed, M_NVLIST); 3285 } 3286 3287 nvlist_destroy(req->result_nvl); 3288 } 3289 break; 3290 } 3291 case CTL_PORT_LIST: { 3292 struct sbuf *sb; 3293 struct ctl_port *port; 3294 struct ctl_lun_list *list; 3295 const char *name, *value; 3296 void *cookie; 3297 int j, type; 3298 uint32_t plun; 3299 3300 list = (struct ctl_lun_list *)addr; 3301 3302 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3303 if (sb == NULL) { 3304 list->status = CTL_LUN_LIST_ERROR; 3305 snprintf(list->error_str, sizeof(list->error_str), 3306 "Unable to allocate %d bytes for LUN list", 3307 list->alloc_len); 3308 break; 3309 } 3310 3311 sbuf_printf(sb, "<ctlportlist>\n"); 3312 3313 mtx_lock(&softc->ctl_lock); 3314 STAILQ_FOREACH(port, &softc->port_list, links) { 3315 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3316 (uintmax_t)port->targ_port); 3317 3318 /* 3319 * Bail out as soon as we see that we've overfilled 3320 * the buffer. 3321 */ 3322 if (retval != 0) 3323 break; 3324 3325 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3326 "</frontend_type>\n", port->frontend->name); 3327 if (retval != 0) 3328 break; 3329 3330 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3331 port->port_type); 3332 if (retval != 0) 3333 break; 3334 3335 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3336 (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); 3337 if (retval != 0) 3338 break; 3339 3340 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3341 port->port_name); 3342 if (retval != 0) 3343 break; 3344 3345 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3346 port->physical_port); 3347 if (retval != 0) 3348 break; 3349 3350 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3351 port->virtual_port); 3352 if (retval != 0) 3353 break; 3354 3355 if (port->target_devid != NULL) { 3356 sbuf_printf(sb, "\t<target>"); 3357 ctl_id_sbuf(port->target_devid, sb); 3358 sbuf_printf(sb, "</target>\n"); 3359 } 3360 3361 if (port->port_devid != NULL) { 3362 sbuf_printf(sb, "\t<port>"); 3363 ctl_id_sbuf(port->port_devid, sb); 3364 sbuf_printf(sb, "</port>\n"); 3365 } 3366 3367 if (port->port_info != NULL) { 3368 retval = port->port_info(port->onoff_arg, sb); 3369 if (retval != 0) 3370 break; 3371 } 3372 3373 cookie = NULL; 3374 while ((name = nvlist_next(port->options, &type, 3375 &cookie)) != NULL) { 3376 sbuf_printf(sb, "\t<%s>", name); 3377 3378 if (type == NV_TYPE_STRING) { 3379 value = dnvlist_get_string(port->options, 3380 name, NULL); 3381 if (value != NULL) 3382 sbuf_printf(sb, "%s", value); 3383 } 3384 3385 sbuf_printf(sb, "</%s>\n", name); 3386 } 3387 3388 if (port->lun_map != NULL) { 3389 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3390 for (j = 0; j < port->lun_map_size; j++) { 3391 plun = ctl_lun_map_from_port(port, j); 3392 if (plun == UINT32_MAX) 3393 continue; 3394 sbuf_printf(sb, 3395 "\t<lun id=\"%u\">%u</lun>\n", 3396 j, plun); 3397 } 3398 } 3399 3400 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3401 if (port->wwpn_iid[j].in_use == 0 || 3402 (port->wwpn_iid[j].wwpn == 0 && 3403 port->wwpn_iid[j].name == NULL)) 3404 continue; 3405 3406 if (port->wwpn_iid[j].name != NULL) 3407 retval = sbuf_printf(sb, 3408 "\t<initiator id=\"%u\">%s</initiator>\n", 3409 j, port->wwpn_iid[j].name); 3410 else 3411 retval = sbuf_printf(sb, 3412 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3413 j, port->wwpn_iid[j].wwpn); 3414 if (retval != 0) 3415 break; 3416 } 3417 if (retval != 0) 3418 break; 3419 3420 retval = sbuf_printf(sb, "</targ_port>\n"); 3421 if (retval != 0) 3422 break; 3423 } 3424 mtx_unlock(&softc->ctl_lock); 3425 3426 if ((retval != 0) 3427 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3428 retval = 0; 3429 sbuf_delete(sb); 3430 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3431 snprintf(list->error_str, sizeof(list->error_str), 3432 "Out of space, %d bytes is too small", 3433 list->alloc_len); 3434 break; 3435 } 3436 3437 sbuf_finish(sb); 3438 3439 retval = copyout(sbuf_data(sb), list->lun_xml, 3440 sbuf_len(sb) + 1); 3441 3442 list->fill_len = sbuf_len(sb) + 1; 3443 list->status = CTL_LUN_LIST_OK; 3444 sbuf_delete(sb); 3445 break; 3446 } 3447 case CTL_LUN_MAP: { 3448 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3449 struct ctl_port *port; 3450 3451 mtx_lock(&softc->ctl_lock); 3452 if (lm->port < softc->port_min || 3453 lm->port >= softc->port_max || 3454 (port = softc->ctl_ports[lm->port]) == NULL) { 3455 mtx_unlock(&softc->ctl_lock); 3456 return (ENXIO); 3457 } 3458 if (port->status & CTL_PORT_STATUS_ONLINE) { 3459 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3460 if (ctl_lun_map_to_port(port, lun->lun) == 3461 UINT32_MAX) 3462 continue; 3463 mtx_lock(&lun->lun_lock); 3464 ctl_est_ua_port(lun, lm->port, -1, 3465 CTL_UA_LUN_CHANGE); 3466 mtx_unlock(&lun->lun_lock); 3467 } 3468 } 3469 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3470 if 
(lm->plun != UINT32_MAX) { 3471 if (lm->lun == UINT32_MAX) 3472 retval = ctl_lun_map_unset(port, lm->plun); 3473 else if (lm->lun < ctl_max_luns && 3474 softc->ctl_luns[lm->lun] != NULL) 3475 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3476 else 3477 return (ENXIO); 3478 } else { 3479 if (lm->lun == UINT32_MAX) 3480 retval = ctl_lun_map_deinit(port); 3481 else 3482 retval = ctl_lun_map_init(port); 3483 } 3484 if (port->status & CTL_PORT_STATUS_ONLINE) 3485 ctl_isc_announce_port(port); 3486 break; 3487 } 3488 case CTL_GET_LUN_STATS: { 3489 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3490 int i; 3491 3492 /* 3493 * XXX KDM no locking here. If the LUN list changes, 3494 * things can blow up. 3495 */ 3496 i = 0; 3497 stats->status = CTL_SS_OK; 3498 stats->fill_len = 0; 3499 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3500 if (lun->lun < stats->first_item) 3501 continue; 3502 if (stats->fill_len + sizeof(lun->stats) > 3503 stats->alloc_len) { 3504 stats->status = CTL_SS_NEED_MORE_SPACE; 3505 break; 3506 } 3507 retval = copyout(&lun->stats, &stats->stats[i++], 3508 sizeof(lun->stats)); 3509 if (retval != 0) 3510 break; 3511 stats->fill_len += sizeof(lun->stats); 3512 } 3513 stats->num_items = softc->num_luns; 3514 stats->flags = CTL_STATS_FLAG_NONE; 3515 #ifdef CTL_TIME_IO 3516 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3517 #endif 3518 getnanouptime(&stats->timestamp); 3519 break; 3520 } 3521 case CTL_GET_PORT_STATS: { 3522 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3523 int i; 3524 3525 /* 3526 * XXX KDM no locking here. If the LUN list changes, 3527 * things can blow up. 3528 */ 3529 i = 0; 3530 stats->status = CTL_SS_OK; 3531 stats->fill_len = 0; 3532 STAILQ_FOREACH(port, &softc->port_list, links) { 3533 if (port->targ_port < stats->first_item) 3534 continue; 3535 if (stats->fill_len + sizeof(port->stats) > 3536 stats->alloc_len) { 3537 stats->status = CTL_SS_NEED_MORE_SPACE; 3538 break; 3539 } 3540 retval = copyout(&port->stats, &stats->stats[i++], 3541 sizeof(port->stats)); 3542 if (retval != 0) 3543 break; 3544 stats->fill_len += sizeof(port->stats); 3545 } 3546 stats->num_items = softc->num_ports; 3547 stats->flags = CTL_STATS_FLAG_NONE; 3548 #ifdef CTL_TIME_IO 3549 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3550 #endif 3551 getnanouptime(&stats->timestamp); 3552 break; 3553 } 3554 default: { 3555 /* XXX KDM should we fix this? */ 3556 #if 0 3557 struct ctl_backend_driver *backend; 3558 unsigned int type; 3559 int found; 3560 3561 found = 0; 3562 3563 /* 3564 * We encode the backend type as the ioctl type for backend 3565 * ioctls. So parse it out here, and then search for a 3566 * backend of this type. 
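		 * (This fallback is currently compiled out, so unknown ioctls
		 * simply return ENOTTY below.)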
3567 */ 3568 type = _IOC_TYPE(cmd); 3569 3570 STAILQ_FOREACH(backend, &softc->be_list, links) { 3571 if (backend->type == type) { 3572 found = 1; 3573 break; 3574 } 3575 } 3576 if (found == 0) { 3577 printf("ctl: unknown ioctl command %#lx or backend " 3578 "%d\n", cmd, type); 3579 retval = EINVAL; 3580 break; 3581 } 3582 retval = backend->ioctl(dev, cmd, addr, flag, td); 3583 #endif 3584 retval = ENOTTY; 3585 break; 3586 } 3587 } 3588 return (retval); 3589 } 3590 3591 uint32_t 3592 ctl_get_initindex(struct ctl_nexus *nexus) 3593 { 3594 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3595 } 3596 3597 int 3598 ctl_lun_map_init(struct ctl_port *port) 3599 { 3600 struct ctl_softc *softc = port->ctl_softc; 3601 struct ctl_lun *lun; 3602 int size = ctl_lun_map_size; 3603 uint32_t i; 3604 3605 if (port->lun_map == NULL || port->lun_map_size < size) { 3606 port->lun_map_size = 0; 3607 free(port->lun_map, M_CTL); 3608 port->lun_map = malloc(size * sizeof(uint32_t), 3609 M_CTL, M_NOWAIT); 3610 } 3611 if (port->lun_map == NULL) 3612 return (ENOMEM); 3613 for (i = 0; i < size; i++) 3614 port->lun_map[i] = UINT32_MAX; 3615 port->lun_map_size = size; 3616 if (port->status & CTL_PORT_STATUS_ONLINE) { 3617 if (port->lun_disable != NULL) { 3618 STAILQ_FOREACH(lun, &softc->lun_list, links) 3619 port->lun_disable(port->targ_lun_arg, lun->lun); 3620 } 3621 ctl_isc_announce_port(port); 3622 } 3623 return (0); 3624 } 3625 3626 int 3627 ctl_lun_map_deinit(struct ctl_port *port) 3628 { 3629 struct ctl_softc *softc = port->ctl_softc; 3630 struct ctl_lun *lun; 3631 3632 if (port->lun_map == NULL) 3633 return (0); 3634 port->lun_map_size = 0; 3635 free(port->lun_map, M_CTL); 3636 port->lun_map = NULL; 3637 if (port->status & CTL_PORT_STATUS_ONLINE) { 3638 if (port->lun_enable != NULL) { 3639 STAILQ_FOREACH(lun, &softc->lun_list, links) 3640 port->lun_enable(port->targ_lun_arg, lun->lun); 3641 } 3642 ctl_isc_announce_port(port); 3643 } 3644 return (0); 3645 } 3646 3647 int 3648 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3649 { 3650 int status; 3651 uint32_t old; 3652 3653 if (port->lun_map == NULL) { 3654 status = ctl_lun_map_init(port); 3655 if (status != 0) 3656 return (status); 3657 } 3658 if (plun >= port->lun_map_size) 3659 return (EINVAL); 3660 old = port->lun_map[plun]; 3661 port->lun_map[plun] = glun; 3662 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3663 if (port->lun_enable != NULL) 3664 port->lun_enable(port->targ_lun_arg, plun); 3665 ctl_isc_announce_port(port); 3666 } 3667 return (0); 3668 } 3669 3670 int 3671 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3672 { 3673 uint32_t old; 3674 3675 if (port->lun_map == NULL || plun >= port->lun_map_size) 3676 return (0); 3677 old = port->lun_map[plun]; 3678 port->lun_map[plun] = UINT32_MAX; 3679 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3680 if (port->lun_disable != NULL) 3681 port->lun_disable(port->targ_lun_arg, plun); 3682 ctl_isc_announce_port(port); 3683 } 3684 return (0); 3685 } 3686 3687 uint32_t 3688 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3689 { 3690 3691 if (port == NULL) 3692 return (UINT32_MAX); 3693 if (port->lun_map == NULL) 3694 return (lun_id); 3695 if (lun_id > port->lun_map_size) 3696 return (UINT32_MAX); 3697 return (port->lun_map[lun_id]); 3698 } 3699 3700 uint32_t 3701 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3702 { 3703 uint32_t i; 3704 3705 if (port == NULL) 3706 return (UINT32_MAX); 3707 if 
(port->lun_map == NULL) 3708 return (lun_id); 3709 for (i = 0; i < port->lun_map_size; i++) { 3710 if (port->lun_map[i] == lun_id) 3711 return (i); 3712 } 3713 return (UINT32_MAX); 3714 } 3715 3716 uint32_t 3717 ctl_decode_lun(uint64_t encoded) 3718 { 3719 uint8_t lun[8]; 3720 uint32_t result = 0xffffffff; 3721 3722 be64enc(lun, encoded); 3723 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3724 case RPL_LUNDATA_ATYP_PERIPH: 3725 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3726 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3727 result = lun[1]; 3728 break; 3729 case RPL_LUNDATA_ATYP_FLAT: 3730 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3731 lun[6] == 0 && lun[7] == 0) 3732 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3733 break; 3734 case RPL_LUNDATA_ATYP_EXTLUN: 3735 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3736 case 0x02: 3737 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3738 case 0x00: 3739 result = lun[1]; 3740 break; 3741 case 0x10: 3742 result = (lun[1] << 16) + (lun[2] << 8) + 3743 lun[3]; 3744 break; 3745 case 0x20: 3746 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3747 result = (lun[2] << 24) + 3748 (lun[3] << 16) + (lun[4] << 8) + 3749 lun[5]; 3750 break; 3751 } 3752 break; 3753 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3754 result = 0xffffffff; 3755 break; 3756 } 3757 break; 3758 } 3759 return (result); 3760 } 3761 3762 uint64_t 3763 ctl_encode_lun(uint32_t decoded) 3764 { 3765 uint64_t l = decoded; 3766 3767 if (l <= 0xff) 3768 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3769 if (l <= 0x3fff) 3770 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3771 if (l <= 0xffffff) 3772 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3773 (l << 32)); 3774 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3775 } 3776 3777 int 3778 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3779 { 3780 int i; 3781 3782 for (i = first; i < last; i++) { 3783 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3784 return (i); 3785 } 3786 return (-1); 3787 } 3788 3789 int 3790 ctl_set_mask(uint32_t *mask, uint32_t bit) 3791 { 3792 uint32_t chunk, piece; 3793 3794 chunk = bit >> 5; 3795 piece = bit % (sizeof(uint32_t) * 8); 3796 3797 if ((mask[chunk] & (1 << piece)) != 0) 3798 return (-1); 3799 else 3800 mask[chunk] |= (1 << piece); 3801 3802 return (0); 3803 } 3804 3805 int 3806 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3807 { 3808 uint32_t chunk, piece; 3809 3810 chunk = bit >> 5; 3811 piece = bit % (sizeof(uint32_t) * 8); 3812 3813 if ((mask[chunk] & (1 << piece)) == 0) 3814 return (-1); 3815 else 3816 mask[chunk] &= ~(1 << piece); 3817 3818 return (0); 3819 } 3820 3821 int 3822 ctl_is_set(uint32_t *mask, uint32_t bit) 3823 { 3824 uint32_t chunk, piece; 3825 3826 chunk = bit >> 5; 3827 piece = bit % (sizeof(uint32_t) * 8); 3828 3829 if ((mask[chunk] & (1 << piece)) == 0) 3830 return (0); 3831 else 3832 return (1); 3833 } 3834 3835 static uint64_t 3836 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3837 { 3838 uint64_t *t; 3839 3840 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3841 if (t == NULL) 3842 return (0); 3843 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3844 } 3845 3846 static void 3847 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3848 { 3849 uint64_t *t; 3850 3851 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3852 if (t == NULL) 3853 return; 3854 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3855 } 3856 3857 static void 3858 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3859 
{ 3860 uint64_t *p; 3861 u_int i; 3862 3863 i = residx/CTL_MAX_INIT_PER_PORT; 3864 if (lun->pr_keys[i] != NULL) 3865 return; 3866 mtx_unlock(&lun->lun_lock); 3867 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3868 M_WAITOK | M_ZERO); 3869 mtx_lock(&lun->lun_lock); 3870 if (lun->pr_keys[i] == NULL) 3871 lun->pr_keys[i] = p; 3872 else 3873 free(p, M_CTL); 3874 } 3875 3876 static void 3877 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3878 { 3879 uint64_t *t; 3880 3881 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3882 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3883 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3884 } 3885 3886 /* 3887 * ctl_softc, pool_name, total_ctl_io are passed in. 3888 * npool is passed out. 3889 */ 3890 int 3891 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3892 uint32_t total_ctl_io, void **npool) 3893 { 3894 struct ctl_io_pool *pool; 3895 3896 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3897 M_NOWAIT | M_ZERO); 3898 if (pool == NULL) 3899 return (ENOMEM); 3900 3901 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3902 pool->ctl_softc = ctl_softc; 3903 #ifdef IO_POOLS 3904 pool->zone = uma_zsecond_create(pool->name, NULL, 3905 NULL, NULL, NULL, ctl_softc->io_zone); 3906 /* uma_prealloc(pool->zone, total_ctl_io); */ 3907 #else 3908 pool->zone = ctl_softc->io_zone; 3909 #endif 3910 3911 *npool = pool; 3912 return (0); 3913 } 3914 3915 void 3916 ctl_pool_free(struct ctl_io_pool *pool) 3917 { 3918 3919 if (pool == NULL) 3920 return; 3921 3922 #ifdef IO_POOLS 3923 uma_zdestroy(pool->zone); 3924 #endif 3925 free(pool, M_CTL); 3926 } 3927 3928 union ctl_io * 3929 ctl_alloc_io(void *pool_ref) 3930 { 3931 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3932 union ctl_io *io; 3933 3934 io = uma_zalloc(pool->zone, M_WAITOK); 3935 if (io != NULL) { 3936 io->io_hdr.pool = pool_ref; 3937 CTL_SOFTC(io) = pool->ctl_softc; 3938 } 3939 return (io); 3940 } 3941 3942 union ctl_io * 3943 ctl_alloc_io_nowait(void *pool_ref) 3944 { 3945 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3946 union ctl_io *io; 3947 3948 io = uma_zalloc(pool->zone, M_NOWAIT); 3949 if (io != NULL) { 3950 io->io_hdr.pool = pool_ref; 3951 CTL_SOFTC(io) = pool->ctl_softc; 3952 } 3953 return (io); 3954 } 3955 3956 void 3957 ctl_free_io(union ctl_io *io) 3958 { 3959 struct ctl_io_pool *pool; 3960 3961 if (io == NULL) 3962 return; 3963 3964 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3965 uma_zfree(pool->zone, io); 3966 } 3967 3968 void 3969 ctl_zero_io(union ctl_io *io) 3970 { 3971 struct ctl_io_pool *pool; 3972 3973 if (io == NULL) 3974 return; 3975 3976 /* 3977 * May need to preserve linked list pointers at some point too. 3978 */ 3979 pool = io->io_hdr.pool; 3980 memset(io, 0, sizeof(*io)); 3981 io->io_hdr.pool = pool; 3982 CTL_SOFTC(io) = pool->ctl_softc; 3983 } 3984 3985 int 3986 ctl_expand_number(const char *buf, uint64_t *num) 3987 { 3988 char *endptr; 3989 uint64_t number; 3990 unsigned shift; 3991 3992 number = strtoq(buf, &endptr, 0); 3993 3994 switch (tolower((unsigned char)*endptr)) { 3995 case 'e': 3996 shift = 60; 3997 break; 3998 case 'p': 3999 shift = 50; 4000 break; 4001 case 't': 4002 shift = 40; 4003 break; 4004 case 'g': 4005 shift = 30; 4006 break; 4007 case 'm': 4008 shift = 20; 4009 break; 4010 case 'k': 4011 shift = 10; 4012 break; 4013 case 'b': 4014 case '\0': /* No unit. */ 4015 *num = number; 4016 return (0); 4017 default: 4018 /* Unrecognized unit. 
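 * Recognized suffixes are b, k, m, g, t, p and e (upper or lower case);
 * e.g. "4k" expands to 4096 and "1m" to 1048576 via the shifts above.
 * Anything else lands here and fails.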
*/ 4019 return (-1); 4020 } 4021 4022 if ((number << shift) >> shift != number) { 4023 /* Overflow */ 4024 return (-1); 4025 } 4026 *num = number << shift; 4027 return (0); 4028 } 4029 4030 4031 /* 4032 * This routine could be used in the future to load default and/or saved 4033 * mode page parameters for a particular lun. 4034 */ 4035 static int 4036 ctl_init_page_index(struct ctl_lun *lun) 4037 { 4038 int i, page_code; 4039 struct ctl_page_index *page_index; 4040 const char *value; 4041 uint64_t ival; 4042 4043 memcpy(&lun->mode_pages.index, page_index_template, 4044 sizeof(page_index_template)); 4045 4046 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4047 4048 page_index = &lun->mode_pages.index[i]; 4049 if (lun->be_lun->lun_type == T_DIRECT && 4050 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4051 continue; 4052 if (lun->be_lun->lun_type == T_PROCESSOR && 4053 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4054 continue; 4055 if (lun->be_lun->lun_type == T_CDROM && 4056 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4057 continue; 4058 4059 page_code = page_index->page_code & SMPH_PC_MASK; 4060 switch (page_code) { 4061 case SMS_RW_ERROR_RECOVERY_PAGE: { 4062 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4063 ("subpage %#x for page %#x is incorrect!", 4064 page_index->subpage, page_code)); 4065 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4066 &rw_er_page_default, 4067 sizeof(rw_er_page_default)); 4068 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4069 &rw_er_page_changeable, 4070 sizeof(rw_er_page_changeable)); 4071 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4072 &rw_er_page_default, 4073 sizeof(rw_er_page_default)); 4074 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4075 &rw_er_page_default, 4076 sizeof(rw_er_page_default)); 4077 page_index->page_data = 4078 (uint8_t *)lun->mode_pages.rw_er_page; 4079 break; 4080 } 4081 case SMS_FORMAT_DEVICE_PAGE: { 4082 struct scsi_format_page *format_page; 4083 4084 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4085 ("subpage %#x for page %#x is incorrect!", 4086 page_index->subpage, page_code)); 4087 4088 /* 4089 * Sectors per track are set above. Bytes per 4090 * sector need to be set here on a per-LUN basis.
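 *
 * Illustrative example (block size assumed, not taken from any
 * particular backend): a LUN with a 4096-byte block size gets
 * {0x10, 0x00} stored into bytes_per_sector by the scsi_ulto2b()
 * calls below, for the current, default and saved copies of the page.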
4091 */ 4092 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4093 &format_page_default, 4094 sizeof(format_page_default)); 4095 memcpy(&lun->mode_pages.format_page[ 4096 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4097 sizeof(format_page_changeable)); 4098 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4099 &format_page_default, 4100 sizeof(format_page_default)); 4101 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4102 &format_page_default, 4103 sizeof(format_page_default)); 4104 4105 format_page = &lun->mode_pages.format_page[ 4106 CTL_PAGE_CURRENT]; 4107 scsi_ulto2b(lun->be_lun->blocksize, 4108 format_page->bytes_per_sector); 4109 4110 format_page = &lun->mode_pages.format_page[ 4111 CTL_PAGE_DEFAULT]; 4112 scsi_ulto2b(lun->be_lun->blocksize, 4113 format_page->bytes_per_sector); 4114 4115 format_page = &lun->mode_pages.format_page[ 4116 CTL_PAGE_SAVED]; 4117 scsi_ulto2b(lun->be_lun->blocksize, 4118 format_page->bytes_per_sector); 4119 4120 page_index->page_data = 4121 (uint8_t *)lun->mode_pages.format_page; 4122 break; 4123 } 4124 case SMS_RIGID_DISK_PAGE: { 4125 struct scsi_rigid_disk_page *rigid_disk_page; 4126 uint32_t sectors_per_cylinder; 4127 uint64_t cylinders; 4128 #ifndef __XSCALE__ 4129 int shift; 4130 #endif /* !__XSCALE__ */ 4131 4132 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4133 ("subpage %#x for page %#x is incorrect!", 4134 page_index->subpage, page_code)); 4135 4136 /* 4137 * Rotation rate and sectors per track are set 4138 * above. We calculate the cylinders here based on 4139 * capacity. Due to the number of heads and 4140 * sectors per track we're using, smaller arrays 4141 * may turn out to have 0 cylinders. Linux and 4142 * FreeBSD don't pay attention to these mode pages 4143 * to figure out capacity, but Solaris does. It 4144 * seems to deal with 0 cylinders just fine, and 4145 * works out a fake geometry based on the capacity. 4146 */ 4147 memcpy(&lun->mode_pages.rigid_disk_page[ 4148 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4149 sizeof(rigid_disk_page_default)); 4150 memcpy(&lun->mode_pages.rigid_disk_page[ 4151 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4152 sizeof(rigid_disk_page_changeable)); 4153 4154 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4155 CTL_DEFAULT_HEADS; 4156 4157 /* 4158 * The divide method here will be more accurate, 4159 * probably, but results in floating point being 4160 * used in the kernel on i386 (__udivdi3()). On the 4161 * XScale, though, __udivdi3() is implemented in 4162 * software. 4163 * 4164 * The shift method for cylinder calculation is 4165 * accurate if sectors_per_cylinder is a power of 4166 * 2. Otherwise it might be slightly off -- you 4167 * might have a bit of a truncation problem. 4168 */ 4169 #ifdef __XSCALE__ 4170 cylinders = (lun->be_lun->maxlba + 1) / 4171 sectors_per_cylinder; 4172 #else 4173 for (shift = 31; shift > 0; shift--) { 4174 if (sectors_per_cylinder & (1 << shift)) 4175 break; 4176 } 4177 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4178 #endif 4179 4180 /* 4181 * We've basically got 3 bytes, or 24 bits for the 4182 * cylinder size in the mode page. If we're over, 4183 * just round down to 2^24. 
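 *
 * Illustrative example: the shift loop above divides by the highest
 * power of 2 in sectors_per_cylinder, so a non-power-of-2 value makes
 * the cylinder count come out high; either way, a result that does
 * not fit in the 3-byte field (say 0x1234567) is clamped to 0xffffff
 * by the check below before scsi_ulto3b() stores it.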
4184 */ 4185 if (cylinders > 0xffffff) 4186 cylinders = 0xffffff; 4187 4188 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4189 CTL_PAGE_DEFAULT]; 4190 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4191 4192 if ((value = dnvlist_get_string(lun->be_lun->options, 4193 "rpm", NULL)) != NULL) { 4194 scsi_ulto2b(strtol(value, NULL, 0), 4195 rigid_disk_page->rotation_rate); 4196 } 4197 4198 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4199 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4200 sizeof(rigid_disk_page_default)); 4201 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4202 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4203 sizeof(rigid_disk_page_default)); 4204 4205 page_index->page_data = 4206 (uint8_t *)lun->mode_pages.rigid_disk_page; 4207 break; 4208 } 4209 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4210 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4211 ("subpage %#x for page %#x is incorrect!", 4212 page_index->subpage, page_code)); 4213 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4214 &verify_er_page_default, 4215 sizeof(verify_er_page_default)); 4216 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4217 &verify_er_page_changeable, 4218 sizeof(verify_er_page_changeable)); 4219 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4220 &verify_er_page_default, 4221 sizeof(verify_er_page_default)); 4222 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4223 &verify_er_page_default, 4224 sizeof(verify_er_page_default)); 4225 page_index->page_data = 4226 (uint8_t *)lun->mode_pages.verify_er_page; 4227 break; 4228 } 4229 case SMS_CACHING_PAGE: { 4230 struct scsi_caching_page *caching_page; 4231 4232 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4233 ("subpage %#x for page %#x is incorrect!", 4234 page_index->subpage, page_code)); 4235 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4236 &caching_page_default, 4237 sizeof(caching_page_default)); 4238 memcpy(&lun->mode_pages.caching_page[ 4239 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4240 sizeof(caching_page_changeable)); 4241 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4242 &caching_page_default, 4243 sizeof(caching_page_default)); 4244 caching_page = &lun->mode_pages.caching_page[ 4245 CTL_PAGE_SAVED]; 4246 value = dnvlist_get_string(lun->be_lun->options, 4247 "writecache", NULL); 4248 if (value != NULL && strcmp(value, "off") == 0) 4249 caching_page->flags1 &= ~SCP_WCE; 4250 value = dnvlist_get_string(lun->be_lun->options, 4251 "readcache", NULL); 4252 if (value != NULL && strcmp(value, "off") == 0) 4253 caching_page->flags1 |= SCP_RCD; 4254 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4255 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4256 sizeof(caching_page_default)); 4257 page_index->page_data = 4258 (uint8_t *)lun->mode_pages.caching_page; 4259 break; 4260 } 4261 case SMS_CONTROL_MODE_PAGE: { 4262 switch (page_index->subpage) { 4263 case SMS_SUBPAGE_PAGE_0: { 4264 struct scsi_control_page *control_page; 4265 4266 memcpy(&lun->mode_pages.control_page[ 4267 CTL_PAGE_DEFAULT], 4268 &control_page_default, 4269 sizeof(control_page_default)); 4270 memcpy(&lun->mode_pages.control_page[ 4271 CTL_PAGE_CHANGEABLE], 4272 &control_page_changeable, 4273 sizeof(control_page_changeable)); 4274 memcpy(&lun->mode_pages.control_page[ 4275 CTL_PAGE_SAVED], 4276 &control_page_default, 4277 sizeof(control_page_default)); 4278 control_page = &lun->mode_pages.control_page[ 4279 CTL_PAGE_SAVED]; 4280 value = 
dnvlist_get_string(lun->be_lun->options, 4281 "reordering", NULL); 4282 if (value != NULL && 4283 strcmp(value, "unrestricted") == 0) { 4284 control_page->queue_flags &= 4285 ~SCP_QUEUE_ALG_MASK; 4286 control_page->queue_flags |= 4287 SCP_QUEUE_ALG_UNRESTRICTED; 4288 } 4289 memcpy(&lun->mode_pages.control_page[ 4290 CTL_PAGE_CURRENT], 4291 &lun->mode_pages.control_page[ 4292 CTL_PAGE_SAVED], 4293 sizeof(control_page_default)); 4294 page_index->page_data = 4295 (uint8_t *)lun->mode_pages.control_page; 4296 break; 4297 } 4298 case 0x01: 4299 memcpy(&lun->mode_pages.control_ext_page[ 4300 CTL_PAGE_DEFAULT], 4301 &control_ext_page_default, 4302 sizeof(control_ext_page_default)); 4303 memcpy(&lun->mode_pages.control_ext_page[ 4304 CTL_PAGE_CHANGEABLE], 4305 &control_ext_page_changeable, 4306 sizeof(control_ext_page_changeable)); 4307 memcpy(&lun->mode_pages.control_ext_page[ 4308 CTL_PAGE_SAVED], 4309 &control_ext_page_default, 4310 sizeof(control_ext_page_default)); 4311 memcpy(&lun->mode_pages.control_ext_page[ 4312 CTL_PAGE_CURRENT], 4313 &lun->mode_pages.control_ext_page[ 4314 CTL_PAGE_SAVED], 4315 sizeof(control_ext_page_default)); 4316 page_index->page_data = 4317 (uint8_t *)lun->mode_pages.control_ext_page; 4318 break; 4319 default: 4320 panic("subpage %#x for page %#x is incorrect!", 4321 page_index->subpage, page_code); 4322 } 4323 break; 4324 } 4325 case SMS_INFO_EXCEPTIONS_PAGE: { 4326 switch (page_index->subpage) { 4327 case SMS_SUBPAGE_PAGE_0: 4328 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4329 &ie_page_default, 4330 sizeof(ie_page_default)); 4331 memcpy(&lun->mode_pages.ie_page[ 4332 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4333 sizeof(ie_page_changeable)); 4334 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4335 &ie_page_default, 4336 sizeof(ie_page_default)); 4337 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4338 &ie_page_default, 4339 sizeof(ie_page_default)); 4340 page_index->page_data = 4341 (uint8_t *)lun->mode_pages.ie_page; 4342 break; 4343 case 0x02: { 4344 struct ctl_logical_block_provisioning_page *page; 4345 4346 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4347 &lbp_page_default, 4348 sizeof(lbp_page_default)); 4349 memcpy(&lun->mode_pages.lbp_page[ 4350 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4351 sizeof(lbp_page_changeable)); 4352 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4353 &lbp_page_default, 4354 sizeof(lbp_page_default)); 4355 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4356 value = dnvlist_get_string(lun->be_lun->options, 4357 "avail-threshold", NULL); 4358 if (value != NULL && 4359 ctl_expand_number(value, &ival) == 0) { 4360 page->descr[0].flags |= SLBPPD_ENABLED | 4361 SLBPPD_ARMING_DEC; 4362 if (lun->be_lun->blocksize) 4363 ival /= lun->be_lun->blocksize; 4364 else 4365 ival /= 512; 4366 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4367 page->descr[0].count); 4368 } 4369 value = dnvlist_get_string(lun->be_lun->options, 4370 "used-threshold", NULL); 4371 if (value != NULL && 4372 ctl_expand_number(value, &ival) == 0) { 4373 page->descr[1].flags |= SLBPPD_ENABLED | 4374 SLBPPD_ARMING_INC; 4375 if (lun->be_lun->blocksize) 4376 ival /= lun->be_lun->blocksize; 4377 else 4378 ival /= 512; 4379 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4380 page->descr[1].count); 4381 } 4382 value = dnvlist_get_string(lun->be_lun->options, 4383 "pool-avail-threshold", NULL); 4384 if (value != NULL && 4385 ctl_expand_number(value, &ival) == 0) { 4386 page->descr[2].flags |= SLBPPD_ENABLED | 4387 SLBPPD_ARMING_DEC; 4388 if (lun->be_lun->blocksize) 
4389 ival /= lun->be_lun->blocksize; 4390 else 4391 ival /= 512; 4392 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4393 page->descr[2].count); 4394 } 4395 value = dnvlist_get_string(lun->be_lun->options, 4396 "pool-used-threshold", NULL); 4397 if (value != NULL && 4398 ctl_expand_number(value, &ival) == 0) { 4399 page->descr[3].flags |= SLBPPD_ENABLED | 4400 SLBPPD_ARMING_INC; 4401 if (lun->be_lun->blocksize) 4402 ival /= lun->be_lun->blocksize; 4403 else 4404 ival /= 512; 4405 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4406 page->descr[3].count); 4407 } 4408 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4409 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4410 sizeof(lbp_page_default)); 4411 page_index->page_data = 4412 (uint8_t *)lun->mode_pages.lbp_page; 4413 break; 4414 } 4415 default: 4416 panic("subpage %#x for page %#x is incorrect!", 4417 page_index->subpage, page_code); 4418 } 4419 break; 4420 } 4421 case SMS_CDDVD_CAPS_PAGE:{ 4422 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4423 ("subpage %#x for page %#x is incorrect!", 4424 page_index->subpage, page_code)); 4425 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4426 &cddvd_page_default, 4427 sizeof(cddvd_page_default)); 4428 memcpy(&lun->mode_pages.cddvd_page[ 4429 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4430 sizeof(cddvd_page_changeable)); 4431 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4432 &cddvd_page_default, 4433 sizeof(cddvd_page_default)); 4434 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4435 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4436 sizeof(cddvd_page_default)); 4437 page_index->page_data = 4438 (uint8_t *)lun->mode_pages.cddvd_page; 4439 break; 4440 } 4441 default: 4442 panic("invalid page code value %#x", page_code); 4443 } 4444 } 4445 4446 return (CTL_RETVAL_COMPLETE); 4447 } 4448 4449 static int 4450 ctl_init_log_page_index(struct ctl_lun *lun) 4451 { 4452 struct ctl_page_index *page_index; 4453 int i, j, k, prev; 4454 4455 memcpy(&lun->log_pages.index, log_page_index_template, 4456 sizeof(log_page_index_template)); 4457 4458 prev = -1; 4459 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4460 4461 page_index = &lun->log_pages.index[i]; 4462 if (lun->be_lun->lun_type == T_DIRECT && 4463 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4464 continue; 4465 if (lun->be_lun->lun_type == T_PROCESSOR && 4466 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4467 continue; 4468 if (lun->be_lun->lun_type == T_CDROM && 4469 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4470 continue; 4471 4472 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4473 lun->backend->lun_attr == NULL) 4474 continue; 4475 4476 if (page_index->page_code != prev) { 4477 lun->log_pages.pages_page[j] = page_index->page_code; 4478 prev = page_index->page_code; 4479 j++; 4480 } 4481 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4482 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4483 k++; 4484 } 4485 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4486 lun->log_pages.index[0].page_len = j; 4487 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4488 lun->log_pages.index[1].page_len = k * 2; 4489 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4490 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4491 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4492 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4493 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.ie_page; 4494 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page); 4495 4496 return (CTL_RETVAL_COMPLETE); 4497 } 4498 4499 static int 4500 hex2bin(const char *str, uint8_t *buf, int buf_size) 4501 { 4502 int i; 4503 u_char c; 4504 4505 memset(buf, 0, buf_size); 4506 while (isspace(str[0])) 4507 str++; 4508 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4509 str += 2; 4510 buf_size *= 2; 4511 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4512 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4513 str++; 4514 c = str[i]; 4515 if (isdigit(c)) 4516 c -= '0'; 4517 else if (isalpha(c)) 4518 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4519 else 4520 break; 4521 if (c >= 16) 4522 break; 4523 if ((i & 1) == 0) 4524 buf[i / 2] |= (c << 4); 4525 else 4526 buf[i / 2] |= c; 4527 } 4528 return ((i + 1) / 2); 4529 } 4530 4531 /* 4532 * LUN allocation. 4533 * 4534 * Requirements: 4535 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4536 * wants us to allocate the LUN and he can block. 4537 * - ctl_softc is always set 4538 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4539 * 4540 * Returns 0 for success, non-zero (errno) for failure. 4541 */ 4542 static int 4543 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4544 struct ctl_be_lun *const be_lun) 4545 { 4546 struct ctl_lun *nlun, *lun; 4547 struct scsi_vpd_id_descriptor *desc; 4548 struct scsi_vpd_id_t10 *t10id; 4549 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4550 int lun_number, lun_malloced; 4551 int devidlen, idlen1, idlen2 = 0, len; 4552 4553 if (be_lun == NULL) 4554 return (EINVAL); 4555 4556 /* 4557 * We currently only support Direct Access, Processor, or CD-ROM LUN types. 4558 */ 4559 switch (be_lun->lun_type) { 4560 case T_DIRECT: 4561 case T_PROCESSOR: 4562 case T_CDROM: 4563 break; 4564 case T_SEQUENTIAL: 4565 case T_CHANGER: 4566 default: 4567 be_lun->lun_config_status(be_lun->be_lun, 4568 CTL_LUN_CONFIG_FAILURE); 4569 break; 4570 } 4571 if (ctl_lun == NULL) { 4572 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4573 lun_malloced = 1; 4574 } else { 4575 lun_malloced = 0; 4576 lun = ctl_lun; 4577 } 4578 4579 memset(lun, 0, sizeof(*lun)); 4580 if (lun_malloced) 4581 lun->flags = CTL_LUN_MALLOCED; 4582 4583 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4584 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4585 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4586 M_DEVBUF, M_WAITOK | M_ZERO); 4587 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4588 M_DEVBUF, M_WAITOK | M_ZERO); 4589 4590 /* Generate LUN ID.
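 * The device ID data built below always starts with a T10 vendor ID
 * descriptor (vendor from the "vendor" option or CTL_VENDOR, plus the
 * backend's device_id); SCSI name, EUI-64, NAA and UUID designators
 * are appended only when the corresponding "scsiname", "eui", "naa"
 * or "uuid" backend options are set.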
*/ 4591 devidlen = max(CTL_DEVID_MIN_LEN, 4592 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4593 idlen1 = sizeof(*t10id) + devidlen; 4594 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4595 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4596 if (scsiname != NULL) { 4597 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4598 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4599 } 4600 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4601 if (eui != NULL) { 4602 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4603 } 4604 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4605 if (naa != NULL) { 4606 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4607 } 4608 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4609 if (uuid != NULL) { 4610 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4611 } 4612 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4613 M_CTL, M_WAITOK | M_ZERO); 4614 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4615 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4616 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4617 desc->length = idlen1; 4618 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4619 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4620 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4621 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4622 } else { 4623 strncpy(t10id->vendor, vendor, 4624 min(sizeof(t10id->vendor), strlen(vendor))); 4625 } 4626 strncpy((char *)t10id->vendor_spec_id, 4627 (char *)be_lun->device_id, devidlen); 4628 if (scsiname != NULL) { 4629 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4630 desc->length); 4631 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4632 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4633 SVPD_ID_TYPE_SCSI_NAME; 4634 desc->length = idlen2; 4635 strlcpy(desc->identifier, scsiname, idlen2); 4636 } 4637 if (eui != NULL) { 4638 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4639 desc->length); 4640 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4641 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4642 SVPD_ID_TYPE_EUI64; 4643 desc->length = hex2bin(eui, desc->identifier, 16); 4644 desc->length = desc->length > 12 ? 16 : 4645 (desc->length > 8 ? 12 : 8); 4646 len -= 16 - desc->length; 4647 } 4648 if (naa != NULL) { 4649 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4650 desc->length); 4651 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4652 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4653 SVPD_ID_TYPE_NAA; 4654 desc->length = hex2bin(naa, desc->identifier, 16); 4655 desc->length = desc->length > 8 ? 16 : 8; 4656 len -= 16 - desc->length; 4657 } 4658 if (uuid != NULL) { 4659 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4660 desc->length); 4661 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4662 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4663 SVPD_ID_TYPE_UUID; 4664 desc->identifier[0] = 0x10; 4665 hex2bin(uuid, &desc->identifier[2], 16); 4666 desc->length = 18; 4667 } 4668 lun->lun_devid->len = len; 4669 4670 mtx_lock(&ctl_softc->ctl_lock); 4671 /* 4672 * See if the caller requested a particular LUN number. If so, see 4673 * if it is available. Otherwise, allocate the first available LUN. 
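 *
 * Illustrative flow of the two paths below: with CTL_LUN_FLAG_ID_REQ
 * set, req_lun_id is checked against ctl_max_luns and the ctl_lun_mask
 * bitmap; with the flag clear, ctl_ffz(ctl_softc->ctl_lun_mask, 0,
 * ctl_max_luns) picks the first unused LUN number, and -1 (no free
 * bit) means the LUN table is full.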
4674 */ 4675 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4676 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4677 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4678 mtx_unlock(&ctl_softc->ctl_lock); 4679 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4680 printf("ctl: requested LUN ID %d is higher " 4681 "than ctl_max_luns - 1 (%d)\n", 4682 be_lun->req_lun_id, ctl_max_luns - 1); 4683 } else { 4684 /* 4685 * XXX KDM return an error, or just assign 4686 * another LUN ID in this case?? 4687 */ 4688 printf("ctl: requested LUN ID %d is already " 4689 "in use\n", be_lun->req_lun_id); 4690 } 4691 fail: 4692 free(lun->lun_devid, M_CTL); 4693 if (lun->flags & CTL_LUN_MALLOCED) 4694 free(lun, M_CTL); 4695 be_lun->lun_config_status(be_lun->be_lun, 4696 CTL_LUN_CONFIG_FAILURE); 4697 return (ENOSPC); 4698 } 4699 lun_number = be_lun->req_lun_id; 4700 } else { 4701 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4702 if (lun_number == -1) { 4703 mtx_unlock(&ctl_softc->ctl_lock); 4704 printf("ctl: can't allocate LUN, out of LUNs\n"); 4705 goto fail; 4706 } 4707 } 4708 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4709 mtx_unlock(&ctl_softc->ctl_lock); 4710 4711 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4712 lun->lun = lun_number; 4713 lun->be_lun = be_lun; 4714 /* 4715 * The processor LUN is always enabled. Disk LUNs come on line 4716 * disabled, and must be enabled by the backend. 4717 */ 4718 lun->flags |= CTL_LUN_DISABLED; 4719 lun->backend = be_lun->be; 4720 be_lun->ctl_lun = lun; 4721 be_lun->lun_id = lun_number; 4722 atomic_add_int(&be_lun->be->num_luns, 1); 4723 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4724 lun->flags |= CTL_LUN_EJECTED; 4725 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4726 lun->flags |= CTL_LUN_NO_MEDIA; 4727 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4728 lun->flags |= CTL_LUN_STOPPED; 4729 4730 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4731 lun->flags |= CTL_LUN_PRIMARY_SC; 4732 4733 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4734 if (value != NULL) { 4735 if (strcmp(value, "on") == 0) 4736 lun->flags |= CTL_LUN_REMOVABLE; 4737 } else if (be_lun->lun_type == T_CDROM) 4738 lun->flags |= CTL_LUN_REMOVABLE; 4739 4740 lun->ctl_softc = ctl_softc; 4741 #ifdef CTL_TIME_IO 4742 lun->last_busy = getsbinuptime(); 4743 #endif 4744 TAILQ_INIT(&lun->ooa_queue); 4745 TAILQ_INIT(&lun->blocked_queue); 4746 STAILQ_INIT(&lun->error_list); 4747 lun->ie_reported = 1; 4748 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4749 ctl_tpc_lun_init(lun); 4750 if (lun->flags & CTL_LUN_REMOVABLE) { 4751 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4752 M_CTL, M_WAITOK); 4753 } 4754 4755 /* 4756 * Initialize the mode and log page index. 
4757 */ 4758 ctl_init_page_index(lun); 4759 ctl_init_log_page_index(lun); 4760 4761 /* Setup statistics gathering */ 4762 #ifdef CTL_LEGACY_STATS 4763 lun->legacy_stats.device_type = be_lun->lun_type; 4764 lun->legacy_stats.lun_number = lun_number; 4765 lun->legacy_stats.blocksize = be_lun->blocksize; 4766 if (be_lun->blocksize == 0) 4767 lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4768 lun->legacy_stats.ports = malloc(sizeof(struct ctl_lun_io_port_stats) * 4769 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4770 for (len = 0; len < ctl_max_ports; len++) 4771 lun->legacy_stats.ports[len].targ_port = len; 4772 #endif /* CTL_LEGACY_STATS */ 4773 lun->stats.item = lun_number; 4774 4775 /* 4776 * Now, before we insert this lun on the lun list, set the lun 4777 * inventory changed UA for all other luns. 4778 */ 4779 mtx_lock(&ctl_softc->ctl_lock); 4780 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4781 mtx_lock(&nlun->lun_lock); 4782 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4783 mtx_unlock(&nlun->lun_lock); 4784 } 4785 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4786 ctl_softc->ctl_luns[lun_number] = lun; 4787 ctl_softc->num_luns++; 4788 mtx_unlock(&ctl_softc->ctl_lock); 4789 4790 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4791 return (0); 4792 } 4793 4794 /* 4795 * Delete a LUN. 4796 * Assumptions: 4797 * - LUN has already been marked invalid and any pending I/O has been taken 4798 * care of. 4799 */ 4800 static int 4801 ctl_free_lun(struct ctl_lun *lun) 4802 { 4803 struct ctl_softc *softc = lun->ctl_softc; 4804 struct ctl_lun *nlun; 4805 int i; 4806 4807 KASSERT(TAILQ_EMPTY(&lun->ooa_queue), 4808 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4809 4810 mtx_lock(&softc->ctl_lock); 4811 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4812 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4813 softc->ctl_luns[lun->lun] = NULL; 4814 softc->num_luns--; 4815 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4816 mtx_lock(&nlun->lun_lock); 4817 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4818 mtx_unlock(&nlun->lun_lock); 4819 } 4820 mtx_unlock(&softc->ctl_lock); 4821 4822 /* 4823 * Tell the backend to free resources, if this LUN has a backend. 4824 */ 4825 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4826 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4827 4828 lun->ie_reportcnt = UINT32_MAX; 4829 callout_drain(&lun->ie_callout); 4830 ctl_tpc_lun_shutdown(lun); 4831 mtx_destroy(&lun->lun_lock); 4832 free(lun->lun_devid, M_CTL); 4833 for (i = 0; i < ctl_max_ports; i++) 4834 free(lun->pending_ua[i], M_CTL); 4835 free(lun->pending_ua, M_DEVBUF); 4836 for (i = 0; i < ctl_max_ports; i++) 4837 free(lun->pr_keys[i], M_CTL); 4838 free(lun->pr_keys, M_DEVBUF); 4839 free(lun->write_buffer, M_CTL); 4840 free(lun->prevent, M_CTL); 4841 if (lun->flags & CTL_LUN_MALLOCED) 4842 free(lun, M_CTL); 4843 4844 return (0); 4845 } 4846 4847 static void 4848 ctl_create_lun(struct ctl_be_lun *be_lun) 4849 { 4850 4851 /* 4852 * ctl_alloc_lun() should handle all potential failure cases. 
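 *
 * Note that the NULL ctl_lun argument below asks ctl_alloc_lun() to
 * allocate the LUN itself (which may sleep); on failure it reports
 * CTL_LUN_CONFIG_FAILURE through lun_config_status(), so the return
 * value is intentionally not checked here.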
4853 */ 4854 ctl_alloc_lun(control_softc, NULL, be_lun); 4855 } 4856 4857 int 4858 ctl_add_lun(struct ctl_be_lun *be_lun) 4859 { 4860 struct ctl_softc *softc = control_softc; 4861 4862 mtx_lock(&softc->ctl_lock); 4863 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4864 mtx_unlock(&softc->ctl_lock); 4865 wakeup(&softc->pending_lun_queue); 4866 4867 return (0); 4868 } 4869 4870 int 4871 ctl_enable_lun(struct ctl_be_lun *be_lun) 4872 { 4873 struct ctl_softc *softc; 4874 struct ctl_port *port, *nport; 4875 struct ctl_lun *lun; 4876 int retval; 4877 4878 lun = (struct ctl_lun *)be_lun->ctl_lun; 4879 softc = lun->ctl_softc; 4880 4881 mtx_lock(&softc->ctl_lock); 4882 mtx_lock(&lun->lun_lock); 4883 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4884 /* 4885 * eh? Why did we get called if the LUN is already 4886 * enabled? 4887 */ 4888 mtx_unlock(&lun->lun_lock); 4889 mtx_unlock(&softc->ctl_lock); 4890 return (0); 4891 } 4892 lun->flags &= ~CTL_LUN_DISABLED; 4893 mtx_unlock(&lun->lun_lock); 4894 4895 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4896 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4897 port->lun_map != NULL || port->lun_enable == NULL) 4898 continue; 4899 4900 /* 4901 * Drop the lock while we call the FETD's enable routine. 4902 * This can lead to a callback into CTL (at least in the 4903 * case of the internal initiator frontend. 4904 */ 4905 mtx_unlock(&softc->ctl_lock); 4906 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4907 mtx_lock(&softc->ctl_lock); 4908 if (retval != 0) { 4909 printf("%s: FETD %s port %d returned error " 4910 "%d for lun_enable on lun %jd\n", 4911 __func__, port->port_name, port->targ_port, 4912 retval, (intmax_t)lun->lun); 4913 } 4914 } 4915 4916 mtx_unlock(&softc->ctl_lock); 4917 ctl_isc_announce_lun(lun); 4918 4919 return (0); 4920 } 4921 4922 int 4923 ctl_disable_lun(struct ctl_be_lun *be_lun) 4924 { 4925 struct ctl_softc *softc; 4926 struct ctl_port *port; 4927 struct ctl_lun *lun; 4928 int retval; 4929 4930 lun = (struct ctl_lun *)be_lun->ctl_lun; 4931 softc = lun->ctl_softc; 4932 4933 mtx_lock(&softc->ctl_lock); 4934 mtx_lock(&lun->lun_lock); 4935 if (lun->flags & CTL_LUN_DISABLED) { 4936 mtx_unlock(&lun->lun_lock); 4937 mtx_unlock(&softc->ctl_lock); 4938 return (0); 4939 } 4940 lun->flags |= CTL_LUN_DISABLED; 4941 mtx_unlock(&lun->lun_lock); 4942 4943 STAILQ_FOREACH(port, &softc->port_list, links) { 4944 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4945 port->lun_map != NULL || port->lun_disable == NULL) 4946 continue; 4947 4948 /* 4949 * Drop the lock before we call the frontend's disable 4950 * routine, to avoid lock order reversals. 4951 * 4952 * XXX KDM what happens if the frontend list changes while 4953 * we're traversing it? It's unlikely, but should be handled. 
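 *
 * The unlock/relock pattern below mirrors ctl_enable_lun() above:
 * errors from port->lun_disable() are only logged, and the LUN stays
 * disabled either way.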
4954 */ 4955 mtx_unlock(&softc->ctl_lock); 4956 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4957 mtx_lock(&softc->ctl_lock); 4958 if (retval != 0) { 4959 printf("%s: FETD %s port %d returned error " 4960 "%d for lun_disable on lun %jd\n", 4961 __func__, port->port_name, port->targ_port, 4962 retval, (intmax_t)lun->lun); 4963 } 4964 } 4965 4966 mtx_unlock(&softc->ctl_lock); 4967 ctl_isc_announce_lun(lun); 4968 4969 return (0); 4970 } 4971 4972 int 4973 ctl_start_lun(struct ctl_be_lun *be_lun) 4974 { 4975 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4976 4977 mtx_lock(&lun->lun_lock); 4978 lun->flags &= ~CTL_LUN_STOPPED; 4979 mtx_unlock(&lun->lun_lock); 4980 return (0); 4981 } 4982 4983 int 4984 ctl_stop_lun(struct ctl_be_lun *be_lun) 4985 { 4986 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4987 4988 mtx_lock(&lun->lun_lock); 4989 lun->flags |= CTL_LUN_STOPPED; 4990 mtx_unlock(&lun->lun_lock); 4991 return (0); 4992 } 4993 4994 int 4995 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4996 { 4997 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4998 4999 mtx_lock(&lun->lun_lock); 5000 lun->flags |= CTL_LUN_NO_MEDIA; 5001 mtx_unlock(&lun->lun_lock); 5002 return (0); 5003 } 5004 5005 int 5006 ctl_lun_has_media(struct ctl_be_lun *be_lun) 5007 { 5008 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5009 union ctl_ha_msg msg; 5010 5011 mtx_lock(&lun->lun_lock); 5012 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 5013 if (lun->flags & CTL_LUN_REMOVABLE) 5014 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 5015 mtx_unlock(&lun->lun_lock); 5016 if ((lun->flags & CTL_LUN_REMOVABLE) && 5017 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5018 bzero(&msg.ua, sizeof(msg.ua)); 5019 msg.hdr.msg_type = CTL_MSG_UA; 5020 msg.hdr.nexus.initid = -1; 5021 msg.hdr.nexus.targ_port = -1; 5022 msg.hdr.nexus.targ_lun = lun->lun; 5023 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5024 msg.ua.ua_all = 1; 5025 msg.ua.ua_set = 1; 5026 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 5027 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5028 M_WAITOK); 5029 } 5030 return (0); 5031 } 5032 5033 int 5034 ctl_lun_ejected(struct ctl_be_lun *be_lun) 5035 { 5036 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5037 5038 mtx_lock(&lun->lun_lock); 5039 lun->flags |= CTL_LUN_EJECTED; 5040 mtx_unlock(&lun->lun_lock); 5041 return (0); 5042 } 5043 5044 int 5045 ctl_lun_primary(struct ctl_be_lun *be_lun) 5046 { 5047 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5048 5049 mtx_lock(&lun->lun_lock); 5050 lun->flags |= CTL_LUN_PRIMARY_SC; 5051 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5052 mtx_unlock(&lun->lun_lock); 5053 ctl_isc_announce_lun(lun); 5054 return (0); 5055 } 5056 5057 int 5058 ctl_lun_secondary(struct ctl_be_lun *be_lun) 5059 { 5060 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5061 5062 mtx_lock(&lun->lun_lock); 5063 lun->flags &= ~CTL_LUN_PRIMARY_SC; 5064 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5065 mtx_unlock(&lun->lun_lock); 5066 ctl_isc_announce_lun(lun); 5067 return (0); 5068 } 5069 5070 int 5071 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5072 { 5073 struct ctl_softc *softc; 5074 struct ctl_lun *lun; 5075 5076 lun = (struct ctl_lun *)be_lun->ctl_lun; 5077 softc = lun->ctl_softc; 5078 5079 mtx_lock(&lun->lun_lock); 5080 5081 /* 5082 * The LUN needs to be disabled before it can be marked invalid. 
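 *
 * Typical backend teardown order (illustrative):
 *	ctl_disable_lun(be_lun);
 *	ctl_invalidate_lun(be_lun);
 * Calling this on an enabled LUN returns -1 below.  Once marked
 * invalid, the LUN is freed immediately if its OOA queue is empty,
 * or when the last outstanding I/O completes otherwise.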
5083 */ 5084 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 5085 mtx_unlock(&lun->lun_lock); 5086 return (-1); 5087 } 5088 /* 5089 * Mark the LUN invalid. 5090 */ 5091 lun->flags |= CTL_LUN_INVALID; 5092 5093 /* 5094 * If there is nothing in the OOA queue, go ahead and free the LUN. 5095 * If we have something in the OOA queue, we'll free it when the 5096 * last I/O completes. 5097 */ 5098 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5099 mtx_unlock(&lun->lun_lock); 5100 ctl_free_lun(lun); 5101 } else 5102 mtx_unlock(&lun->lun_lock); 5103 5104 return (0); 5105 } 5106 5107 void 5108 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5109 { 5110 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5111 union ctl_ha_msg msg; 5112 5113 mtx_lock(&lun->lun_lock); 5114 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5115 mtx_unlock(&lun->lun_lock); 5116 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5117 /* Send msg to other side. */ 5118 bzero(&msg.ua, sizeof(msg.ua)); 5119 msg.hdr.msg_type = CTL_MSG_UA; 5120 msg.hdr.nexus.initid = -1; 5121 msg.hdr.nexus.targ_port = -1; 5122 msg.hdr.nexus.targ_lun = lun->lun; 5123 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5124 msg.ua.ua_all = 1; 5125 msg.ua.ua_set = 1; 5126 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5127 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5128 M_WAITOK); 5129 } 5130 } 5131 5132 /* 5133 * Backend "memory move is complete" callback for requests that never 5134 * make it down to say RAIDCore's configuration code. 5135 */ 5136 int 5137 ctl_config_move_done(union ctl_io *io) 5138 { 5139 int retval; 5140 5141 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5142 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5143 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5144 5145 if ((io->io_hdr.port_status != 0) && 5146 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5147 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5148 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5149 /*retry_count*/ io->io_hdr.port_status); 5150 } else if (io->scsiio.kern_data_resid != 0 && 5151 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5152 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5153 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5154 ctl_set_invalid_field_ciu(&io->scsiio); 5155 } 5156 5157 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5158 ctl_data_print(io); 5159 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5160 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5161 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5162 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5163 /* 5164 * XXX KDM just assuming a single pointer here, and not a 5165 * S/G list. If we start using S/G lists for config data, 5166 * we'll need to know how to clean them up here as well. 5167 */ 5168 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5169 free(io->scsiio.kern_data_ptr, M_CTL); 5170 ctl_done(io); 5171 retval = CTL_RETVAL_COMPLETE; 5172 } else { 5173 /* 5174 * XXX KDM now we need to continue data movement. Some 5175 * options: 5176 * - call ctl_scsiio() again? We don't do this for data 5177 * writes, because for those at least we know ahead of 5178 * time where the write will go and how long it is. For 5179 * config writes, though, that information is largely 5180 * contained within the write itself, thus we need to 5181 * parse out the data again. 5182 * 5183 * - Call some other function once the data is in? 
5184 */ 5185 5186 /* 5187 * XXX KDM call ctl_scsiio() again for now, and check flag 5188 * bits to see whether we're allocated or not. 5189 */ 5190 retval = ctl_scsiio(&io->scsiio); 5191 } 5192 return (retval); 5193 } 5194 5195 /* 5196 * This gets called by a backend driver when it is done with a 5197 * data_submit method. 5198 */ 5199 void 5200 ctl_data_submit_done(union ctl_io *io) 5201 { 5202 /* 5203 * If the IO_CONT flag is set, we need to call the supplied 5204 * function to continue processing the I/O, instead of completing 5205 * the I/O just yet. 5206 * 5207 * If there is an error, though, we don't want to keep processing. 5208 * Instead, just send status back to the initiator. 5209 */ 5210 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5211 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5212 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5213 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5214 io->scsiio.io_cont(io); 5215 return; 5216 } 5217 ctl_done(io); 5218 } 5219 5220 /* 5221 * This gets called by a backend driver when it is done with a 5222 * configuration write. 5223 */ 5224 void 5225 ctl_config_write_done(union ctl_io *io) 5226 { 5227 uint8_t *buf; 5228 5229 /* 5230 * If the IO_CONT flag is set, we need to call the supplied 5231 * function to continue processing the I/O, instead of completing 5232 * the I/O just yet. 5233 * 5234 * If there is an error, though, we don't want to keep processing. 5235 * Instead, just send status back to the initiator. 5236 */ 5237 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5238 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5239 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5240 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5241 io->scsiio.io_cont(io); 5242 return; 5243 } 5244 /* 5245 * Since a configuration write can be done for commands that actually 5246 * have data allocated, like write buffer, and commands that have 5247 * no data, like start/stop unit, we need to check here. 5248 */ 5249 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5250 buf = io->scsiio.kern_data_ptr; 5251 else 5252 buf = NULL; 5253 ctl_done(io); 5254 if (buf) 5255 free(buf, M_CTL); 5256 } 5257 5258 void 5259 ctl_config_read_done(union ctl_io *io) 5260 { 5261 uint8_t *buf; 5262 5263 /* 5264 * If there is some error -- we are done, skip data transfer. 5265 */ 5266 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5267 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5268 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5269 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5270 buf = io->scsiio.kern_data_ptr; 5271 else 5272 buf = NULL; 5273 ctl_done(io); 5274 if (buf) 5275 free(buf, M_CTL); 5276 return; 5277 } 5278 5279 /* 5280 * If the IO_CONT flag is set, we need to call the supplied 5281 * function to continue processing the I/O, instead of completing 5282 * the I/O just yet. 5283 */ 5284 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5285 io->scsiio.io_cont(io); 5286 return; 5287 } 5288 5289 ctl_datamove(io); 5290 } 5291 5292 /* 5293 * SCSI release command. 5294 */ 5295 int 5296 ctl_scsi_release(struct ctl_scsiio *ctsio) 5297 { 5298 struct ctl_lun *lun = CTL_LUN(ctsio); 5299 uint32_t residx; 5300 5301 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5302 5303 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5304 5305 /* 5306 * XXX KDM right now, we only support LUN reservation. We don't 5307 * support 3rd party reservations, or extent reservations, which 5308 * might actually need the parameter list. 
If we've gotten this 5309 * far, we've got a LUN reservation. Anything else got kicked out 5310 * above. So, according to SPC, ignore the length. 5311 */ 5312 5313 mtx_lock(&lun->lun_lock); 5314 5315 /* 5316 * According to SPC, it is not an error for an initiator to attempt 5317 * to release a reservation on a LUN that isn't reserved, or that 5318 * is reserved by another initiator. The reservation can only be 5319 * released, though, by the initiator who made it or by one of 5320 * several reset type events. 5321 */ 5322 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5323 lun->flags &= ~CTL_LUN_RESERVED; 5324 5325 mtx_unlock(&lun->lun_lock); 5326 5327 ctl_set_success(ctsio); 5328 ctl_done((union ctl_io *)ctsio); 5329 return (CTL_RETVAL_COMPLETE); 5330 } 5331 5332 int 5333 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5334 { 5335 struct ctl_lun *lun = CTL_LUN(ctsio); 5336 uint32_t residx; 5337 5338 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5339 5340 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5341 5342 /* 5343 * XXX KDM right now, we only support LUN reservation. We don't 5344 * support 3rd party reservations, or extent reservations, which 5345 * might actually need the parameter list. If we've gotten this 5346 * far, we've got a LUN reservation. Anything else got kicked out 5347 * above. So, according to SPC, ignore the length. 5348 */ 5349 5350 mtx_lock(&lun->lun_lock); 5351 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5352 ctl_set_reservation_conflict(ctsio); 5353 goto bailout; 5354 } 5355 5356 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5357 if (lun->flags & CTL_LUN_PR_RESERVED) { 5358 ctl_set_success(ctsio); 5359 goto bailout; 5360 } 5361 5362 lun->flags |= CTL_LUN_RESERVED; 5363 lun->res_idx = residx; 5364 ctl_set_success(ctsio); 5365 5366 bailout: 5367 mtx_unlock(&lun->lun_lock); 5368 ctl_done((union ctl_io *)ctsio); 5369 return (CTL_RETVAL_COMPLETE); 5370 } 5371 5372 int 5373 ctl_start_stop(struct ctl_scsiio *ctsio) 5374 { 5375 struct ctl_lun *lun = CTL_LUN(ctsio); 5376 struct scsi_start_stop_unit *cdb; 5377 int retval; 5378 5379 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5380 5381 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5382 5383 if ((cdb->how & SSS_PC_MASK) == 0) { 5384 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5385 (cdb->how & SSS_START) == 0) { 5386 uint32_t residx; 5387 5388 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5389 if (ctl_get_prkey(lun, residx) == 0 || 5390 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5391 5392 ctl_set_reservation_conflict(ctsio); 5393 ctl_done((union ctl_io *)ctsio); 5394 return (CTL_RETVAL_COMPLETE); 5395 } 5396 } 5397 5398 if ((cdb->how & SSS_LOEJ) && 5399 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5400 ctl_set_invalid_field(ctsio, 5401 /*sks_valid*/ 1, 5402 /*command*/ 1, 5403 /*field*/ 4, 5404 /*bit_valid*/ 1, 5405 /*bit*/ 1); 5406 ctl_done((union ctl_io *)ctsio); 5407 return (CTL_RETVAL_COMPLETE); 5408 } 5409 5410 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5411 lun->prevent_count > 0) { 5412 /* "Medium removal prevented" */ 5413 ctl_set_sense(ctsio, /*current_error*/ 1, 5414 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
5415 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5416 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5417 ctl_done((union ctl_io *)ctsio); 5418 return (CTL_RETVAL_COMPLETE); 5419 } 5420 } 5421 5422 retval = lun->backend->config_write((union ctl_io *)ctsio); 5423 return (retval); 5424 } 5425 5426 int 5427 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5428 { 5429 struct ctl_lun *lun = CTL_LUN(ctsio); 5430 struct scsi_prevent *cdb; 5431 int retval; 5432 uint32_t initidx; 5433 5434 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5435 5436 cdb = (struct scsi_prevent *)ctsio->cdb; 5437 5438 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5439 ctl_set_invalid_opcode(ctsio); 5440 ctl_done((union ctl_io *)ctsio); 5441 return (CTL_RETVAL_COMPLETE); 5442 } 5443 5444 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5445 mtx_lock(&lun->lun_lock); 5446 if ((cdb->how & PR_PREVENT) && 5447 ctl_is_set(lun->prevent, initidx) == 0) { 5448 ctl_set_mask(lun->prevent, initidx); 5449 lun->prevent_count++; 5450 } else if ((cdb->how & PR_PREVENT) == 0 && 5451 ctl_is_set(lun->prevent, initidx)) { 5452 ctl_clear_mask(lun->prevent, initidx); 5453 lun->prevent_count--; 5454 } 5455 mtx_unlock(&lun->lun_lock); 5456 retval = lun->backend->config_write((union ctl_io *)ctsio); 5457 return (retval); 5458 } 5459 5460 /* 5461 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5462 * we don't really do anything with the LBA and length fields if the user 5463 * passes them in. Instead we'll just flush out the cache for the entire 5464 * LUN. 5465 */ 5466 int 5467 ctl_sync_cache(struct ctl_scsiio *ctsio) 5468 { 5469 struct ctl_lun *lun = CTL_LUN(ctsio); 5470 struct ctl_lba_len_flags *lbalen; 5471 uint64_t starting_lba; 5472 uint32_t block_count; 5473 int retval; 5474 uint8_t byte2; 5475 5476 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5477 5478 retval = 0; 5479 5480 switch (ctsio->cdb[0]) { 5481 case SYNCHRONIZE_CACHE: { 5482 struct scsi_sync_cache *cdb; 5483 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5484 5485 starting_lba = scsi_4btoul(cdb->begin_lba); 5486 block_count = scsi_2btoul(cdb->lb_count); 5487 byte2 = cdb->byte2; 5488 break; 5489 } 5490 case SYNCHRONIZE_CACHE_16: { 5491 struct scsi_sync_cache_16 *cdb; 5492 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5493 5494 starting_lba = scsi_8btou64(cdb->begin_lba); 5495 block_count = scsi_4btoul(cdb->lb_count); 5496 byte2 = cdb->byte2; 5497 break; 5498 } 5499 default: 5500 ctl_set_invalid_opcode(ctsio); 5501 ctl_done((union ctl_io *)ctsio); 5502 goto bailout; 5503 break; /* NOTREACHED */ 5504 } 5505 5506 /* 5507 * We check the LBA and length, but don't do anything with them. 5508 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5509 * get flushed. This check will just help satisfy anyone who wants 5510 * to see an error for an out of range LBA. 
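 *
 * Example with made-up numbers: on a LUN with maxlba = 999 (1000
 * blocks), starting_lba = 990 with block_count = 20 trips the check
 * below (990 + 20 > 1000), while block_count = 10 passes and still
 * flushes the entire LUN.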
5511 */ 5512 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5513 ctl_set_lba_out_of_range(ctsio, 5514 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5515 ctl_done((union ctl_io *)ctsio); 5516 goto bailout; 5517 } 5518 5519 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5520 lbalen->lba = starting_lba; 5521 lbalen->len = block_count; 5522 lbalen->flags = byte2; 5523 retval = lun->backend->config_write((union ctl_io *)ctsio); 5524 5525 bailout: 5526 return (retval); 5527 } 5528 5529 int 5530 ctl_format(struct ctl_scsiio *ctsio) 5531 { 5532 struct scsi_format *cdb; 5533 int length, defect_list_len; 5534 5535 CTL_DEBUG_PRINT(("ctl_format\n")); 5536 5537 cdb = (struct scsi_format *)ctsio->cdb; 5538 5539 length = 0; 5540 if (cdb->byte2 & SF_FMTDATA) { 5541 if (cdb->byte2 & SF_LONGLIST) 5542 length = sizeof(struct scsi_format_header_long); 5543 else 5544 length = sizeof(struct scsi_format_header_short); 5545 } 5546 5547 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5548 && (length > 0)) { 5549 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5550 ctsio->kern_data_len = length; 5551 ctsio->kern_total_len = length; 5552 ctsio->kern_rel_offset = 0; 5553 ctsio->kern_sg_entries = 0; 5554 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5555 ctsio->be_move_done = ctl_config_move_done; 5556 ctl_datamove((union ctl_io *)ctsio); 5557 5558 return (CTL_RETVAL_COMPLETE); 5559 } 5560 5561 defect_list_len = 0; 5562 5563 if (cdb->byte2 & SF_FMTDATA) { 5564 if (cdb->byte2 & SF_LONGLIST) { 5565 struct scsi_format_header_long *header; 5566 5567 header = (struct scsi_format_header_long *) 5568 ctsio->kern_data_ptr; 5569 5570 defect_list_len = scsi_4btoul(header->defect_list_len); 5571 if (defect_list_len != 0) { 5572 ctl_set_invalid_field(ctsio, 5573 /*sks_valid*/ 1, 5574 /*command*/ 0, 5575 /*field*/ 2, 5576 /*bit_valid*/ 0, 5577 /*bit*/ 0); 5578 goto bailout; 5579 } 5580 } else { 5581 struct scsi_format_header_short *header; 5582 5583 header = (struct scsi_format_header_short *) 5584 ctsio->kern_data_ptr; 5585 5586 defect_list_len = scsi_2btoul(header->defect_list_len); 5587 if (defect_list_len != 0) { 5588 ctl_set_invalid_field(ctsio, 5589 /*sks_valid*/ 1, 5590 /*command*/ 0, 5591 /*field*/ 2, 5592 /*bit_valid*/ 0, 5593 /*bit*/ 0); 5594 goto bailout; 5595 } 5596 } 5597 } 5598 5599 ctl_set_success(ctsio); 5600 bailout: 5601 5602 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5603 free(ctsio->kern_data_ptr, M_CTL); 5604 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5605 } 5606 5607 ctl_done((union ctl_io *)ctsio); 5608 return (CTL_RETVAL_COMPLETE); 5609 } 5610 5611 int 5612 ctl_read_buffer(struct ctl_scsiio *ctsio) 5613 { 5614 struct ctl_lun *lun = CTL_LUN(ctsio); 5615 uint64_t buffer_offset; 5616 uint32_t len; 5617 uint8_t byte2; 5618 static uint8_t descr[4]; 5619 static uint8_t echo_descr[4] = { 0 }; 5620 5621 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5622 5623 switch (ctsio->cdb[0]) { 5624 case READ_BUFFER: { 5625 struct scsi_read_buffer *cdb; 5626 5627 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5628 buffer_offset = scsi_3btoul(cdb->offset); 5629 len = scsi_3btoul(cdb->length); 5630 byte2 = cdb->byte2; 5631 break; 5632 } 5633 case READ_BUFFER_16: { 5634 struct scsi_read_buffer_16 *cdb; 5635 5636 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5637 buffer_offset = scsi_8btou64(cdb->offset); 5638 len = scsi_4btoul(cdb->length); 5639 byte2 = cdb->byte2; 5640 break; 5641 } 5642 default: /* This shouldn't happen. 
*/ 5643 ctl_set_invalid_opcode(ctsio); 5644 ctl_done((union ctl_io *)ctsio); 5645 return (CTL_RETVAL_COMPLETE); 5646 } 5647 5648 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5649 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5650 ctl_set_invalid_field(ctsio, 5651 /*sks_valid*/ 1, 5652 /*command*/ 1, 5653 /*field*/ 6, 5654 /*bit_valid*/ 0, 5655 /*bit*/ 0); 5656 ctl_done((union ctl_io *)ctsio); 5657 return (CTL_RETVAL_COMPLETE); 5658 } 5659 5660 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5661 descr[0] = 0; 5662 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5663 ctsio->kern_data_ptr = descr; 5664 len = min(len, sizeof(descr)); 5665 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5666 ctsio->kern_data_ptr = echo_descr; 5667 len = min(len, sizeof(echo_descr)); 5668 } else { 5669 if (lun->write_buffer == NULL) { 5670 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5671 M_CTL, M_WAITOK); 5672 } 5673 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5674 } 5675 ctsio->kern_data_len = len; 5676 ctsio->kern_total_len = len; 5677 ctsio->kern_rel_offset = 0; 5678 ctsio->kern_sg_entries = 0; 5679 ctl_set_success(ctsio); 5680 ctsio->be_move_done = ctl_config_move_done; 5681 ctl_datamove((union ctl_io *)ctsio); 5682 return (CTL_RETVAL_COMPLETE); 5683 } 5684 5685 int 5686 ctl_write_buffer(struct ctl_scsiio *ctsio) 5687 { 5688 struct ctl_lun *lun = CTL_LUN(ctsio); 5689 struct scsi_write_buffer *cdb; 5690 int buffer_offset, len; 5691 5692 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5693 5694 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5695 5696 len = scsi_3btoul(cdb->length); 5697 buffer_offset = scsi_3btoul(cdb->offset); 5698 5699 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5700 ctl_set_invalid_field(ctsio, 5701 /*sks_valid*/ 1, 5702 /*command*/ 1, 5703 /*field*/ 6, 5704 /*bit_valid*/ 0, 5705 /*bit*/ 0); 5706 ctl_done((union ctl_io *)ctsio); 5707 return (CTL_RETVAL_COMPLETE); 5708 } 5709 5710 /* 5711 * If we've got a kernel request that hasn't been malloced yet, 5712 * malloc it and tell the caller the data buffer is here. 
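 *
 * Sketch of the two-pass flow: on the first pass CTL_FLAG_ALLOCATED is
 * clear, so lun->write_buffer is (lazily) allocated, handed to
 * ctl_datamove() and we return; after the data-out phase,
 * ctl_config_move_done() re-dispatches the command via ctl_scsiio()
 * and the second pass falls through to ctl_set_success() below.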
5713 */ 5714 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5715 if (lun->write_buffer == NULL) { 5716 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5717 M_CTL, M_WAITOK); 5718 } 5719 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5720 ctsio->kern_data_len = len; 5721 ctsio->kern_total_len = len; 5722 ctsio->kern_rel_offset = 0; 5723 ctsio->kern_sg_entries = 0; 5724 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5725 ctsio->be_move_done = ctl_config_move_done; 5726 ctl_datamove((union ctl_io *)ctsio); 5727 5728 return (CTL_RETVAL_COMPLETE); 5729 } 5730 5731 ctl_set_success(ctsio); 5732 ctl_done((union ctl_io *)ctsio); 5733 return (CTL_RETVAL_COMPLETE); 5734 } 5735 5736 int 5737 ctl_write_same(struct ctl_scsiio *ctsio) 5738 { 5739 struct ctl_lun *lun = CTL_LUN(ctsio); 5740 struct ctl_lba_len_flags *lbalen; 5741 uint64_t lba; 5742 uint32_t num_blocks; 5743 int len, retval; 5744 uint8_t byte2; 5745 5746 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5747 5748 switch (ctsio->cdb[0]) { 5749 case WRITE_SAME_10: { 5750 struct scsi_write_same_10 *cdb; 5751 5752 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5753 5754 lba = scsi_4btoul(cdb->addr); 5755 num_blocks = scsi_2btoul(cdb->length); 5756 byte2 = cdb->byte2; 5757 break; 5758 } 5759 case WRITE_SAME_16: { 5760 struct scsi_write_same_16 *cdb; 5761 5762 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5763 5764 lba = scsi_8btou64(cdb->addr); 5765 num_blocks = scsi_4btoul(cdb->length); 5766 byte2 = cdb->byte2; 5767 break; 5768 } 5769 default: 5770 /* 5771 * We got a command we don't support. This shouldn't 5772 * happen, commands should be filtered out above us. 5773 */ 5774 ctl_set_invalid_opcode(ctsio); 5775 ctl_done((union ctl_io *)ctsio); 5776 5777 return (CTL_RETVAL_COMPLETE); 5778 break; /* NOTREACHED */ 5779 } 5780 5781 /* ANCHOR flag can be used only together with UNMAP */ 5782 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5783 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5784 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5785 ctl_done((union ctl_io *)ctsio); 5786 return (CTL_RETVAL_COMPLETE); 5787 } 5788 5789 /* 5790 * The first check is to make sure we're in bounds, the second 5791 * check is to catch wrap-around problems. If the lba + num blocks 5792 * is less than the lba, then we've wrapped around and the block 5793 * range is invalid anyway. 5794 */ 5795 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5796 || ((lba + num_blocks) < lba)) { 5797 ctl_set_lba_out_of_range(ctsio, 5798 MAX(lba, lun->be_lun->maxlba + 1)); 5799 ctl_done((union ctl_io *)ctsio); 5800 return (CTL_RETVAL_COMPLETE); 5801 } 5802 5803 /* Zero number of blocks means "to the last logical block" */ 5804 if (num_blocks == 0) { 5805 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5806 ctl_set_invalid_field(ctsio, 5807 /*sks_valid*/ 0, 5808 /*command*/ 1, 5809 /*field*/ 0, 5810 /*bit_valid*/ 0, 5811 /*bit*/ 0); 5812 ctl_done((union ctl_io *)ctsio); 5813 return (CTL_RETVAL_COMPLETE); 5814 } 5815 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5816 } 5817 5818 len = lun->be_lun->blocksize; 5819 5820 /* 5821 * If we've got a kernel request that hasn't been malloced yet, 5822 * malloc it and tell the caller the data buffer is here. 
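 *
 * Note: if the NDOB ("no data-out buffer") bit is set in byte2, the
 * check below skips the allocation and data-out phase entirely and
 * the request goes straight to the backend; otherwise a single block
 * (blocksize bytes) is fetched from the initiator first.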
5823 */ 5824 if ((byte2 & SWS_NDOB) == 0 && 5825 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5826 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5827 ctsio->kern_data_len = len; 5828 ctsio->kern_total_len = len; 5829 ctsio->kern_rel_offset = 0; 5830 ctsio->kern_sg_entries = 0; 5831 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5832 ctsio->be_move_done = ctl_config_move_done; 5833 ctl_datamove((union ctl_io *)ctsio); 5834 5835 return (CTL_RETVAL_COMPLETE); 5836 } 5837 5838 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5839 lbalen->lba = lba; 5840 lbalen->len = num_blocks; 5841 lbalen->flags = byte2; 5842 retval = lun->backend->config_write((union ctl_io *)ctsio); 5843 5844 return (retval); 5845 } 5846 5847 int 5848 ctl_unmap(struct ctl_scsiio *ctsio) 5849 { 5850 struct ctl_lun *lun = CTL_LUN(ctsio); 5851 struct scsi_unmap *cdb; 5852 struct ctl_ptr_len_flags *ptrlen; 5853 struct scsi_unmap_header *hdr; 5854 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5855 uint64_t lba; 5856 uint32_t num_blocks; 5857 int len, retval; 5858 uint8_t byte2; 5859 5860 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5861 5862 cdb = (struct scsi_unmap *)ctsio->cdb; 5863 len = scsi_2btoul(cdb->length); 5864 byte2 = cdb->byte2; 5865 5866 /* 5867 * If we've got a kernel request that hasn't been malloced yet, 5868 * malloc it and tell the caller the data buffer is here. 5869 */ 5870 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5871 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5872 ctsio->kern_data_len = len; 5873 ctsio->kern_total_len = len; 5874 ctsio->kern_rel_offset = 0; 5875 ctsio->kern_sg_entries = 0; 5876 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5877 ctsio->be_move_done = ctl_config_move_done; 5878 ctl_datamove((union ctl_io *)ctsio); 5879 5880 return (CTL_RETVAL_COMPLETE); 5881 } 5882 5883 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5884 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5885 if (len < sizeof (*hdr) || 5886 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5887 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5888 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5889 ctl_set_invalid_field(ctsio, 5890 /*sks_valid*/ 0, 5891 /*command*/ 0, 5892 /*field*/ 0, 5893 /*bit_valid*/ 0, 5894 /*bit*/ 0); 5895 goto done; 5896 } 5897 len = scsi_2btoul(hdr->desc_length); 5898 buf = (struct scsi_unmap_desc *)(hdr + 1); 5899 end = buf + len / sizeof(*buf); 5900 5901 endnz = buf; 5902 for (range = buf; range < end; range++) { 5903 lba = scsi_8btou64(range->lba); 5904 num_blocks = scsi_4btoul(range->length); 5905 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5906 || ((lba + num_blocks) < lba)) { 5907 ctl_set_lba_out_of_range(ctsio, 5908 MAX(lba, lun->be_lun->maxlba + 1)); 5909 ctl_done((union ctl_io *)ctsio); 5910 return (CTL_RETVAL_COMPLETE); 5911 } 5912 if (num_blocks != 0) 5913 endnz = range + 1; 5914 } 5915 5916 /* 5917 * Block backend can not handle zero last range. 5918 * Filter it out and return if there is nothing left. 
5919 */ 5920 len = (uint8_t *)endnz - (uint8_t *)buf; 5921 if (len == 0) { 5922 ctl_set_success(ctsio); 5923 goto done; 5924 } 5925 5926 mtx_lock(&lun->lun_lock); 5927 ptrlen = (struct ctl_ptr_len_flags *) 5928 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5929 ptrlen->ptr = (void *)buf; 5930 ptrlen->len = len; 5931 ptrlen->flags = byte2; 5932 ctl_check_blocked(lun); 5933 mtx_unlock(&lun->lun_lock); 5934 5935 retval = lun->backend->config_write((union ctl_io *)ctsio); 5936 return (retval); 5937 5938 done: 5939 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5940 free(ctsio->kern_data_ptr, M_CTL); 5941 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5942 } 5943 ctl_done((union ctl_io *)ctsio); 5944 return (CTL_RETVAL_COMPLETE); 5945 } 5946 5947 int 5948 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5949 struct ctl_page_index *page_index, uint8_t *page_ptr) 5950 { 5951 struct ctl_lun *lun = CTL_LUN(ctsio); 5952 uint8_t *current_cp; 5953 int set_ua; 5954 uint32_t initidx; 5955 5956 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5957 set_ua = 0; 5958 5959 current_cp = (page_index->page_data + (page_index->page_len * 5960 CTL_PAGE_CURRENT)); 5961 5962 mtx_lock(&lun->lun_lock); 5963 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5964 memcpy(current_cp, page_ptr, page_index->page_len); 5965 set_ua = 1; 5966 } 5967 if (set_ua != 0) 5968 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5969 mtx_unlock(&lun->lun_lock); 5970 if (set_ua) { 5971 ctl_isc_announce_mode(lun, 5972 ctl_get_initindex(&ctsio->io_hdr.nexus), 5973 page_index->page_code, page_index->subpage); 5974 } 5975 return (CTL_RETVAL_COMPLETE); 5976 } 5977 5978 static void 5979 ctl_ie_timer(void *arg) 5980 { 5981 struct ctl_lun *lun = arg; 5982 uint64_t t; 5983 5984 if (lun->ie_asc == 0) 5985 return; 5986 5987 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5988 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5989 else 5990 lun->ie_reported = 0; 5991 5992 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5993 lun->ie_reportcnt++; 5994 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5995 if (t == 0 || t == UINT32_MAX) 5996 t = 3000; /* 5 min */ 5997 callout_schedule(&lun->ie_callout, t * hz / 10); 5998 } 5999 } 6000 6001 int 6002 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 6003 struct ctl_page_index *page_index, uint8_t *page_ptr) 6004 { 6005 struct ctl_lun *lun = CTL_LUN(ctsio); 6006 struct scsi_info_exceptions_page *pg; 6007 uint64_t t; 6008 6009 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 6010 6011 pg = (struct scsi_info_exceptions_page *)page_ptr; 6012 mtx_lock(&lun->lun_lock); 6013 if (pg->info_flags & SIEP_FLAGS_TEST) { 6014 lun->ie_asc = 0x5d; 6015 lun->ie_ascq = 0xff; 6016 if (pg->mrie == SIEP_MRIE_UA) { 6017 ctl_est_ua_all(lun, -1, CTL_UA_IE); 6018 lun->ie_reported = 1; 6019 } else { 6020 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6021 lun->ie_reported = -1; 6022 } 6023 lun->ie_reportcnt = 1; 6024 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 6025 lun->ie_reportcnt++; 6026 t = scsi_4btoul(pg->interval_timer); 6027 if (t == 0 || t == UINT32_MAX) 6028 t = 3000; /* 5 min */ 6029 callout_reset(&lun->ie_callout, t * hz / 10, 6030 ctl_ie_timer, lun); 6031 } 6032 } else { 6033 lun->ie_asc = 0; 6034 lun->ie_ascq = 0; 6035 lun->ie_reported = 1; 6036 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6037 lun->ie_reportcnt = UINT32_MAX; 6038 callout_stop(&lun->ie_callout); 6039 } 6040 mtx_unlock(&lun->lun_lock); 6041 return (CTL_RETVAL_COMPLETE); 6042 } 6043 6044 static int 6045 ctl_do_mode_select(union ctl_io *io) 6046 { 
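/* Process one mode page from the MODE SELECT parameter list: locate it in the LUN's page index, verify its length and that only changeable bits differ from the current values, then hand it to the page's select handler. Re-entered via io_cont until the list is exhausted. */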
6047 struct ctl_lun *lun = CTL_LUN(io); 6048 struct scsi_mode_page_header *page_header; 6049 struct ctl_page_index *page_index; 6050 struct ctl_scsiio *ctsio; 6051 int page_len, page_len_offset, page_len_size; 6052 union ctl_modepage_info *modepage_info; 6053 uint16_t *len_left, *len_used; 6054 int retval, i; 6055 6056 ctsio = &io->scsiio; 6057 page_index = NULL; 6058 page_len = 0; 6059 6060 modepage_info = (union ctl_modepage_info *) 6061 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6062 len_left = &modepage_info->header.len_left; 6063 len_used = &modepage_info->header.len_used; 6064 6065 do_next_page: 6066 6067 page_header = (struct scsi_mode_page_header *) 6068 (ctsio->kern_data_ptr + *len_used); 6069 6070 if (*len_left == 0) { 6071 free(ctsio->kern_data_ptr, M_CTL); 6072 ctl_set_success(ctsio); 6073 ctl_done((union ctl_io *)ctsio); 6074 return (CTL_RETVAL_COMPLETE); 6075 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6076 6077 free(ctsio->kern_data_ptr, M_CTL); 6078 ctl_set_param_len_error(ctsio); 6079 ctl_done((union ctl_io *)ctsio); 6080 return (CTL_RETVAL_COMPLETE); 6081 6082 } else if ((page_header->page_code & SMPH_SPF) 6083 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6084 6085 free(ctsio->kern_data_ptr, M_CTL); 6086 ctl_set_param_len_error(ctsio); 6087 ctl_done((union ctl_io *)ctsio); 6088 return (CTL_RETVAL_COMPLETE); 6089 } 6090 6091 6092 /* 6093 * XXX KDM should we do something with the block descriptor? 6094 */ 6095 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6096 page_index = &lun->mode_pages.index[i]; 6097 if (lun->be_lun->lun_type == T_DIRECT && 6098 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6099 continue; 6100 if (lun->be_lun->lun_type == T_PROCESSOR && 6101 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6102 continue; 6103 if (lun->be_lun->lun_type == T_CDROM && 6104 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6105 continue; 6106 6107 if ((page_index->page_code & SMPH_PC_MASK) != 6108 (page_header->page_code & SMPH_PC_MASK)) 6109 continue; 6110 6111 /* 6112 * If neither page has a subpage code, then we've got a 6113 * match. 6114 */ 6115 if (((page_index->page_code & SMPH_SPF) == 0) 6116 && ((page_header->page_code & SMPH_SPF) == 0)) { 6117 page_len = page_header->page_length; 6118 break; 6119 } 6120 6121 /* 6122 * If both pages have subpages, then the subpage numbers 6123 * have to match. 6124 */ 6125 if ((page_index->page_code & SMPH_SPF) 6126 && (page_header->page_code & SMPH_SPF)) { 6127 struct scsi_mode_page_header_sp *sph; 6128 6129 sph = (struct scsi_mode_page_header_sp *)page_header; 6130 if (page_index->subpage == sph->subpage) { 6131 page_len = scsi_2btoul(sph->page_length); 6132 break; 6133 } 6134 } 6135 } 6136 6137 /* 6138 * If we couldn't find the page, or if we don't have a mode select 6139 * handler for it, send back an error to the user. 
6140 */ 6141 if ((i >= CTL_NUM_MODE_PAGES) 6142 || (page_index->select_handler == NULL)) { 6143 ctl_set_invalid_field(ctsio, 6144 /*sks_valid*/ 1, 6145 /*command*/ 0, 6146 /*field*/ *len_used, 6147 /*bit_valid*/ 0, 6148 /*bit*/ 0); 6149 free(ctsio->kern_data_ptr, M_CTL); 6150 ctl_done((union ctl_io *)ctsio); 6151 return (CTL_RETVAL_COMPLETE); 6152 } 6153 6154 if (page_index->page_code & SMPH_SPF) { 6155 page_len_offset = 2; 6156 page_len_size = 2; 6157 } else { 6158 page_len_size = 1; 6159 page_len_offset = 1; 6160 } 6161 6162 /* 6163 * If the length the initiator gives us isn't the one we specify in 6164 * the mode page header, or if they didn't specify enough data in 6165 * the CDB to avoid truncating this page, kick out the request. 6166 */ 6167 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6168 ctl_set_invalid_field(ctsio, 6169 /*sks_valid*/ 1, 6170 /*command*/ 0, 6171 /*field*/ *len_used + page_len_offset, 6172 /*bit_valid*/ 0, 6173 /*bit*/ 0); 6174 free(ctsio->kern_data_ptr, M_CTL); 6175 ctl_done((union ctl_io *)ctsio); 6176 return (CTL_RETVAL_COMPLETE); 6177 } 6178 if (*len_left < page_index->page_len) { 6179 free(ctsio->kern_data_ptr, M_CTL); 6180 ctl_set_param_len_error(ctsio); 6181 ctl_done((union ctl_io *)ctsio); 6182 return (CTL_RETVAL_COMPLETE); 6183 } 6184 6185 /* 6186 * Run through the mode page, checking to make sure that the bits 6187 * the user changed are actually legal for him to change. 6188 */ 6189 for (i = 0; i < page_index->page_len; i++) { 6190 uint8_t *user_byte, *change_mask, *current_byte; 6191 int bad_bit; 6192 int j; 6193 6194 user_byte = (uint8_t *)page_header + i; 6195 change_mask = page_index->page_data + 6196 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6197 current_byte = page_index->page_data + 6198 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6199 6200 /* 6201 * Check to see whether the user set any bits in this byte 6202 * that he is not allowed to set. 6203 */ 6204 if ((*user_byte & ~(*change_mask)) == 6205 (*current_byte & ~(*change_mask))) 6206 continue; 6207 6208 /* 6209 * Go through bit by bit to determine which one is illegal. 6210 */ 6211 bad_bit = 0; 6212 for (j = 7; j >= 0; j--) { 6213 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6214 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6215 bad_bit = j; 6216 break; 6217 } 6218 } 6219 ctl_set_invalid_field(ctsio, 6220 /*sks_valid*/ 1, 6221 /*command*/ 0, 6222 /*field*/ *len_used + i, 6223 /*bit_valid*/ 1, 6224 /*bit*/ bad_bit); 6225 free(ctsio->kern_data_ptr, M_CTL); 6226 ctl_done((union ctl_io *)ctsio); 6227 return (CTL_RETVAL_COMPLETE); 6228 } 6229 6230 /* 6231 * Decrement these before we call the page handler, since we may 6232 * end up getting called back one way or another before the handler 6233 * returns to this context. 6234 */ 6235 *len_left -= page_index->page_len; 6236 *len_used += page_index->page_len; 6237 6238 retval = page_index->select_handler(ctsio, page_index, 6239 (uint8_t *)page_header); 6240 6241 /* 6242 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6243 * wait until this queued command completes to finish processing 6244 * the mode page. If it returns anything other than 6245 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6246 * already set the sense information, freed the data pointer, and 6247 * completed the io for us. 6248 */ 6249 if (retval != CTL_RETVAL_COMPLETE) 6250 goto bailout_no_done; 6251 6252 /* 6253 * If the initiator sent us more than one page, parse the next one.
6254 */ 6255 if (*len_left > 0) 6256 goto do_next_page; 6257 6258 ctl_set_success(ctsio); 6259 free(ctsio->kern_data_ptr, M_CTL); 6260 ctl_done((union ctl_io *)ctsio); 6261 6262 bailout_no_done: 6263 6264 return (CTL_RETVAL_COMPLETE); 6265 6266 } 6267 6268 int 6269 ctl_mode_select(struct ctl_scsiio *ctsio) 6270 { 6271 struct ctl_lun *lun = CTL_LUN(ctsio); 6272 union ctl_modepage_info *modepage_info; 6273 int bd_len, i, header_size, param_len, pf, rtd, sp; 6274 uint32_t initidx; 6275 6276 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6277 switch (ctsio->cdb[0]) { 6278 case MODE_SELECT_6: { 6279 struct scsi_mode_select_6 *cdb; 6280 6281 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6282 6283 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6284 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6285 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6286 param_len = cdb->length; 6287 header_size = sizeof(struct scsi_mode_header_6); 6288 break; 6289 } 6290 case MODE_SELECT_10: { 6291 struct scsi_mode_select_10 *cdb; 6292 6293 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6294 6295 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6296 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6297 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6298 param_len = scsi_2btoul(cdb->length); 6299 header_size = sizeof(struct scsi_mode_header_10); 6300 break; 6301 } 6302 default: 6303 ctl_set_invalid_opcode(ctsio); 6304 ctl_done((union ctl_io *)ctsio); 6305 return (CTL_RETVAL_COMPLETE); 6306 } 6307 6308 if (rtd) { 6309 if (param_len != 0) { 6310 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6311 /*command*/ 1, /*field*/ 0, 6312 /*bit_valid*/ 0, /*bit*/ 0); 6313 ctl_done((union ctl_io *)ctsio); 6314 return (CTL_RETVAL_COMPLETE); 6315 } 6316 6317 /* Revert to defaults. */ 6318 ctl_init_page_index(lun); 6319 mtx_lock(&lun->lun_lock); 6320 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6321 mtx_unlock(&lun->lun_lock); 6322 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6323 ctl_isc_announce_mode(lun, -1, 6324 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6325 lun->mode_pages.index[i].subpage); 6326 } 6327 ctl_set_success(ctsio); 6328 ctl_done((union ctl_io *)ctsio); 6329 return (CTL_RETVAL_COMPLETE); 6330 } 6331 6332 /* 6333 * From SPC-3: 6334 * "A parameter list length of zero indicates that the Data-Out Buffer 6335 * shall be empty. This condition shall not be considered as an error." 6336 */ 6337 if (param_len == 0) { 6338 ctl_set_success(ctsio); 6339 ctl_done((union ctl_io *)ctsio); 6340 return (CTL_RETVAL_COMPLETE); 6341 } 6342 6343 /* 6344 * Since we'll hit this the first time through, prior to 6345 * allocation, we don't need to free a data buffer here. 6346 */ 6347 if (param_len < header_size) { 6348 ctl_set_param_len_error(ctsio); 6349 ctl_done((union ctl_io *)ctsio); 6350 return (CTL_RETVAL_COMPLETE); 6351 } 6352 6353 /* 6354 * Allocate the data buffer and grab the user's data. In theory, 6355 * we shouldn't have to sanity check the parameter list length here 6356 * because the maximum size is 64K. We should be able to malloc 6357 * that much without too many problems. 
6358 */ 6359 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6360 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6361 ctsio->kern_data_len = param_len; 6362 ctsio->kern_total_len = param_len; 6363 ctsio->kern_rel_offset = 0; 6364 ctsio->kern_sg_entries = 0; 6365 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6366 ctsio->be_move_done = ctl_config_move_done; 6367 ctl_datamove((union ctl_io *)ctsio); 6368 6369 return (CTL_RETVAL_COMPLETE); 6370 } 6371 6372 switch (ctsio->cdb[0]) { 6373 case MODE_SELECT_6: { 6374 struct scsi_mode_header_6 *mh6; 6375 6376 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6377 bd_len = mh6->blk_desc_len; 6378 break; 6379 } 6380 case MODE_SELECT_10: { 6381 struct scsi_mode_header_10 *mh10; 6382 6383 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6384 bd_len = scsi_2btoul(mh10->blk_desc_len); 6385 break; 6386 } 6387 default: 6388 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6389 } 6390 6391 if (param_len < (header_size + bd_len)) { 6392 free(ctsio->kern_data_ptr, M_CTL); 6393 ctl_set_param_len_error(ctsio); 6394 ctl_done((union ctl_io *)ctsio); 6395 return (CTL_RETVAL_COMPLETE); 6396 } 6397 6398 /* 6399 * Set the IO_CONT flag, so that if this I/O gets passed to 6400 * ctl_config_write_done(), it'll get passed back to 6401 * ctl_do_mode_select() for further processing, or completion if 6402 * we're all done. 6403 */ 6404 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6405 ctsio->io_cont = ctl_do_mode_select; 6406 6407 modepage_info = (union ctl_modepage_info *) 6408 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6409 memset(modepage_info, 0, sizeof(*modepage_info)); 6410 modepage_info->header.len_left = param_len - header_size - bd_len; 6411 modepage_info->header.len_used = header_size + bd_len; 6412 6413 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6414 } 6415 6416 int 6417 ctl_mode_sense(struct ctl_scsiio *ctsio) 6418 { 6419 struct ctl_lun *lun = CTL_LUN(ctsio); 6420 int pc, page_code, dbd, llba, subpage; 6421 int alloc_len, page_len, header_len, total_len; 6422 struct scsi_mode_block_descr *block_desc; 6423 struct ctl_page_index *page_index; 6424 6425 dbd = 0; 6426 llba = 0; 6427 block_desc = NULL; 6428 6429 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6430 6431 switch (ctsio->cdb[0]) { 6432 case MODE_SENSE_6: { 6433 struct scsi_mode_sense_6 *cdb; 6434 6435 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6436 6437 header_len = sizeof(struct scsi_mode_hdr_6); 6438 if (cdb->byte2 & SMS_DBD) 6439 dbd = 1; 6440 else 6441 header_len += sizeof(struct scsi_mode_block_descr); 6442 6443 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6444 page_code = cdb->page & SMS_PAGE_CODE; 6445 subpage = cdb->subpage; 6446 alloc_len = cdb->length; 6447 break; 6448 } 6449 case MODE_SENSE_10: { 6450 struct scsi_mode_sense_10 *cdb; 6451 6452 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6453 6454 header_len = sizeof(struct scsi_mode_hdr_10); 6455 6456 if (cdb->byte2 & SMS_DBD) 6457 dbd = 1; 6458 else 6459 header_len += sizeof(struct scsi_mode_block_descr); 6460 if (cdb->byte2 & SMS10_LLBAA) 6461 llba = 1; 6462 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6463 page_code = cdb->page & SMS_PAGE_CODE; 6464 subpage = cdb->subpage; 6465 alloc_len = scsi_2btoul(cdb->length); 6466 break; 6467 } 6468 default: 6469 ctl_set_invalid_opcode(ctsio); 6470 ctl_done((union ctl_io *)ctsio); 6471 return (CTL_RETVAL_COMPLETE); 6472 break; /* NOTREACHED */ 6473 } 6474 6475 /* 6476 * We have to make a first pass through to calculate the size of 6477 * the pages that 
match the user's query. Then we allocate enough 6478 * memory to hold it, and actually copy the data into the buffer. 6479 */ 6480 switch (page_code) { 6481 case SMS_ALL_PAGES_PAGE: { 6482 u_int i; 6483 6484 page_len = 0; 6485 6486 /* 6487 * At the moment, values other than 0 and 0xff here are 6488 * reserved according to SPC-3. 6489 */ 6490 if ((subpage != SMS_SUBPAGE_PAGE_0) 6491 && (subpage != SMS_SUBPAGE_ALL)) { 6492 ctl_set_invalid_field(ctsio, 6493 /*sks_valid*/ 1, 6494 /*command*/ 1, 6495 /*field*/ 3, 6496 /*bit_valid*/ 0, 6497 /*bit*/ 0); 6498 ctl_done((union ctl_io *)ctsio); 6499 return (CTL_RETVAL_COMPLETE); 6500 } 6501 6502 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6503 page_index = &lun->mode_pages.index[i]; 6504 6505 /* Make sure the page is supported for this dev type */ 6506 if (lun->be_lun->lun_type == T_DIRECT && 6507 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6508 continue; 6509 if (lun->be_lun->lun_type == T_PROCESSOR && 6510 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6511 continue; 6512 if (lun->be_lun->lun_type == T_CDROM && 6513 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6514 continue; 6515 6516 /* 6517 * We don't use this subpage if the user didn't 6518 * request all subpages. 6519 */ 6520 if ((page_index->subpage != 0) 6521 && (subpage == SMS_SUBPAGE_PAGE_0)) 6522 continue; 6523 6524 #if 0 6525 printf("found page %#x len %d\n", 6526 page_index->page_code & SMPH_PC_MASK, 6527 page_index->page_len); 6528 #endif 6529 page_len += page_index->page_len; 6530 } 6531 break; 6532 } 6533 default: { 6534 u_int i; 6535 6536 page_len = 0; 6537 6538 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6539 page_index = &lun->mode_pages.index[i]; 6540 6541 /* Make sure the page is supported for this dev type */ 6542 if (lun->be_lun->lun_type == T_DIRECT && 6543 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6544 continue; 6545 if (lun->be_lun->lun_type == T_PROCESSOR && 6546 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6547 continue; 6548 if (lun->be_lun->lun_type == T_CDROM && 6549 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6550 continue; 6551 6552 /* Look for the right page code */ 6553 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6554 continue; 6555 6556 /* Look for the right subpage or the subpage wildcard*/ 6557 if ((page_index->subpage != subpage) 6558 && (subpage != SMS_SUBPAGE_ALL)) 6559 continue; 6560 6561 #if 0 6562 printf("found page %#x len %d\n", 6563 page_index->page_code & SMPH_PC_MASK, 6564 page_index->page_len); 6565 #endif 6566 6567 page_len += page_index->page_len; 6568 } 6569 6570 if (page_len == 0) { 6571 ctl_set_invalid_field(ctsio, 6572 /*sks_valid*/ 1, 6573 /*command*/ 1, 6574 /*field*/ 2, 6575 /*bit_valid*/ 1, 6576 /*bit*/ 5); 6577 ctl_done((union ctl_io *)ctsio); 6578 return (CTL_RETVAL_COMPLETE); 6579 } 6580 break; 6581 } 6582 } 6583 6584 total_len = header_len + page_len; 6585 #if 0 6586 printf("header_len = %d, page_len = %d, total_len = %d\n", 6587 header_len, page_len, total_len); 6588 #endif 6589 6590 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6591 ctsio->kern_sg_entries = 0; 6592 ctsio->kern_rel_offset = 0; 6593 ctsio->kern_data_len = min(total_len, alloc_len); 6594 ctsio->kern_total_len = ctsio->kern_data_len; 6595 6596 switch (ctsio->cdb[0]) { 6597 case MODE_SENSE_6: { 6598 struct scsi_mode_hdr_6 *header; 6599 6600 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6601 6602 header->datalen = MIN(total_len - 1, 254); 6603 if (lun->be_lun->lun_type == T_DIRECT) 
{ 6604 header->dev_specific = 0x10; /* DPOFUA */ 6605 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6606 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6607 header->dev_specific |= 0x80; /* WP */ 6608 } 6609 if (dbd) 6610 header->block_descr_len = 0; 6611 else 6612 header->block_descr_len = 6613 sizeof(struct scsi_mode_block_descr); 6614 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6615 break; 6616 } 6617 case MODE_SENSE_10: { 6618 struct scsi_mode_hdr_10 *header; 6619 int datalen; 6620 6621 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6622 6623 datalen = MIN(total_len - 2, 65533); 6624 scsi_ulto2b(datalen, header->datalen); 6625 if (lun->be_lun->lun_type == T_DIRECT) { 6626 header->dev_specific = 0x10; /* DPOFUA */ 6627 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6628 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6629 header->dev_specific |= 0x80; /* WP */ 6630 } 6631 if (dbd) 6632 scsi_ulto2b(0, header->block_descr_len); 6633 else 6634 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6635 header->block_descr_len); 6636 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6637 break; 6638 } 6639 default: 6640 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6641 } 6642 6643 /* 6644 * If we've got a disk, use its blocksize in the block 6645 * descriptor. Otherwise, just set it to 0. 6646 */ 6647 if (dbd == 0) { 6648 if (lun->be_lun->lun_type == T_DIRECT) 6649 scsi_ulto3b(lun->be_lun->blocksize, 6650 block_desc->block_len); 6651 else 6652 scsi_ulto3b(0, block_desc->block_len); 6653 } 6654 6655 switch (page_code) { 6656 case SMS_ALL_PAGES_PAGE: { 6657 int i, data_used; 6658 6659 data_used = header_len; 6660 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6661 struct ctl_page_index *page_index; 6662 6663 page_index = &lun->mode_pages.index[i]; 6664 if (lun->be_lun->lun_type == T_DIRECT && 6665 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6666 continue; 6667 if (lun->be_lun->lun_type == T_PROCESSOR && 6668 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6669 continue; 6670 if (lun->be_lun->lun_type == T_CDROM && 6671 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6672 continue; 6673 6674 /* 6675 * We don't use this subpage if the user didn't 6676 * request all subpages. We already checked (above) 6677 * to make sure the user only specified a subpage 6678 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6679 */ 6680 if ((page_index->subpage != 0) 6681 && (subpage == SMS_SUBPAGE_PAGE_0)) 6682 continue; 6683 6684 /* 6685 * Call the handler, if it exists, to update the 6686 * page to the latest values. 
6687 */ 6688 if (page_index->sense_handler != NULL) 6689 page_index->sense_handler(ctsio, page_index,pc); 6690 6691 memcpy(ctsio->kern_data_ptr + data_used, 6692 page_index->page_data + 6693 (page_index->page_len * pc), 6694 page_index->page_len); 6695 data_used += page_index->page_len; 6696 } 6697 break; 6698 } 6699 default: { 6700 int i, data_used; 6701 6702 data_used = header_len; 6703 6704 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6705 struct ctl_page_index *page_index; 6706 6707 page_index = &lun->mode_pages.index[i]; 6708 6709 /* Look for the right page code */ 6710 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6711 continue; 6712 6713 /* Look for the right subpage or the subpage wildcard*/ 6714 if ((page_index->subpage != subpage) 6715 && (subpage != SMS_SUBPAGE_ALL)) 6716 continue; 6717 6718 /* Make sure the page is supported for this dev type */ 6719 if (lun->be_lun->lun_type == T_DIRECT && 6720 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6721 continue; 6722 if (lun->be_lun->lun_type == T_PROCESSOR && 6723 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6724 continue; 6725 if (lun->be_lun->lun_type == T_CDROM && 6726 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6727 continue; 6728 6729 /* 6730 * Call the handler, if it exists, to update the 6731 * page to the latest values. 6732 */ 6733 if (page_index->sense_handler != NULL) 6734 page_index->sense_handler(ctsio, page_index,pc); 6735 6736 memcpy(ctsio->kern_data_ptr + data_used, 6737 page_index->page_data + 6738 (page_index->page_len * pc), 6739 page_index->page_len); 6740 data_used += page_index->page_len; 6741 } 6742 break; 6743 } 6744 } 6745 6746 ctl_set_success(ctsio); 6747 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6748 ctsio->be_move_done = ctl_config_move_done; 6749 ctl_datamove((union ctl_io *)ctsio); 6750 return (CTL_RETVAL_COMPLETE); 6751 } 6752 6753 int 6754 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6755 struct ctl_page_index *page_index, 6756 int pc) 6757 { 6758 struct ctl_lun *lun = CTL_LUN(ctsio); 6759 struct scsi_log_param_header *phdr; 6760 uint8_t *data; 6761 uint64_t val; 6762 6763 data = page_index->page_data; 6764 6765 if (lun->backend->lun_attr != NULL && 6766 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6767 != UINT64_MAX) { 6768 phdr = (struct scsi_log_param_header *)data; 6769 scsi_ulto2b(0x0001, phdr->param_code); 6770 phdr->param_control = SLP_LBIN | SLP_LP; 6771 phdr->param_len = 8; 6772 data = (uint8_t *)(phdr + 1); 6773 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6774 data[4] = 0x02; /* per-pool */ 6775 data += phdr->param_len; 6776 } 6777 6778 if (lun->backend->lun_attr != NULL && 6779 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6780 != UINT64_MAX) { 6781 phdr = (struct scsi_log_param_header *)data; 6782 scsi_ulto2b(0x0002, phdr->param_code); 6783 phdr->param_control = SLP_LBIN | SLP_LP; 6784 phdr->param_len = 8; 6785 data = (uint8_t *)(phdr + 1); 6786 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6787 data[4] = 0x01; /* per-LUN */ 6788 data += phdr->param_len; 6789 } 6790 6791 if (lun->backend->lun_attr != NULL && 6792 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6793 != UINT64_MAX) { 6794 phdr = (struct scsi_log_param_header *)data; 6795 scsi_ulto2b(0x00f1, phdr->param_code); 6796 phdr->param_control = SLP_LBIN | SLP_LP; 6797 phdr->param_len = 8; 6798 data = (uint8_t *)(phdr + 1); 6799 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6800 data[4] = 0x02; /* per-pool */ 6801 data += 
phdr->param_len; 6802 } 6803 6804 if (lun->backend->lun_attr != NULL && 6805 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6806 != UINT64_MAX) { 6807 phdr = (struct scsi_log_param_header *)data; 6808 scsi_ulto2b(0x00f2, phdr->param_code); 6809 phdr->param_control = SLP_LBIN | SLP_LP; 6810 phdr->param_len = 8; 6811 data = (uint8_t *)(phdr + 1); 6812 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6813 data[4] = 0x02; /* per-pool */ 6814 data += phdr->param_len; 6815 } 6816 6817 page_index->page_len = data - page_index->page_data; 6818 return (0); 6819 } 6820 6821 int 6822 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6823 struct ctl_page_index *page_index, 6824 int pc) 6825 { 6826 struct ctl_lun *lun = CTL_LUN(ctsio); 6827 struct stat_page *data; 6828 struct bintime *t; 6829 6830 data = (struct stat_page *)page_index->page_data; 6831 6832 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6833 data->sap.hdr.param_control = SLP_LBIN; 6834 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6835 sizeof(struct scsi_log_param_header); 6836 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6837 data->sap.read_num); 6838 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6839 data->sap.write_num); 6840 if (lun->be_lun->blocksize > 0) { 6841 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6842 lun->be_lun->blocksize, data->sap.recvieved_lba); 6843 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6844 lun->be_lun->blocksize, data->sap.transmitted_lba); 6845 } 6846 t = &lun->stats.time[CTL_STATS_READ]; 6847 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6848 data->sap.read_int); 6849 t = &lun->stats.time[CTL_STATS_WRITE]; 6850 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6851 data->sap.write_int); 6852 scsi_u64to8b(0, data->sap.weighted_num); 6853 scsi_u64to8b(0, data->sap.weighted_int); 6854 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6855 data->it.hdr.param_control = SLP_LBIN; 6856 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6857 sizeof(struct scsi_log_param_header); 6858 #ifdef CTL_TIME_IO 6859 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6860 #endif 6861 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6862 data->ti.hdr.param_control = SLP_LBIN; 6863 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6864 sizeof(struct scsi_log_param_header); 6865 scsi_ulto4b(3, data->ti.exponent); 6866 scsi_ulto4b(1, data->ti.integer); 6867 return (0); 6868 } 6869 6870 int 6871 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6872 struct ctl_page_index *page_index, 6873 int pc) 6874 { 6875 struct ctl_lun *lun = CTL_LUN(ctsio); 6876 struct scsi_log_informational_exceptions *data; 6877 6878 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6879 6880 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6881 data->hdr.param_control = SLP_LBIN; 6882 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6883 sizeof(struct scsi_log_param_header); 6884 data->ie_asc = lun->ie_asc; 6885 data->ie_ascq = lun->ie_ascq; 6886 data->temperature = 0xff; 6887 return (0); 6888 } 6889 6890 int 6891 ctl_log_sense(struct ctl_scsiio *ctsio) 6892 { 6893 struct ctl_lun *lun = CTL_LUN(ctsio); 6894 int i, pc, page_code, subpage; 6895 int alloc_len, total_len; 6896 struct ctl_page_index *page_index; 6897 struct scsi_log_sense *cdb; 6898 struct scsi_log_header *header; 6899 6900 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6901 6902 cdb = (struct scsi_log_sense *)ctsio->cdb;
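/* Decode the page control, page code, subpage and allocation length from the LOG SENSE CDB. */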
6903 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6904 page_code = cdb->page & SLS_PAGE_CODE; 6905 subpage = cdb->subpage; 6906 alloc_len = scsi_2btoul(cdb->length); 6907 6908 page_index = NULL; 6909 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6910 page_index = &lun->log_pages.index[i]; 6911 6912 /* Look for the right page code */ 6913 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6914 continue; 6915 6916 /* Look for the right subpage or the subpage wildcard */ 6917 if (page_index->subpage != subpage) 6918 continue; 6919 6920 break; 6921 } 6922 if (i >= CTL_NUM_LOG_PAGES) { 6923 ctl_set_invalid_field(ctsio, 6924 /*sks_valid*/ 1, 6925 /*command*/ 1, 6926 /*field*/ 2, 6927 /*bit_valid*/ 0, 6928 /*bit*/ 0); 6929 ctl_done((union ctl_io *)ctsio); 6930 return (CTL_RETVAL_COMPLETE); 6931 } 6932 6933 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6934 6935 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6936 ctsio->kern_sg_entries = 0; 6937 ctsio->kern_rel_offset = 0; 6938 ctsio->kern_data_len = min(total_len, alloc_len); 6939 ctsio->kern_total_len = ctsio->kern_data_len; 6940 6941 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6942 header->page = page_index->page_code; 6943 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6944 header->page |= SL_DS; 6945 if (page_index->subpage) { 6946 header->page |= SL_SPF; 6947 header->subpage = page_index->subpage; 6948 } 6949 scsi_ulto2b(page_index->page_len, header->datalen); 6950 6951 /* 6952 * Call the handler, if it exists, to update the 6953 * page to the latest values. 6954 */ 6955 if (page_index->sense_handler != NULL) 6956 page_index->sense_handler(ctsio, page_index, pc); 6957 6958 memcpy(header + 1, page_index->page_data, page_index->page_len); 6959 6960 ctl_set_success(ctsio); 6961 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6962 ctsio->be_move_done = ctl_config_move_done; 6963 ctl_datamove((union ctl_io *)ctsio); 6964 return (CTL_RETVAL_COMPLETE); 6965 } 6966 6967 int 6968 ctl_read_capacity(struct ctl_scsiio *ctsio) 6969 { 6970 struct ctl_lun *lun = CTL_LUN(ctsio); 6971 struct scsi_read_capacity *cdb; 6972 struct scsi_read_capacity_data *data; 6973 uint32_t lba; 6974 6975 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6976 6977 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6978 6979 lba = scsi_4btoul(cdb->addr); 6980 if (((cdb->pmi & SRC_PMI) == 0) 6981 && (lba != 0)) { 6982 ctl_set_invalid_field(/*ctsio*/ ctsio, 6983 /*sks_valid*/ 1, 6984 /*command*/ 1, 6985 /*field*/ 2, 6986 /*bit_valid*/ 0, 6987 /*bit*/ 0); 6988 ctl_done((union ctl_io *)ctsio); 6989 return (CTL_RETVAL_COMPLETE); 6990 } 6991 6992 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6993 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6994 ctsio->kern_data_len = sizeof(*data); 6995 ctsio->kern_total_len = sizeof(*data); 6996 ctsio->kern_rel_offset = 0; 6997 ctsio->kern_sg_entries = 0; 6998 6999 /* 7000 * If the maximum LBA is greater than 0xfffffffe, the user must 7001 * issue a SERVICE ACTION IN (16) command, with the read capacity 7002 * service action set. 7003 */ 7004 if (lun->be_lun->maxlba > 0xfffffffe) 7005 scsi_ulto4b(0xffffffff, data->addr); 7006 else 7007 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7008 7009 /* 7010 * XXX KDM this may not be 512 bytes...
7011 */ 7012 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7013 7014 ctl_set_success(ctsio); 7015 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7016 ctsio->be_move_done = ctl_config_move_done; 7017 ctl_datamove((union ctl_io *)ctsio); 7018 return (CTL_RETVAL_COMPLETE); 7019 } 7020 7021 int 7022 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7023 { 7024 struct ctl_lun *lun = CTL_LUN(ctsio); 7025 struct scsi_read_capacity_16 *cdb; 7026 struct scsi_read_capacity_data_long *data; 7027 uint64_t lba; 7028 uint32_t alloc_len; 7029 7030 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7031 7032 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7033 7034 alloc_len = scsi_4btoul(cdb->alloc_len); 7035 lba = scsi_8btou64(cdb->addr); 7036 7037 if ((cdb->reladr & SRC16_PMI) 7038 && (lba != 0)) { 7039 ctl_set_invalid_field(/*ctsio*/ ctsio, 7040 /*sks_valid*/ 1, 7041 /*command*/ 1, 7042 /*field*/ 2, 7043 /*bit_valid*/ 0, 7044 /*bit*/ 0); 7045 ctl_done((union ctl_io *)ctsio); 7046 return (CTL_RETVAL_COMPLETE); 7047 } 7048 7049 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7050 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7051 ctsio->kern_rel_offset = 0; 7052 ctsio->kern_sg_entries = 0; 7053 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7054 ctsio->kern_total_len = ctsio->kern_data_len; 7055 7056 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7057 /* XXX KDM this may not be 512 bytes... */ 7058 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7059 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7060 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7061 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7062 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7063 7064 ctl_set_success(ctsio); 7065 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7066 ctsio->be_move_done = ctl_config_move_done; 7067 ctl_datamove((union ctl_io *)ctsio); 7068 return (CTL_RETVAL_COMPLETE); 7069 } 7070 7071 int 7072 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7073 { 7074 struct ctl_lun *lun = CTL_LUN(ctsio); 7075 struct scsi_get_lba_status *cdb; 7076 struct scsi_get_lba_status_data *data; 7077 struct ctl_lba_len_flags *lbalen; 7078 uint64_t lba; 7079 uint32_t alloc_len, total_len; 7080 int retval; 7081 7082 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7083 7084 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7085 lba = scsi_8btou64(cdb->addr); 7086 alloc_len = scsi_4btoul(cdb->alloc_len); 7087 7088 if (lba > lun->be_lun->maxlba) { 7089 ctl_set_lba_out_of_range(ctsio, lba); 7090 ctl_done((union ctl_io *)ctsio); 7091 return (CTL_RETVAL_COMPLETE); 7092 } 7093 7094 total_len = sizeof(*data) + sizeof(data->descr[0]); 7095 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7096 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7097 ctsio->kern_rel_offset = 0; 7098 ctsio->kern_sg_entries = 0; 7099 ctsio->kern_data_len = min(total_len, alloc_len); 7100 ctsio->kern_total_len = ctsio->kern_data_len; 7101 7102 /* Fill dummy data in case backend can't tell anything. */ 7103 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7104 scsi_u64to8b(lba, data->descr[0].addr); 7105 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7106 data->descr[0].length); 7107 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7108 7109 ctl_set_success(ctsio); 7110 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7111 ctsio->be_move_done = ctl_config_move_done; 7112 7113 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7114 lbalen->lba = lba; 7115 lbalen->len = total_len; 7116 lbalen->flags = 0; 7117 retval = lun->backend->config_read((union ctl_io *)ctsio); 7118 return (retval); 7119 } 7120 7121 int 7122 ctl_read_defect(struct ctl_scsiio *ctsio) 7123 { 7124 struct scsi_read_defect_data_10 *ccb10; 7125 struct scsi_read_defect_data_12 *ccb12; 7126 struct scsi_read_defect_data_hdr_10 *data10; 7127 struct scsi_read_defect_data_hdr_12 *data12; 7128 uint32_t alloc_len, data_len; 7129 uint8_t format; 7130 7131 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7132 7133 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7134 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7135 format = ccb10->format; 7136 alloc_len = scsi_2btoul(ccb10->alloc_length); 7137 data_len = sizeof(*data10); 7138 } else { 7139 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7140 format = ccb12->format; 7141 alloc_len = scsi_4btoul(ccb12->alloc_length); 7142 data_len = sizeof(*data12); 7143 } 7144 if (alloc_len == 0) { 7145 ctl_set_success(ctsio); 7146 ctl_done((union ctl_io *)ctsio); 7147 return (CTL_RETVAL_COMPLETE); 7148 } 7149 7150 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7151 ctsio->kern_rel_offset = 0; 7152 ctsio->kern_sg_entries = 0; 7153 ctsio->kern_data_len = min(data_len, alloc_len); 7154 ctsio->kern_total_len = ctsio->kern_data_len; 7155 7156 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7157 data10 = (struct scsi_read_defect_data_hdr_10 *) 7158 ctsio->kern_data_ptr; 7159 data10->format = format; 7160 scsi_ulto2b(0, data10->length); 7161 } else { 7162 data12 = (struct scsi_read_defect_data_hdr_12 *) 7163 ctsio->kern_data_ptr; 7164 data12->format = format; 7165 scsi_ulto2b(0, data12->generation); 7166 scsi_ulto4b(0, data12->length); 7167 } 7168 7169 ctl_set_success(ctsio); 7170 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7171 ctsio->be_move_done = ctl_config_move_done; 7172 ctl_datamove((union ctl_io *)ctsio); 7173 return (CTL_RETVAL_COMPLETE); 7174 } 7175 7176 int 7177 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7178 { 7179 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7180 struct ctl_lun *lun = CTL_LUN(ctsio); 7181 struct scsi_maintenance_in *cdb; 7182 int retval; 7183 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7184 int num_ha_groups, num_target_ports, shared_group; 7185 struct ctl_port *port; 7186 struct scsi_target_group_data *rtg_ptr; 7187 struct scsi_target_group_data_extended *rtg_ext_ptr; 7188 struct scsi_target_port_group_descriptor *tpg_desc; 7189 7190 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7191 7192 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7193 retval = CTL_RETVAL_COMPLETE; 7194 7195 switch (cdb->byte2 & STG_PDF_MASK) { 7196 case STG_PDF_LENGTH: 7197 ext = 0; 7198 break; 7199 case STG_PDF_EXTENDED: 7200 ext = 1; 7201 break; 7202 default: 7203 ctl_set_invalid_field(/*ctsio*/ ctsio, 7204 /*sks_valid*/ 1, 7205 /*command*/ 1, 7206 /*field*/ 2, 7207 /*bit_valid*/ 1, 7208 /*bit*/ 5); 7209 ctl_done((union ctl_io *)ctsio); 7210 return(retval); 7211 } 7212 7213 num_target_ports = 0; 7214 shared_group = (softc->is_single != 0); 7215 mtx_lock(&softc->ctl_lock); 7216 STAILQ_FOREACH(port, &softc->port_list, links) { 7217 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7218 continue; 7219 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 
7220 continue; 7221 num_target_ports++; 7222 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7223 shared_group = 1; 7224 } 7225 mtx_unlock(&softc->ctl_lock); 7226 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7227 7228 if (ext) 7229 total_len = sizeof(struct scsi_target_group_data_extended); 7230 else 7231 total_len = sizeof(struct scsi_target_group_data); 7232 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7233 (shared_group + num_ha_groups) + 7234 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7235 7236 alloc_len = scsi_4btoul(cdb->length); 7237 7238 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7239 ctsio->kern_sg_entries = 0; 7240 ctsio->kern_rel_offset = 0; 7241 ctsio->kern_data_len = min(total_len, alloc_len); 7242 ctsio->kern_total_len = ctsio->kern_data_len; 7243 7244 if (ext) { 7245 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7246 ctsio->kern_data_ptr; 7247 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7248 rtg_ext_ptr->format_type = 0x10; 7249 rtg_ext_ptr->implicit_transition_time = 0; 7250 tpg_desc = &rtg_ext_ptr->groups[0]; 7251 } else { 7252 rtg_ptr = (struct scsi_target_group_data *) 7253 ctsio->kern_data_ptr; 7254 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7255 tpg_desc = &rtg_ptr->groups[0]; 7256 } 7257 7258 mtx_lock(&softc->ctl_lock); 7259 pg = softc->port_min / softc->port_cnt; 7260 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7261 /* Some shelf is known to be primary. */ 7262 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7263 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7264 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7265 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7266 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7267 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7268 else 7269 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7270 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7271 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7272 } else { 7273 ts = os; 7274 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7275 } 7276 } else { 7277 /* No known primary shelf. */ 7278 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7279 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7280 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7281 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7282 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7283 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7284 } else { 7285 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7286 } 7287 } 7288 if (shared_group) { 7289 tpg_desc->pref_state = ts; 7290 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7291 TPG_U_SUP | TPG_T_SUP; 7292 scsi_ulto2b(1, tpg_desc->target_port_group); 7293 tpg_desc->status = TPG_IMPLICIT; 7294 pc = 0; 7295 STAILQ_FOREACH(port, &softc->port_list, links) { 7296 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7297 continue; 7298 if (!softc->is_single && 7299 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7300 continue; 7301 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7302 continue; 7303 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7304 relative_target_port_identifier); 7305 pc++; 7306 } 7307 tpg_desc->target_port_count = pc; 7308 tpg_desc = (struct scsi_target_port_group_descriptor *) 7309 &tpg_desc->descriptors[pc]; 7310 } 7311 for (g = 0; g < num_ha_groups; g++) { 7312 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7313 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7314 TPG_U_SUP | TPG_T_SUP; 7315 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7316 tpg_desc->status = TPG_IMPLICIT; 7317 pc = 0; 7318 STAILQ_FOREACH(port, &softc->port_list, links) { 7319 if (port->targ_port < g * softc->port_cnt || 7320 port->targ_port >= (g + 1) * softc->port_cnt) 7321 continue; 7322 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7323 continue; 7324 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7325 continue; 7326 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7327 continue; 7328 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7329 relative_target_port_identifier); 7330 pc++; 7331 } 7332 tpg_desc->target_port_count = pc; 7333 tpg_desc = (struct scsi_target_port_group_descriptor *) 7334 &tpg_desc->descriptors[pc]; 7335 } 7336 mtx_unlock(&softc->ctl_lock); 7337 7338 ctl_set_success(ctsio); 7339 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7340 ctsio->be_move_done = ctl_config_move_done; 7341 ctl_datamove((union ctl_io *)ctsio); 7342 return(retval); 7343 } 7344 7345 int 7346 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7347 { 7348 struct ctl_lun *lun = CTL_LUN(ctsio); 7349 struct scsi_report_supported_opcodes *cdb; 7350 const struct ctl_cmd_entry *entry, *sentry; 7351 struct scsi_report_supported_opcodes_all *all; 7352 struct scsi_report_supported_opcodes_descr *descr; 7353 struct scsi_report_supported_opcodes_one *one; 7354 int retval; 7355 int alloc_len, total_len; 7356 int opcode, service_action, i, j, num; 7357 7358 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7359 7360 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7361 retval = CTL_RETVAL_COMPLETE; 7362 7363 opcode = cdb->requested_opcode; 7364 service_action = scsi_2btoul(cdb->requested_service_action); 7365 switch (cdb->options & RSO_OPTIONS_MASK) { 7366 case RSO_OPTIONS_ALL: 7367 num = 0; 7368 for (i = 0; i < 256; i++) { 7369 entry = &ctl_cmd_table[i]; 7370 if (entry->flags & CTL_CMD_FLAG_SA5) { 7371 for (j = 0; j < 32; j++) { 7372 sentry = &((const struct ctl_cmd_entry *) 7373 entry->execute)[j]; 7374 if (ctl_cmd_applicable( 7375 lun->be_lun->lun_type, sentry)) 7376 num++; 7377 } 7378 } else { 7379 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7380 entry)) 7381 num++; 7382 } 7383 } 7384 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7385 num * sizeof(struct scsi_report_supported_opcodes_descr); 7386 break; 7387 case RSO_OPTIONS_OC: 7388 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7389 ctl_set_invalid_field(/*ctsio*/ ctsio, 7390 /*sks_valid*/ 1, 7391 /*command*/ 1, 7392 /*field*/ 2, 7393 /*bit_valid*/ 1, 7394 /*bit*/ 2); 7395 ctl_done((union ctl_io *)ctsio); 7396 return (CTL_RETVAL_COMPLETE); 7397 } 7398 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7399 break; 7400 case RSO_OPTIONS_OC_SA: 7401 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7402 service_action >= 32) { 7403 ctl_set_invalid_field(/*ctsio*/ ctsio, 7404 /*sks_valid*/ 1, 7405 /*command*/ 1, 7406 /*field*/ 2, 7407 /*bit_valid*/ 1, 7408 /*bit*/ 2); 7409 ctl_done((union ctl_io *)ctsio); 7410 return (CTL_RETVAL_COMPLETE); 7411 } 7412 /* FALLTHROUGH */ 7413 case RSO_OPTIONS_OC_ASA: 7414 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7415 break; 7416 default: 7417 ctl_set_invalid_field(/*ctsio*/ ctsio, 7418 /*sks_valid*/ 1, 7419 /*command*/ 1, 7420 /*field*/ 2, 7421 /*bit_valid*/ 1, 7422 /*bit*/ 2); 7423 ctl_done((union ctl_io *)ctsio); 7424 return 
(CTL_RETVAL_COMPLETE); 7425 } 7426 7427 alloc_len = scsi_4btoul(cdb->length); 7428 7429 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7430 ctsio->kern_sg_entries = 0; 7431 ctsio->kern_rel_offset = 0; 7432 ctsio->kern_data_len = min(total_len, alloc_len); 7433 ctsio->kern_total_len = ctsio->kern_data_len; 7434 7435 switch (cdb->options & RSO_OPTIONS_MASK) { 7436 case RSO_OPTIONS_ALL: 7437 all = (struct scsi_report_supported_opcodes_all *) 7438 ctsio->kern_data_ptr; 7439 num = 0; 7440 for (i = 0; i < 256; i++) { 7441 entry = &ctl_cmd_table[i]; 7442 if (entry->flags & CTL_CMD_FLAG_SA5) { 7443 for (j = 0; j < 32; j++) { 7444 sentry = &((const struct ctl_cmd_entry *) 7445 entry->execute)[j]; 7446 if (!ctl_cmd_applicable( 7447 lun->be_lun->lun_type, sentry)) 7448 continue; 7449 descr = &all->descr[num++]; 7450 descr->opcode = i; 7451 scsi_ulto2b(j, descr->service_action); 7452 descr->flags = RSO_SERVACTV; 7453 scsi_ulto2b(sentry->length, 7454 descr->cdb_length); 7455 } 7456 } else { 7457 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7458 entry)) 7459 continue; 7460 descr = &all->descr[num++]; 7461 descr->opcode = i; 7462 scsi_ulto2b(0, descr->service_action); 7463 descr->flags = 0; 7464 scsi_ulto2b(entry->length, descr->cdb_length); 7465 } 7466 } 7467 scsi_ulto4b( 7468 num * sizeof(struct scsi_report_supported_opcodes_descr), 7469 all->length); 7470 break; 7471 case RSO_OPTIONS_OC: 7472 one = (struct scsi_report_supported_opcodes_one *) 7473 ctsio->kern_data_ptr; 7474 entry = &ctl_cmd_table[opcode]; 7475 goto fill_one; 7476 case RSO_OPTIONS_OC_SA: 7477 one = (struct scsi_report_supported_opcodes_one *) 7478 ctsio->kern_data_ptr; 7479 entry = &ctl_cmd_table[opcode]; 7480 entry = &((const struct ctl_cmd_entry *) 7481 entry->execute)[service_action]; 7482 fill_one: 7483 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7484 one->support = 3; 7485 scsi_ulto2b(entry->length, one->cdb_length); 7486 one->cdb_usage[0] = opcode; 7487 memcpy(&one->cdb_usage[1], entry->usage, 7488 entry->length - 1); 7489 } else 7490 one->support = 1; 7491 break; 7492 case RSO_OPTIONS_OC_ASA: 7493 one = (struct scsi_report_supported_opcodes_one *) 7494 ctsio->kern_data_ptr; 7495 entry = &ctl_cmd_table[opcode]; 7496 if (entry->flags & CTL_CMD_FLAG_SA5) { 7497 entry = &((const struct ctl_cmd_entry *) 7498 entry->execute)[service_action]; 7499 } else if (service_action != 0) { 7500 one->support = 1; 7501 break; 7502 } 7503 goto fill_one; 7504 } 7505 7506 ctl_set_success(ctsio); 7507 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7508 ctsio->be_move_done = ctl_config_move_done; 7509 ctl_datamove((union ctl_io *)ctsio); 7510 return(retval); 7511 } 7512 7513 int 7514 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7515 { 7516 struct scsi_report_supported_tmf *cdb; 7517 struct scsi_report_supported_tmf_ext_data *data; 7518 int retval; 7519 int alloc_len, total_len; 7520 7521 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7522 7523 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7524 7525 retval = CTL_RETVAL_COMPLETE; 7526 7527 if (cdb->options & RST_REPD) 7528 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7529 else 7530 total_len = sizeof(struct scsi_report_supported_tmf_data); 7531 alloc_len = scsi_4btoul(cdb->length); 7532 7533 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7534 ctsio->kern_sg_entries = 0; 7535 ctsio->kern_rel_offset = 0; 7536 ctsio->kern_data_len = min(total_len, alloc_len); 7537 ctsio->kern_total_len = ctsio->kern_data_len; 7538 7539 
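/* The short and extended reply formats begin with the same support bytes, so the reply is built in the extended structure; kern_data_len, clamped above, limits how much is actually returned when the REPD bit is not set. */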
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7540 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7541 RST_TRS; 7542 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7543 data->length = total_len - 4; 7544 7545 ctl_set_success(ctsio); 7546 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7547 ctsio->be_move_done = ctl_config_move_done; 7548 ctl_datamove((union ctl_io *)ctsio); 7549 return (retval); 7550 } 7551 7552 int 7553 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7554 { 7555 struct scsi_report_timestamp *cdb; 7556 struct scsi_report_timestamp_data *data; 7557 struct timeval tv; 7558 int64_t timestamp; 7559 int retval; 7560 int alloc_len, total_len; 7561 7562 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7563 7564 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7565 7566 retval = CTL_RETVAL_COMPLETE; 7567 7568 total_len = sizeof(struct scsi_report_timestamp_data); 7569 alloc_len = scsi_4btoul(cdb->length); 7570 7571 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7572 ctsio->kern_sg_entries = 0; 7573 ctsio->kern_rel_offset = 0; 7574 ctsio->kern_data_len = min(total_len, alloc_len); 7575 ctsio->kern_total_len = ctsio->kern_data_len; 7576 7577 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7578 scsi_ulto2b(sizeof(*data) - 2, data->length); 7579 data->origin = RTS_ORIG_OUTSIDE; 7580 getmicrotime(&tv); 7581 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7582 scsi_ulto4b(timestamp >> 16, data->timestamp); 7583 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7584 7585 ctl_set_success(ctsio); 7586 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7587 ctsio->be_move_done = ctl_config_move_done; 7588 ctl_datamove((union ctl_io *)ctsio); 7589 return (retval); 7590 } 7591 7592 int 7593 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7594 { 7595 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7596 struct ctl_lun *lun = CTL_LUN(ctsio); 7597 struct scsi_per_res_in *cdb; 7598 int alloc_len, total_len = 0; 7599 /* struct scsi_per_res_in_rsrv in_data; */ 7600 uint64_t key; 7601 7602 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7603 7604 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7605 7606 alloc_len = scsi_2btoul(cdb->length); 7607 7608 retry: 7609 mtx_lock(&lun->lun_lock); 7610 switch (cdb->action) { 7611 case SPRI_RK: /* read keys */ 7612 total_len = sizeof(struct scsi_per_res_in_keys) + 7613 lun->pr_key_count * 7614 sizeof(struct scsi_per_res_key); 7615 break; 7616 case SPRI_RR: /* read reservation */ 7617 if (lun->flags & CTL_LUN_PR_RESERVED) 7618 total_len = sizeof(struct scsi_per_res_in_rsrv); 7619 else 7620 total_len = sizeof(struct scsi_per_res_in_header); 7621 break; 7622 case SPRI_RC: /* report capabilities */ 7623 total_len = sizeof(struct scsi_per_res_cap); 7624 break; 7625 case SPRI_RS: /* read full status */ 7626 total_len = sizeof(struct scsi_per_res_in_header) + 7627 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7628 lun->pr_key_count; 7629 break; 7630 default: 7631 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7632 } 7633 mtx_unlock(&lun->lun_lock); 7634 7635 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7636 ctsio->kern_rel_offset = 0; 7637 ctsio->kern_sg_entries = 0; 7638 ctsio->kern_data_len = min(total_len, alloc_len); 7639 ctsio->kern_total_len = ctsio->kern_data_len; 7640 7641 mtx_lock(&lun->lun_lock); 7642 switch (cdb->action) { 7643 case SPRI_RK: { // read keys 7644 struct scsi_per_res_in_keys *res_keys; 7645 int i, key_count; 7646 7647 
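/* READ KEYS: return the PR generation count followed by one 8-byte reservation key for each registered initiator. */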
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7648 7649 /* 7650 * We had to drop the lock to allocate our buffer, which 7651 * leaves time for someone to come in with another 7652 * persistent reservation. (That is unlikely, though, 7653 * since this should be the only persistent reservation 7654 * command active right now.) 7655 */ 7656 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7657 (lun->pr_key_count * 7658 sizeof(struct scsi_per_res_key)))){ 7659 mtx_unlock(&lun->lun_lock); 7660 free(ctsio->kern_data_ptr, M_CTL); 7661 printf("%s: reservation length changed, retrying\n", 7662 __func__); 7663 goto retry; 7664 } 7665 7666 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7667 7668 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7669 lun->pr_key_count, res_keys->header.length); 7670 7671 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7672 if ((key = ctl_get_prkey(lun, i)) == 0) 7673 continue; 7674 7675 /* 7676 * We used lun->pr_key_count to calculate the 7677 * size to allocate. If it turns out the number of 7678 * initiators with the registered flag set is 7679 * larger than that (i.e. they haven't been kept in 7680 * sync), we've got a problem. 7681 */ 7682 if (key_count >= lun->pr_key_count) { 7683 key_count++; 7684 continue; 7685 } 7686 scsi_u64to8b(key, res_keys->keys[key_count].key); 7687 key_count++; 7688 } 7689 break; 7690 } 7691 case SPRI_RR: { // read reservation 7692 struct scsi_per_res_in_rsrv *res; 7693 int tmp_len, header_only; 7694 7695 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7696 7697 scsi_ulto4b(lun->pr_generation, res->header.generation); 7698 7699 if (lun->flags & CTL_LUN_PR_RESERVED) 7700 { 7701 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7702 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7703 res->header.length); 7704 header_only = 0; 7705 } else { 7706 tmp_len = sizeof(struct scsi_per_res_in_header); 7707 scsi_ulto4b(0, res->header.length); 7708 header_only = 1; 7709 } 7710 7711 /* 7712 * We had to drop the lock to allocate our buffer, which 7713 * leaves time for someone to come in with another 7714 * persistent reservation. (That is unlikely, though, 7715 * since this should be the only persistent reservation 7716 * command active right now.) 7717 */ 7718 if (tmp_len != total_len) { 7719 mtx_unlock(&lun->lun_lock); 7720 free(ctsio->kern_data_ptr, M_CTL); 7721 printf("%s: reservation status changed, retrying\n", 7722 __func__); 7723 goto retry; 7724 } 7725 7726 /* 7727 * No reservation held, so we're done. 7728 */ 7729 if (header_only != 0) 7730 break; 7731 7732 /* 7733 * If the registration is an All Registrants type, the key 7734 * is 0, since it doesn't really matter. 
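* (SPC requires the reservation key field to be reported as zero for an All Registrants reservation.)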
7735 */ 7736 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7737 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7738 res->data.reservation); 7739 } 7740 res->data.scopetype = lun->pr_res_type; 7741 break; 7742 } 7743 case SPRI_RC: //report capabilities 7744 { 7745 struct scsi_per_res_cap *res_cap; 7746 uint16_t type_mask; 7747 7748 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7749 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7750 res_cap->flags1 = SPRI_CRH; 7751 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7752 type_mask = SPRI_TM_WR_EX_AR | 7753 SPRI_TM_EX_AC_RO | 7754 SPRI_TM_WR_EX_RO | 7755 SPRI_TM_EX_AC | 7756 SPRI_TM_WR_EX | 7757 SPRI_TM_EX_AC_AR; 7758 scsi_ulto2b(type_mask, res_cap->type_mask); 7759 break; 7760 } 7761 case SPRI_RS: { // read full status 7762 struct scsi_per_res_in_full *res_status; 7763 struct scsi_per_res_in_full_desc *res_desc; 7764 struct ctl_port *port; 7765 int i, len; 7766 7767 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7768 7769 /* 7770 * We had to drop the lock to allocate our buffer, which 7771 * leaves time for someone to come in with another 7772 * persistent reservation. (That is unlikely, though, 7773 * since this should be the only persistent reservation 7774 * command active right now.) 7775 */ 7776 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7777 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7778 lun->pr_key_count)){ 7779 mtx_unlock(&lun->lun_lock); 7780 free(ctsio->kern_data_ptr, M_CTL); 7781 printf("%s: reservation length changed, retrying\n", 7782 __func__); 7783 goto retry; 7784 } 7785 7786 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7787 7788 res_desc = &res_status->desc[0]; 7789 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7790 if ((key = ctl_get_prkey(lun, i)) == 0) 7791 continue; 7792 7793 scsi_u64to8b(key, res_desc->res_key.key); 7794 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7795 (lun->pr_res_idx == i || 7796 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7797 res_desc->flags = SPRI_FULL_R_HOLDER; 7798 res_desc->scopetype = lun->pr_res_type; 7799 } 7800 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7801 res_desc->rel_trgt_port_id); 7802 len = 0; 7803 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7804 if (port != NULL) 7805 len = ctl_create_iid(port, 7806 i % CTL_MAX_INIT_PER_PORT, 7807 res_desc->transport_id); 7808 scsi_ulto4b(len, res_desc->additional_length); 7809 res_desc = (struct scsi_per_res_in_full_desc *) 7810 &res_desc->transport_id[len]; 7811 } 7812 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7813 res_status->header.length); 7814 break; 7815 } 7816 default: 7817 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7818 } 7819 mtx_unlock(&lun->lun_lock); 7820 7821 ctl_set_success(ctsio); 7822 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7823 ctsio->be_move_done = ctl_config_move_done; 7824 ctl_datamove((union ctl_io *)ctsio); 7825 return (CTL_RETVAL_COMPLETE); 7826 } 7827 7828 /* 7829 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7830 * it should return. 
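* When a non-zero value is returned the command has already been completed via ctl_done().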
7831 */ 7832 static int 7833 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7834 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7835 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7836 struct scsi_per_res_out_parms* param) 7837 { 7838 union ctl_ha_msg persis_io; 7839 int i; 7840 7841 mtx_lock(&lun->lun_lock); 7842 if (sa_res_key == 0) { 7843 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7844 /* validate scope and type */ 7845 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7846 SPR_LU_SCOPE) { 7847 mtx_unlock(&lun->lun_lock); 7848 ctl_set_invalid_field(/*ctsio*/ ctsio, 7849 /*sks_valid*/ 1, 7850 /*command*/ 1, 7851 /*field*/ 2, 7852 /*bit_valid*/ 1, 7853 /*bit*/ 4); 7854 ctl_done((union ctl_io *)ctsio); 7855 return (1); 7856 } 7857 7858 if (type>8 || type==2 || type==4 || type==0) { 7859 mtx_unlock(&lun->lun_lock); 7860 ctl_set_invalid_field(/*ctsio*/ ctsio, 7861 /*sks_valid*/ 1, 7862 /*command*/ 1, 7863 /*field*/ 2, 7864 /*bit_valid*/ 1, 7865 /*bit*/ 0); 7866 ctl_done((union ctl_io *)ctsio); 7867 return (1); 7868 } 7869 7870 /* 7871 * Unregister everybody else and build UA for 7872 * them 7873 */ 7874 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7875 if (i == residx || ctl_get_prkey(lun, i) == 0) 7876 continue; 7877 7878 ctl_clr_prkey(lun, i); 7879 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7880 } 7881 lun->pr_key_count = 1; 7882 lun->pr_res_type = type; 7883 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7884 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7885 lun->pr_res_idx = residx; 7886 lun->pr_generation++; 7887 mtx_unlock(&lun->lun_lock); 7888 7889 /* send msg to other side */ 7890 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7891 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7892 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7893 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7894 persis_io.pr.pr_info.res_type = type; 7895 memcpy(persis_io.pr.pr_info.sa_res_key, 7896 param->serv_act_res_key, 7897 sizeof(param->serv_act_res_key)); 7898 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7899 sizeof(persis_io.pr), M_WAITOK); 7900 } else { 7901 /* not all registrants */ 7902 mtx_unlock(&lun->lun_lock); 7903 free(ctsio->kern_data_ptr, M_CTL); 7904 ctl_set_invalid_field(ctsio, 7905 /*sks_valid*/ 1, 7906 /*command*/ 0, 7907 /*field*/ 8, 7908 /*bit_valid*/ 0, 7909 /*bit*/ 0); 7910 ctl_done((union ctl_io *)ctsio); 7911 return (1); 7912 } 7913 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7914 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7915 int found = 0; 7916 7917 if (res_key == sa_res_key) { 7918 /* special case */ 7919 /* 7920 * The spec implies this is not good but doesn't 7921 * say what to do. There are two choices either 7922 * generate a res conflict or check condition 7923 * with illegal field in parameter data. Since 7924 * that is what is done when the sa_res_key is 7925 * zero I'll take that approach since this has 7926 * to do with the sa_res_key. 
7927 */ 7928 mtx_unlock(&lun->lun_lock); 7929 free(ctsio->kern_data_ptr, M_CTL); 7930 ctl_set_invalid_field(ctsio, 7931 /*sks_valid*/ 1, 7932 /*command*/ 0, 7933 /*field*/ 8, 7934 /*bit_valid*/ 0, 7935 /*bit*/ 0); 7936 ctl_done((union ctl_io *)ctsio); 7937 return (1); 7938 } 7939 7940 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7941 if (ctl_get_prkey(lun, i) != sa_res_key) 7942 continue; 7943 7944 found = 1; 7945 ctl_clr_prkey(lun, i); 7946 lun->pr_key_count--; 7947 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7948 } 7949 if (!found) { 7950 mtx_unlock(&lun->lun_lock); 7951 free(ctsio->kern_data_ptr, M_CTL); 7952 ctl_set_reservation_conflict(ctsio); 7953 ctl_done((union ctl_io *)ctsio); 7954 return (CTL_RETVAL_COMPLETE); 7955 } 7956 lun->pr_generation++; 7957 mtx_unlock(&lun->lun_lock); 7958 7959 /* send msg to other side */ 7960 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7961 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7962 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7963 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7964 persis_io.pr.pr_info.res_type = type; 7965 memcpy(persis_io.pr.pr_info.sa_res_key, 7966 param->serv_act_res_key, 7967 sizeof(param->serv_act_res_key)); 7968 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7969 sizeof(persis_io.pr), M_WAITOK); 7970 } else { 7971 /* Reserved but not all registrants */ 7972 /* sa_res_key is res holder */ 7973 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7974 /* validate scope and type */ 7975 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7976 SPR_LU_SCOPE) { 7977 mtx_unlock(&lun->lun_lock); 7978 ctl_set_invalid_field(/*ctsio*/ ctsio, 7979 /*sks_valid*/ 1, 7980 /*command*/ 1, 7981 /*field*/ 2, 7982 /*bit_valid*/ 1, 7983 /*bit*/ 4); 7984 ctl_done((union ctl_io *)ctsio); 7985 return (1); 7986 } 7987 7988 if (type>8 || type==2 || type==4 || type==0) { 7989 mtx_unlock(&lun->lun_lock); 7990 ctl_set_invalid_field(/*ctsio*/ ctsio, 7991 /*sks_valid*/ 1, 7992 /*command*/ 1, 7993 /*field*/ 2, 7994 /*bit_valid*/ 1, 7995 /*bit*/ 0); 7996 ctl_done((union ctl_io *)ctsio); 7997 return (1); 7998 } 7999 8000 /* 8001 * Do the following: 8002 * if sa_res_key != res_key remove all 8003 * registrants w/sa_res_key and generate UA 8004 * for these registrants(Registrations 8005 * Preempted) if it wasn't an exclusive 8006 * reservation generate UA(Reservations 8007 * Preempted) for all other registered nexuses 8008 * if the type has changed. Establish the new 8009 * reservation and holder. If res_key and 8010 * sa_res_key are the same do the above 8011 * except don't unregister the res holder. 
8012 */ 8013 8014 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8015 if (i == residx || ctl_get_prkey(lun, i) == 0) 8016 continue; 8017 8018 if (sa_res_key == ctl_get_prkey(lun, i)) { 8019 ctl_clr_prkey(lun, i); 8020 lun->pr_key_count--; 8021 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8022 } else if (type != lun->pr_res_type && 8023 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8024 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8025 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8026 } 8027 } 8028 lun->pr_res_type = type; 8029 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8030 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8031 lun->pr_res_idx = residx; 8032 else 8033 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8034 lun->pr_generation++; 8035 mtx_unlock(&lun->lun_lock); 8036 8037 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8038 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8039 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8040 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8041 persis_io.pr.pr_info.res_type = type; 8042 memcpy(persis_io.pr.pr_info.sa_res_key, 8043 param->serv_act_res_key, 8044 sizeof(param->serv_act_res_key)); 8045 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8046 sizeof(persis_io.pr), M_WAITOK); 8047 } else { 8048 /* 8049 * sa_res_key is not the res holder just 8050 * remove registrants 8051 */ 8052 int found=0; 8053 8054 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8055 if (sa_res_key != ctl_get_prkey(lun, i)) 8056 continue; 8057 8058 found = 1; 8059 ctl_clr_prkey(lun, i); 8060 lun->pr_key_count--; 8061 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8062 } 8063 8064 if (!found) { 8065 mtx_unlock(&lun->lun_lock); 8066 free(ctsio->kern_data_ptr, M_CTL); 8067 ctl_set_reservation_conflict(ctsio); 8068 ctl_done((union ctl_io *)ctsio); 8069 return (1); 8070 } 8071 lun->pr_generation++; 8072 mtx_unlock(&lun->lun_lock); 8073 8074 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8075 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8076 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8077 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8078 persis_io.pr.pr_info.res_type = type; 8079 memcpy(persis_io.pr.pr_info.sa_res_key, 8080 param->serv_act_res_key, 8081 sizeof(param->serv_act_res_key)); 8082 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8083 sizeof(persis_io.pr), M_WAITOK); 8084 } 8085 } 8086 return (0); 8087 } 8088 8089 static void 8090 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8091 { 8092 uint64_t sa_res_key; 8093 int i; 8094 8095 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8096 8097 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8098 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8099 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8100 if (sa_res_key == 0) { 8101 /* 8102 * Unregister everybody else and build UA for 8103 * them 8104 */ 8105 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8106 if (i == msg->pr.pr_info.residx || 8107 ctl_get_prkey(lun, i) == 0) 8108 continue; 8109 8110 ctl_clr_prkey(lun, i); 8111 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8112 } 8113 8114 lun->pr_key_count = 1; 8115 lun->pr_res_type = msg->pr.pr_info.res_type; 8116 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8117 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8118 lun->pr_res_idx = msg->pr.pr_info.residx; 8119 } else { 8120 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8121 if (sa_res_key == ctl_get_prkey(lun, i)) 8122 continue; 8123 8124 ctl_clr_prkey(lun, i); 8125 lun->pr_key_count--; 8126 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8127 } 8128 } 8129 } else { 8130 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8131 if (i == 
msg->pr.pr_info.residx || 8132 ctl_get_prkey(lun, i) == 0) 8133 continue; 8134 8135 if (sa_res_key == ctl_get_prkey(lun, i)) { 8136 ctl_clr_prkey(lun, i); 8137 lun->pr_key_count--; 8138 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8139 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8140 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8141 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8142 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8143 } 8144 } 8145 lun->pr_res_type = msg->pr.pr_info.res_type; 8146 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8147 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8148 lun->pr_res_idx = msg->pr.pr_info.residx; 8149 else 8150 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8151 } 8152 lun->pr_generation++; 8153 8154 } 8155 8156 8157 int 8158 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8159 { 8160 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8161 struct ctl_lun *lun = CTL_LUN(ctsio); 8162 int retval; 8163 u_int32_t param_len; 8164 struct scsi_per_res_out *cdb; 8165 struct scsi_per_res_out_parms* param; 8166 uint32_t residx; 8167 uint64_t res_key, sa_res_key, key; 8168 uint8_t type; 8169 union ctl_ha_msg persis_io; 8170 int i; 8171 8172 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8173 8174 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8175 retval = CTL_RETVAL_COMPLETE; 8176 8177 /* 8178 * We only support whole-LUN scope. The scope & type are ignored for 8179 * register, register and ignore existing key and clear. 8180 * We sometimes ignore scope and type on preempts too!! 8181 * Verify reservation type here as well. 8182 */ 8183 type = cdb->scope_type & SPR_TYPE_MASK; 8184 if ((cdb->action == SPRO_RESERVE) 8185 || (cdb->action == SPRO_RELEASE)) { 8186 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8187 ctl_set_invalid_field(/*ctsio*/ ctsio, 8188 /*sks_valid*/ 1, 8189 /*command*/ 1, 8190 /*field*/ 2, 8191 /*bit_valid*/ 1, 8192 /*bit*/ 4); 8193 ctl_done((union ctl_io *)ctsio); 8194 return (CTL_RETVAL_COMPLETE); 8195 } 8196 8197 if (type>8 || type==2 || type==4 || type==0) { 8198 ctl_set_invalid_field(/*ctsio*/ ctsio, 8199 /*sks_valid*/ 1, 8200 /*command*/ 1, 8201 /*field*/ 2, 8202 /*bit_valid*/ 1, 8203 /*bit*/ 0); 8204 ctl_done((union ctl_io *)ctsio); 8205 return (CTL_RETVAL_COMPLETE); 8206 } 8207 } 8208 8209 param_len = scsi_4btoul(cdb->length); 8210 8211 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8212 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8213 ctsio->kern_data_len = param_len; 8214 ctsio->kern_total_len = param_len; 8215 ctsio->kern_rel_offset = 0; 8216 ctsio->kern_sg_entries = 0; 8217 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8218 ctsio->be_move_done = ctl_config_move_done; 8219 ctl_datamove((union ctl_io *)ctsio); 8220 8221 return (CTL_RETVAL_COMPLETE); 8222 } 8223 8224 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8225 8226 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8227 res_key = scsi_8btou64(param->res_key.key); 8228 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8229 8230 /* 8231 * Validate the reservation key here except for SPRO_REG_IGNO 8232 * This must be done for all other service actions 8233 */ 8234 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8235 mtx_lock(&lun->lun_lock); 8236 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8237 if (res_key != key) { 8238 /* 8239 * The current key passed in doesn't match 8240 * the one the initiator previously 8241 * registered. 
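* Per SPC-4 a mismatched reservation key is answered with a RESERVATION CONFLICT.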
8242 */ 8243 mtx_unlock(&lun->lun_lock); 8244 free(ctsio->kern_data_ptr, M_CTL); 8245 ctl_set_reservation_conflict(ctsio); 8246 ctl_done((union ctl_io *)ctsio); 8247 return (CTL_RETVAL_COMPLETE); 8248 } 8249 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8250 /* 8251 * We are not registered 8252 */ 8253 mtx_unlock(&lun->lun_lock); 8254 free(ctsio->kern_data_ptr, M_CTL); 8255 ctl_set_reservation_conflict(ctsio); 8256 ctl_done((union ctl_io *)ctsio); 8257 return (CTL_RETVAL_COMPLETE); 8258 } else if (res_key != 0) { 8259 /* 8260 * We are not registered and trying to register but 8261 * the register key isn't zero. 8262 */ 8263 mtx_unlock(&lun->lun_lock); 8264 free(ctsio->kern_data_ptr, M_CTL); 8265 ctl_set_reservation_conflict(ctsio); 8266 ctl_done((union ctl_io *)ctsio); 8267 return (CTL_RETVAL_COMPLETE); 8268 } 8269 mtx_unlock(&lun->lun_lock); 8270 } 8271 8272 switch (cdb->action & SPRO_ACTION_MASK) { 8273 case SPRO_REGISTER: 8274 case SPRO_REG_IGNO: { 8275 8276 #if 0 8277 printf("Registration received\n"); 8278 #endif 8279 8280 /* 8281 * We don't support any of these options, as we report in 8282 * the read capabilities request (see 8283 * ctl_persistent_reserve_in(), above). 8284 */ 8285 if ((param->flags & SPR_SPEC_I_PT) 8286 || (param->flags & SPR_ALL_TG_PT) 8287 || (param->flags & SPR_APTPL)) { 8288 int bit_ptr; 8289 8290 if (param->flags & SPR_APTPL) 8291 bit_ptr = 0; 8292 else if (param->flags & SPR_ALL_TG_PT) 8293 bit_ptr = 2; 8294 else /* SPR_SPEC_I_PT */ 8295 bit_ptr = 3; 8296 8297 free(ctsio->kern_data_ptr, M_CTL); 8298 ctl_set_invalid_field(ctsio, 8299 /*sks_valid*/ 1, 8300 /*command*/ 0, 8301 /*field*/ 20, 8302 /*bit_valid*/ 1, 8303 /*bit*/ bit_ptr); 8304 ctl_done((union ctl_io *)ctsio); 8305 return (CTL_RETVAL_COMPLETE); 8306 } 8307 8308 mtx_lock(&lun->lun_lock); 8309 8310 /* 8311 * The initiator wants to clear the 8312 * key/unregister. 8313 */ 8314 if (sa_res_key == 0) { 8315 if ((res_key == 0 8316 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8317 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8318 && ctl_get_prkey(lun, residx) == 0)) { 8319 mtx_unlock(&lun->lun_lock); 8320 goto done; 8321 } 8322 8323 ctl_clr_prkey(lun, residx); 8324 lun->pr_key_count--; 8325 8326 if (residx == lun->pr_res_idx) { 8327 lun->flags &= ~CTL_LUN_PR_RESERVED; 8328 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8329 8330 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8331 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8332 lun->pr_key_count) { 8333 /* 8334 * If the reservation is a registrants 8335 * only type we need to generate a UA 8336 * for other registered inits. 
The 8337 * sense code should be RESERVATIONS 8338 * RELEASED 8339 */ 8340 8341 for (i = softc->init_min; i < softc->init_max; i++){ 8342 if (ctl_get_prkey(lun, i) == 0) 8343 continue; 8344 ctl_est_ua(lun, i, 8345 CTL_UA_RES_RELEASE); 8346 } 8347 } 8348 lun->pr_res_type = 0; 8349 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8350 if (lun->pr_key_count==0) { 8351 lun->flags &= ~CTL_LUN_PR_RESERVED; 8352 lun->pr_res_type = 0; 8353 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8354 } 8355 } 8356 lun->pr_generation++; 8357 mtx_unlock(&lun->lun_lock); 8358 8359 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8360 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8361 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8362 persis_io.pr.pr_info.residx = residx; 8363 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8364 sizeof(persis_io.pr), M_WAITOK); 8365 } else /* sa_res_key != 0 */ { 8366 8367 /* 8368 * If we aren't registered currently then increment 8369 * the key count and set the registered flag. 8370 */ 8371 ctl_alloc_prkey(lun, residx); 8372 if (ctl_get_prkey(lun, residx) == 0) 8373 lun->pr_key_count++; 8374 ctl_set_prkey(lun, residx, sa_res_key); 8375 lun->pr_generation++; 8376 mtx_unlock(&lun->lun_lock); 8377 8378 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8379 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8380 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8381 persis_io.pr.pr_info.residx = residx; 8382 memcpy(persis_io.pr.pr_info.sa_res_key, 8383 param->serv_act_res_key, 8384 sizeof(param->serv_act_res_key)); 8385 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8386 sizeof(persis_io.pr), M_WAITOK); 8387 } 8388 8389 break; 8390 } 8391 case SPRO_RESERVE: 8392 #if 0 8393 printf("Reserve executed type %d\n", type); 8394 #endif 8395 mtx_lock(&lun->lun_lock); 8396 if (lun->flags & CTL_LUN_PR_RESERVED) { 8397 /* 8398 * if this isn't the reservation holder and it's 8399 * not a "all registrants" type or if the type is 8400 * different then we have a conflict 8401 */ 8402 if ((lun->pr_res_idx != residx 8403 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8404 || lun->pr_res_type != type) { 8405 mtx_unlock(&lun->lun_lock); 8406 free(ctsio->kern_data_ptr, M_CTL); 8407 ctl_set_reservation_conflict(ctsio); 8408 ctl_done((union ctl_io *)ctsio); 8409 return (CTL_RETVAL_COMPLETE); 8410 } 8411 mtx_unlock(&lun->lun_lock); 8412 } else /* create a reservation */ { 8413 /* 8414 * If it's not an "all registrants" type record 8415 * reservation holder 8416 */ 8417 if (type != SPR_TYPE_WR_EX_AR 8418 && type != SPR_TYPE_EX_AC_AR) 8419 lun->pr_res_idx = residx; /* Res holder */ 8420 else 8421 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8422 8423 lun->flags |= CTL_LUN_PR_RESERVED; 8424 lun->pr_res_type = type; 8425 8426 mtx_unlock(&lun->lun_lock); 8427 8428 /* send msg to other side */ 8429 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8430 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8431 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8432 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8433 persis_io.pr.pr_info.res_type = type; 8434 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8435 sizeof(persis_io.pr), M_WAITOK); 8436 } 8437 break; 8438 8439 case SPRO_RELEASE: 8440 mtx_lock(&lun->lun_lock); 8441 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8442 /* No reservation exists return good status */ 8443 mtx_unlock(&lun->lun_lock); 8444 goto done; 8445 } 8446 /* 8447 * Is this nexus a reservation holder? 
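* (With an All Registrants reservation every registered nexus counts as a holder.)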
8448 */ 8449 if (lun->pr_res_idx != residx 8450 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8451 /* 8452 * not a res holder return good status but 8453 * do nothing 8454 */ 8455 mtx_unlock(&lun->lun_lock); 8456 goto done; 8457 } 8458 8459 if (lun->pr_res_type != type) { 8460 mtx_unlock(&lun->lun_lock); 8461 free(ctsio->kern_data_ptr, M_CTL); 8462 ctl_set_illegal_pr_release(ctsio); 8463 ctl_done((union ctl_io *)ctsio); 8464 return (CTL_RETVAL_COMPLETE); 8465 } 8466 8467 /* okay to release */ 8468 lun->flags &= ~CTL_LUN_PR_RESERVED; 8469 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8470 lun->pr_res_type = 0; 8471 8472 /* 8473 * If this isn't an exclusive access reservation and NUAR 8474 * is not set, generate UA for all other registrants. 8475 */ 8476 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8477 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8478 for (i = softc->init_min; i < softc->init_max; i++) { 8479 if (i == residx || ctl_get_prkey(lun, i) == 0) 8480 continue; 8481 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8482 } 8483 } 8484 mtx_unlock(&lun->lun_lock); 8485 8486 /* Send msg to other side */ 8487 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8488 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8489 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8490 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8491 sizeof(persis_io.pr), M_WAITOK); 8492 break; 8493 8494 case SPRO_CLEAR: 8495 /* send msg to other side */ 8496 8497 mtx_lock(&lun->lun_lock); 8498 lun->flags &= ~CTL_LUN_PR_RESERVED; 8499 lun->pr_res_type = 0; 8500 lun->pr_key_count = 0; 8501 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8502 8503 ctl_clr_prkey(lun, residx); 8504 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8505 if (ctl_get_prkey(lun, i) != 0) { 8506 ctl_clr_prkey(lun, i); 8507 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8508 } 8509 lun->pr_generation++; 8510 mtx_unlock(&lun->lun_lock); 8511 8512 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8513 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8514 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8515 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8516 sizeof(persis_io.pr), M_WAITOK); 8517 break; 8518 8519 case SPRO_PREEMPT: 8520 case SPRO_PRE_ABO: { 8521 int nretval; 8522 8523 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8524 residx, ctsio, cdb, param); 8525 if (nretval != 0) 8526 return (CTL_RETVAL_COMPLETE); 8527 break; 8528 } 8529 default: 8530 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8531 } 8532 8533 done: 8534 free(ctsio->kern_data_ptr, M_CTL); 8535 ctl_set_success(ctsio); 8536 ctl_done((union ctl_io *)ctsio); 8537 8538 return (retval); 8539 } 8540 8541 /* 8542 * This routine is for handling a message from the other SC pertaining to 8543 * persistent reserve out. All the error checking will have been done 8544 * so only perorming the action need be done here to keep the two 8545 * in sync. 
8546 */ 8547 static void 8548 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8549 { 8550 struct ctl_softc *softc = CTL_SOFTC(io); 8551 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8552 struct ctl_lun *lun; 8553 int i; 8554 uint32_t residx, targ_lun; 8555 8556 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8557 mtx_lock(&softc->ctl_lock); 8558 if (targ_lun >= ctl_max_luns || 8559 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8560 mtx_unlock(&softc->ctl_lock); 8561 return; 8562 } 8563 mtx_lock(&lun->lun_lock); 8564 mtx_unlock(&softc->ctl_lock); 8565 if (lun->flags & CTL_LUN_DISABLED) { 8566 mtx_unlock(&lun->lun_lock); 8567 return; 8568 } 8569 residx = ctl_get_initindex(&msg->hdr.nexus); 8570 switch(msg->pr.pr_info.action) { 8571 case CTL_PR_REG_KEY: 8572 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8573 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8574 lun->pr_key_count++; 8575 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8576 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8577 lun->pr_generation++; 8578 break; 8579 8580 case CTL_PR_UNREG_KEY: 8581 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8582 lun->pr_key_count--; 8583 8584 /* XXX Need to see if the reservation has been released */ 8585 /* if so do we need to generate UA? */ 8586 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8587 lun->flags &= ~CTL_LUN_PR_RESERVED; 8588 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8589 8590 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8591 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8592 lun->pr_key_count) { 8593 /* 8594 * If the reservation is a registrants 8595 * only type we need to generate a UA 8596 * for other registered inits. The 8597 * sense code should be RESERVATIONS 8598 * RELEASED 8599 */ 8600 8601 for (i = softc->init_min; i < softc->init_max; i++) { 8602 if (ctl_get_prkey(lun, i) == 0) 8603 continue; 8604 8605 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8606 } 8607 } 8608 lun->pr_res_type = 0; 8609 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8610 if (lun->pr_key_count==0) { 8611 lun->flags &= ~CTL_LUN_PR_RESERVED; 8612 lun->pr_res_type = 0; 8613 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8614 } 8615 } 8616 lun->pr_generation++; 8617 break; 8618 8619 case CTL_PR_RESERVE: 8620 lun->flags |= CTL_LUN_PR_RESERVED; 8621 lun->pr_res_type = msg->pr.pr_info.res_type; 8622 lun->pr_res_idx = msg->pr.pr_info.residx; 8623 8624 break; 8625 8626 case CTL_PR_RELEASE: 8627 /* 8628 * If this isn't an exclusive access reservation and NUAR 8629 * is not set, generate UA for all other registrants. 
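* (NUAR is the No Unit Attention on Release bit in the Control mode page.)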
8630 */ 8631 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8632 lun->pr_res_type != SPR_TYPE_WR_EX && 8633 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8634 for (i = softc->init_min; i < softc->init_max; i++) { 8635 if (i == residx || ctl_get_prkey(lun, i) == 0) 8636 continue; 8637 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } 8638 } 8639 8640 lun->flags &= ~CTL_LUN_PR_RESERVED; 8641 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8642 lun->pr_res_type = 0; 8643 break; 8644 8645 case CTL_PR_PREEMPT: 8646 ctl_pro_preempt_other(lun, msg); 8647 break; 8648 case CTL_PR_CLEAR: 8649 lun->flags &= ~CTL_LUN_PR_RESERVED; 8650 lun->pr_res_type = 0; 8651 lun->pr_key_count = 0; 8652 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8653 8654 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8655 if (ctl_get_prkey(lun, i) == 0) 8656 continue; 8657 ctl_clr_prkey(lun, i); 8658 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8659 } 8660 lun->pr_generation++; 8661 break; 8662 } 8663 8664 mtx_unlock(&lun->lun_lock); 8665 } 8666 8667 int 8668 ctl_read_write(struct ctl_scsiio *ctsio) 8669 { 8670 struct ctl_lun *lun = CTL_LUN(ctsio); 8671 struct ctl_lba_len_flags *lbalen; 8672 uint64_t lba; 8673 uint32_t num_blocks; 8674 int flags, retval; 8675 int isread; 8676 8677 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8678 8679 flags = 0; 8680 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8681 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8682 switch (ctsio->cdb[0]) { 8683 case READ_6: 8684 case WRITE_6: { 8685 struct scsi_rw_6 *cdb; 8686 8687 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8688 8689 lba = scsi_3btoul(cdb->addr); 8690 /* only 5 bits are valid in the most significant address byte */ 8691 lba &= 0x1fffff; 8692 num_blocks = cdb->length; 8693 /* 8694 * This is correct according to SBC-2. 
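* A transfer length of zero in the READ(6)/WRITE(6) CDB means 256 blocks.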
8695 */ 8696 if (num_blocks == 0) 8697 num_blocks = 256; 8698 break; 8699 } 8700 case READ_10: 8701 case WRITE_10: { 8702 struct scsi_rw_10 *cdb; 8703 8704 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8705 if (cdb->byte2 & SRW10_FUA) 8706 flags |= CTL_LLF_FUA; 8707 if (cdb->byte2 & SRW10_DPO) 8708 flags |= CTL_LLF_DPO; 8709 lba = scsi_4btoul(cdb->addr); 8710 num_blocks = scsi_2btoul(cdb->length); 8711 break; 8712 } 8713 case WRITE_VERIFY_10: { 8714 struct scsi_write_verify_10 *cdb; 8715 8716 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8717 flags |= CTL_LLF_FUA; 8718 if (cdb->byte2 & SWV_DPO) 8719 flags |= CTL_LLF_DPO; 8720 lba = scsi_4btoul(cdb->addr); 8721 num_blocks = scsi_2btoul(cdb->length); 8722 break; 8723 } 8724 case READ_12: 8725 case WRITE_12: { 8726 struct scsi_rw_12 *cdb; 8727 8728 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8729 if (cdb->byte2 & SRW12_FUA) 8730 flags |= CTL_LLF_FUA; 8731 if (cdb->byte2 & SRW12_DPO) 8732 flags |= CTL_LLF_DPO; 8733 lba = scsi_4btoul(cdb->addr); 8734 num_blocks = scsi_4btoul(cdb->length); 8735 break; 8736 } 8737 case WRITE_VERIFY_12: { 8738 struct scsi_write_verify_12 *cdb; 8739 8740 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8741 flags |= CTL_LLF_FUA; 8742 if (cdb->byte2 & SWV_DPO) 8743 flags |= CTL_LLF_DPO; 8744 lba = scsi_4btoul(cdb->addr); 8745 num_blocks = scsi_4btoul(cdb->length); 8746 break; 8747 } 8748 case READ_16: 8749 case WRITE_16: { 8750 struct scsi_rw_16 *cdb; 8751 8752 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8753 if (cdb->byte2 & SRW12_FUA) 8754 flags |= CTL_LLF_FUA; 8755 if (cdb->byte2 & SRW12_DPO) 8756 flags |= CTL_LLF_DPO; 8757 lba = scsi_8btou64(cdb->addr); 8758 num_blocks = scsi_4btoul(cdb->length); 8759 break; 8760 } 8761 case WRITE_ATOMIC_16: { 8762 struct scsi_write_atomic_16 *cdb; 8763 8764 if (lun->be_lun->atomicblock == 0) { 8765 ctl_set_invalid_opcode(ctsio); 8766 ctl_done((union ctl_io *)ctsio); 8767 return (CTL_RETVAL_COMPLETE); 8768 } 8769 8770 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8771 if (cdb->byte2 & SRW12_FUA) 8772 flags |= CTL_LLF_FUA; 8773 if (cdb->byte2 & SRW12_DPO) 8774 flags |= CTL_LLF_DPO; 8775 lba = scsi_8btou64(cdb->addr); 8776 num_blocks = scsi_2btoul(cdb->length); 8777 if (num_blocks > lun->be_lun->atomicblock) { 8778 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8779 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8780 /*bit*/ 0); 8781 ctl_done((union ctl_io *)ctsio); 8782 return (CTL_RETVAL_COMPLETE); 8783 } 8784 break; 8785 } 8786 case WRITE_VERIFY_16: { 8787 struct scsi_write_verify_16 *cdb; 8788 8789 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8790 flags |= CTL_LLF_FUA; 8791 if (cdb->byte2 & SWV_DPO) 8792 flags |= CTL_LLF_DPO; 8793 lba = scsi_8btou64(cdb->addr); 8794 num_blocks = scsi_4btoul(cdb->length); 8795 break; 8796 } 8797 default: 8798 /* 8799 * We got a command we don't support. This shouldn't 8800 * happen, commands should be filtered out above us. 8801 */ 8802 ctl_set_invalid_opcode(ctsio); 8803 ctl_done((union ctl_io *)ctsio); 8804 8805 return (CTL_RETVAL_COMPLETE); 8806 break; /* NOTREACHED */ 8807 } 8808 8809 /* 8810 * The first check is to make sure we're in bounds, the second 8811 * check is to catch wrap-around problems. If the lba + num blocks 8812 * is less than the lba, then we've wrapped around and the block 8813 * range is invalid anyway. 
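* For example, with maxlba 999 (LBAs 0-999) a request for 20 blocks starting at LBA 990 would end at 1010 > 1000 and is rejected.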
8814 */ 8815 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8816 || ((lba + num_blocks) < lba)) { 8817 ctl_set_lba_out_of_range(ctsio, 8818 MAX(lba, lun->be_lun->maxlba + 1)); 8819 ctl_done((union ctl_io *)ctsio); 8820 return (CTL_RETVAL_COMPLETE); 8821 } 8822 8823 /* 8824 * According to SBC-3, a transfer length of 0 is not an error. 8825 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8826 * translates to 256 blocks for those commands. 8827 */ 8828 if (num_blocks == 0) { 8829 ctl_set_success(ctsio); 8830 ctl_done((union ctl_io *)ctsio); 8831 return (CTL_RETVAL_COMPLETE); 8832 } 8833 8834 /* Set FUA and/or DPO if caches are disabled. */ 8835 if (isread) { 8836 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8837 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8838 } else { 8839 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8840 flags |= CTL_LLF_FUA; 8841 } 8842 8843 lbalen = (struct ctl_lba_len_flags *) 8844 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8845 lbalen->lba = lba; 8846 lbalen->len = num_blocks; 8847 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8848 8849 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8850 ctsio->kern_rel_offset = 0; 8851 8852 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8853 8854 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8855 return (retval); 8856 } 8857 8858 static int 8859 ctl_cnw_cont(union ctl_io *io) 8860 { 8861 struct ctl_lun *lun = CTL_LUN(io); 8862 struct ctl_scsiio *ctsio; 8863 struct ctl_lba_len_flags *lbalen; 8864 int retval; 8865 8866 ctsio = &io->scsiio; 8867 ctsio->io_hdr.status = CTL_STATUS_NONE; 8868 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8869 lbalen = (struct ctl_lba_len_flags *) 8870 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8871 lbalen->flags &= ~CTL_LLF_COMPARE; 8872 lbalen->flags |= CTL_LLF_WRITE; 8873 8874 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8875 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8876 return (retval); 8877 } 8878 8879 int 8880 ctl_cnw(struct ctl_scsiio *ctsio) 8881 { 8882 struct ctl_lun *lun = CTL_LUN(ctsio); 8883 struct ctl_lba_len_flags *lbalen; 8884 uint64_t lba; 8885 uint32_t num_blocks; 8886 int flags, retval; 8887 8888 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8889 8890 flags = 0; 8891 switch (ctsio->cdb[0]) { 8892 case COMPARE_AND_WRITE: { 8893 struct scsi_compare_and_write *cdb; 8894 8895 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8896 if (cdb->byte2 & SRW10_FUA) 8897 flags |= CTL_LLF_FUA; 8898 if (cdb->byte2 & SRW10_DPO) 8899 flags |= CTL_LLF_DPO; 8900 lba = scsi_8btou64(cdb->addr); 8901 num_blocks = cdb->length; 8902 break; 8903 } 8904 default: 8905 /* 8906 * We got a command we don't support. This shouldn't 8907 * happen, commands should be filtered out above us. 8908 */ 8909 ctl_set_invalid_opcode(ctsio); 8910 ctl_done((union ctl_io *)ctsio); 8911 8912 return (CTL_RETVAL_COMPLETE); 8913 break; /* NOTREACHED */ 8914 } 8915 8916 /* 8917 * The first check is to make sure we're in bounds, the second 8918 * check is to catch wrap-around problems. If the lba + num blocks 8919 * is less than the lba, then we've wrapped around and the block 8920 * range is invalid anyway. 
8921 */ 8922 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8923 || ((lba + num_blocks) < lba)) { 8924 ctl_set_lba_out_of_range(ctsio, 8925 MAX(lba, lun->be_lun->maxlba + 1)); 8926 ctl_done((union ctl_io *)ctsio); 8927 return (CTL_RETVAL_COMPLETE); 8928 } 8929 8930 /* 8931 * According to SBC-3, a transfer length of 0 is not an error. 8932 */ 8933 if (num_blocks == 0) { 8934 ctl_set_success(ctsio); 8935 ctl_done((union ctl_io *)ctsio); 8936 return (CTL_RETVAL_COMPLETE); 8937 } 8938 8939 /* Set FUA if write cache is disabled. */ 8940 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8941 flags |= CTL_LLF_FUA; 8942 8943 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8944 ctsio->kern_rel_offset = 0; 8945 8946 /* 8947 * Set the IO_CONT flag, so that if this I/O gets passed to 8948 * ctl_data_submit_done(), it'll get passed back to 8949 * ctl_ctl_cnw_cont() for further processing. 8950 */ 8951 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8952 ctsio->io_cont = ctl_cnw_cont; 8953 8954 lbalen = (struct ctl_lba_len_flags *) 8955 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8956 lbalen->lba = lba; 8957 lbalen->len = num_blocks; 8958 lbalen->flags = CTL_LLF_COMPARE | flags; 8959 8960 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8961 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8962 return (retval); 8963 } 8964 8965 int 8966 ctl_verify(struct ctl_scsiio *ctsio) 8967 { 8968 struct ctl_lun *lun = CTL_LUN(ctsio); 8969 struct ctl_lba_len_flags *lbalen; 8970 uint64_t lba; 8971 uint32_t num_blocks; 8972 int bytchk, flags; 8973 int retval; 8974 8975 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8976 8977 bytchk = 0; 8978 flags = CTL_LLF_FUA; 8979 switch (ctsio->cdb[0]) { 8980 case VERIFY_10: { 8981 struct scsi_verify_10 *cdb; 8982 8983 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8984 if (cdb->byte2 & SVFY_BYTCHK) 8985 bytchk = 1; 8986 if (cdb->byte2 & SVFY_DPO) 8987 flags |= CTL_LLF_DPO; 8988 lba = scsi_4btoul(cdb->addr); 8989 num_blocks = scsi_2btoul(cdb->length); 8990 break; 8991 } 8992 case VERIFY_12: { 8993 struct scsi_verify_12 *cdb; 8994 8995 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8996 if (cdb->byte2 & SVFY_BYTCHK) 8997 bytchk = 1; 8998 if (cdb->byte2 & SVFY_DPO) 8999 flags |= CTL_LLF_DPO; 9000 lba = scsi_4btoul(cdb->addr); 9001 num_blocks = scsi_4btoul(cdb->length); 9002 break; 9003 } 9004 case VERIFY_16: { 9005 struct scsi_rw_16 *cdb; 9006 9007 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9008 if (cdb->byte2 & SVFY_BYTCHK) 9009 bytchk = 1; 9010 if (cdb->byte2 & SVFY_DPO) 9011 flags |= CTL_LLF_DPO; 9012 lba = scsi_8btou64(cdb->addr); 9013 num_blocks = scsi_4btoul(cdb->length); 9014 break; 9015 } 9016 default: 9017 /* 9018 * We got a command we don't support. This shouldn't 9019 * happen, commands should be filtered out above us. 9020 */ 9021 ctl_set_invalid_opcode(ctsio); 9022 ctl_done((union ctl_io *)ctsio); 9023 return (CTL_RETVAL_COMPLETE); 9024 } 9025 9026 /* 9027 * The first check is to make sure we're in bounds, the second 9028 * check is to catch wrap-around problems. If the lba + num blocks 9029 * is less than the lba, then we've wrapped around and the block 9030 * range is invalid anyway. 9031 */ 9032 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9033 || ((lba + num_blocks) < lba)) { 9034 ctl_set_lba_out_of_range(ctsio, 9035 MAX(lba, lun->be_lun->maxlba + 1)); 9036 ctl_done((union ctl_io *)ctsio); 9037 return (CTL_RETVAL_COMPLETE); 9038 } 9039 9040 /* 9041 * According to SBC-3, a transfer length of 0 is not an error. 
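* The command simply completes with GOOD status without verifying any blocks.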
9042 */ 9043 if (num_blocks == 0) { 9044 ctl_set_success(ctsio); 9045 ctl_done((union ctl_io *)ctsio); 9046 return (CTL_RETVAL_COMPLETE); 9047 } 9048 9049 lbalen = (struct ctl_lba_len_flags *) 9050 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9051 lbalen->lba = lba; 9052 lbalen->len = num_blocks; 9053 if (bytchk) { 9054 lbalen->flags = CTL_LLF_COMPARE | flags; 9055 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9056 } else { 9057 lbalen->flags = CTL_LLF_VERIFY | flags; 9058 ctsio->kern_total_len = 0; 9059 } 9060 ctsio->kern_rel_offset = 0; 9061 9062 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9063 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9064 return (retval); 9065 } 9066 9067 int 9068 ctl_report_luns(struct ctl_scsiio *ctsio) 9069 { 9070 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9071 struct ctl_port *port = CTL_PORT(ctsio); 9072 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9073 struct scsi_report_luns *cdb; 9074 struct scsi_report_luns_data *lun_data; 9075 int num_filled, num_luns, num_port_luns, retval; 9076 uint32_t alloc_len, lun_datalen; 9077 uint32_t initidx, targ_lun_id, lun_id; 9078 9079 retval = CTL_RETVAL_COMPLETE; 9080 cdb = (struct scsi_report_luns *)ctsio->cdb; 9081 9082 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9083 9084 num_luns = 0; 9085 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9086 mtx_lock(&softc->ctl_lock); 9087 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9088 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9089 num_luns++; 9090 } 9091 mtx_unlock(&softc->ctl_lock); 9092 9093 switch (cdb->select_report) { 9094 case RPL_REPORT_DEFAULT: 9095 case RPL_REPORT_ALL: 9096 case RPL_REPORT_NONSUBSID: 9097 break; 9098 case RPL_REPORT_WELLKNOWN: 9099 case RPL_REPORT_ADMIN: 9100 case RPL_REPORT_CONGLOM: 9101 num_luns = 0; 9102 break; 9103 default: 9104 ctl_set_invalid_field(ctsio, 9105 /*sks_valid*/ 1, 9106 /*command*/ 1, 9107 /*field*/ 2, 9108 /*bit_valid*/ 0, 9109 /*bit*/ 0); 9110 ctl_done((union ctl_io *)ctsio); 9111 return (retval); 9112 break; /* NOTREACHED */ 9113 } 9114 9115 alloc_len = scsi_4btoul(cdb->length); 9116 /* 9117 * The initiator has to allocate at least 16 bytes for this request, 9118 * so he can at least get the header and the first LUN. Otherwise 9119 * we reject the request (per SPC-3 rev 14, section 6.21). 
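* (The 16-byte minimum is the eight-byte report header plus one eight-byte LUN entry.)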
9120 */ 9121 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9122 sizeof(struct scsi_report_luns_lundata))) { 9123 ctl_set_invalid_field(ctsio, 9124 /*sks_valid*/ 1, 9125 /*command*/ 1, 9126 /*field*/ 6, 9127 /*bit_valid*/ 0, 9128 /*bit*/ 0); 9129 ctl_done((union ctl_io *)ctsio); 9130 return (retval); 9131 } 9132 9133 lun_datalen = sizeof(*lun_data) + 9134 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9135 9136 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9137 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9138 ctsio->kern_sg_entries = 0; 9139 9140 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9141 9142 mtx_lock(&softc->ctl_lock); 9143 for (targ_lun_id = 0, num_filled = 0; 9144 targ_lun_id < num_port_luns && num_filled < num_luns; 9145 targ_lun_id++) { 9146 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9147 if (lun_id == UINT32_MAX) 9148 continue; 9149 lun = softc->ctl_luns[lun_id]; 9150 if (lun == NULL) 9151 continue; 9152 9153 be64enc(lun_data->luns[num_filled++].lundata, 9154 ctl_encode_lun(targ_lun_id)); 9155 9156 /* 9157 * According to SPC-3, rev 14 section 6.21: 9158 * 9159 * "The execution of a REPORT LUNS command to any valid and 9160 * installed logical unit shall clear the REPORTED LUNS DATA 9161 * HAS CHANGED unit attention condition for all logical 9162 * units of that target with respect to the requesting 9163 * initiator. A valid and installed logical unit is one 9164 * having a PERIPHERAL QUALIFIER of 000b in the standard 9165 * INQUIRY data (see 6.4.2)." 9166 * 9167 * If request_lun is NULL, the LUN this report luns command 9168 * was issued to is either disabled or doesn't exist. In that 9169 * case, we shouldn't clear any pending lun change unit 9170 * attention. 9171 */ 9172 if (request_lun != NULL) { 9173 mtx_lock(&lun->lun_lock); 9174 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9175 mtx_unlock(&lun->lun_lock); 9176 } 9177 } 9178 mtx_unlock(&softc->ctl_lock); 9179 9180 /* 9181 * It's quite possible that we've returned fewer LUNs than we allocated 9182 * space for. Trim it. 9183 */ 9184 lun_datalen = sizeof(*lun_data) + 9185 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9186 ctsio->kern_rel_offset = 0; 9187 ctsio->kern_sg_entries = 0; 9188 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9189 ctsio->kern_total_len = ctsio->kern_data_len; 9190 9191 /* 9192 * We set this to the actual data length, regardless of how much 9193 * space we actually have to return results. If the user looks at 9194 * this value, he'll know whether or not he allocated enough space 9195 * and reissue the command if necessary. We don't support well 9196 * known logical units, so if the user asks for that, return none. 9197 */ 9198 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9199 9200 /* 9201 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9202 * this request. 
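* Truncating the returned data to the allocation length is not such a failure.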
9203 */ 9204 ctl_set_success(ctsio); 9205 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9206 ctsio->be_move_done = ctl_config_move_done; 9207 ctl_datamove((union ctl_io *)ctsio); 9208 return (retval); 9209 } 9210 9211 int 9212 ctl_request_sense(struct ctl_scsiio *ctsio) 9213 { 9214 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9215 struct ctl_lun *lun = CTL_LUN(ctsio); 9216 struct scsi_request_sense *cdb; 9217 struct scsi_sense_data *sense_ptr, *ps; 9218 uint32_t initidx; 9219 int have_error; 9220 u_int sense_len = SSD_FULL_SIZE; 9221 scsi_sense_data_type sense_format; 9222 ctl_ua_type ua_type; 9223 uint8_t asc = 0, ascq = 0; 9224 9225 cdb = (struct scsi_request_sense *)ctsio->cdb; 9226 9227 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9228 9229 /* 9230 * Determine which sense format the user wants. 9231 */ 9232 if (cdb->byte2 & SRS_DESC) 9233 sense_format = SSD_TYPE_DESC; 9234 else 9235 sense_format = SSD_TYPE_FIXED; 9236 9237 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9238 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9239 ctsio->kern_sg_entries = 0; 9240 ctsio->kern_rel_offset = 0; 9241 9242 /* 9243 * struct scsi_sense_data, which is currently set to 256 bytes, is 9244 * larger than the largest allowed value for the length field in the 9245 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9246 */ 9247 ctsio->kern_data_len = cdb->length; 9248 ctsio->kern_total_len = cdb->length; 9249 9250 /* 9251 * If we don't have a LUN, we don't have any pending sense. 9252 */ 9253 if (lun == NULL || 9254 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9255 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9256 /* "Logical unit not supported" */ 9257 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9258 /*current_error*/ 1, 9259 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9260 /*asc*/ 0x25, 9261 /*ascq*/ 0x00, 9262 SSD_ELEM_NONE); 9263 goto send; 9264 } 9265 9266 have_error = 0; 9267 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9268 /* 9269 * Check for pending sense, and then for pending unit attentions. 9270 * Pending sense gets returned first, then pending unit attentions. 9271 */ 9272 mtx_lock(&lun->lun_lock); 9273 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9274 if (ps != NULL) 9275 ps += initidx % CTL_MAX_INIT_PER_PORT; 9276 if (ps != NULL && ps->error_code != 0) { 9277 scsi_sense_data_type stored_format; 9278 9279 /* 9280 * Check to see which sense format was used for the stored 9281 * sense data. 9282 */ 9283 stored_format = scsi_sense_type(ps); 9284 9285 /* 9286 * If the user requested a different sense format than the 9287 * one we stored, then we need to convert it to the other 9288 * format. If we're going from descriptor to fixed format 9289 * sense data, we may lose things in translation, depending 9290 * on what options were used. 9291 * 9292 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9293 * for some reason we'll just copy it out as-is. 
9294 */ 9295 if ((stored_format == SSD_TYPE_FIXED) 9296 && (sense_format == SSD_TYPE_DESC)) 9297 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9298 ps, (struct scsi_sense_data_desc *)sense_ptr); 9299 else if ((stored_format == SSD_TYPE_DESC) 9300 && (sense_format == SSD_TYPE_FIXED)) 9301 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9302 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9303 else 9304 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9305 9306 ps->error_code = 0; 9307 have_error = 1; 9308 } else { 9309 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9310 sense_format); 9311 if (ua_type != CTL_UA_NONE) 9312 have_error = 1; 9313 } 9314 if (have_error == 0) { 9315 /* 9316 * Report informational exception if have one and allowed. 9317 */ 9318 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9319 asc = lun->ie_asc; 9320 ascq = lun->ie_ascq; 9321 } 9322 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9323 /*current_error*/ 1, 9324 /*sense_key*/ SSD_KEY_NO_SENSE, 9325 /*asc*/ asc, 9326 /*ascq*/ ascq, 9327 SSD_ELEM_NONE); 9328 } 9329 mtx_unlock(&lun->lun_lock); 9330 9331 send: 9332 /* 9333 * We report the SCSI status as OK, since the status of the command 9334 * itself is OK. We're reporting sense as parameter data. 9335 */ 9336 ctl_set_success(ctsio); 9337 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9338 ctsio->be_move_done = ctl_config_move_done; 9339 ctl_datamove((union ctl_io *)ctsio); 9340 return (CTL_RETVAL_COMPLETE); 9341 } 9342 9343 int 9344 ctl_tur(struct ctl_scsiio *ctsio) 9345 { 9346 9347 CTL_DEBUG_PRINT(("ctl_tur\n")); 9348 9349 ctl_set_success(ctsio); 9350 ctl_done((union ctl_io *)ctsio); 9351 9352 return (CTL_RETVAL_COMPLETE); 9353 } 9354 9355 /* 9356 * SCSI VPD page 0x00, the Supported VPD Pages page. 9357 */ 9358 static int 9359 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9360 { 9361 struct ctl_lun *lun = CTL_LUN(ctsio); 9362 struct scsi_vpd_supported_pages *pages; 9363 int sup_page_size; 9364 int p; 9365 9366 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9367 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9368 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9369 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9370 ctsio->kern_rel_offset = 0; 9371 ctsio->kern_sg_entries = 0; 9372 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9373 ctsio->kern_total_len = ctsio->kern_data_len; 9374 9375 /* 9376 * The control device is always connected. The disk device, on the 9377 * other hand, may not be online all the time. Need to change this 9378 * to figure out whether the disk device is actually online or not. 
9379 */ 9380 if (lun != NULL) 9381 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9382 lun->be_lun->lun_type; 9383 else 9384 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9385 9386 p = 0; 9387 /* Supported VPD pages */ 9388 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9389 /* Serial Number */ 9390 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9391 /* Device Identification */ 9392 pages->page_list[p++] = SVPD_DEVICE_ID; 9393 /* Extended INQUIRY Data */ 9394 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9395 /* Mode Page Policy */ 9396 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9397 /* SCSI Ports */ 9398 pages->page_list[p++] = SVPD_SCSI_PORTS; 9399 /* Third-party Copy */ 9400 pages->page_list[p++] = SVPD_SCSI_TPC; 9401 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9402 /* Block limits */ 9403 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9404 /* Block Device Characteristics */ 9405 pages->page_list[p++] = SVPD_BDC; 9406 /* Logical Block Provisioning */ 9407 pages->page_list[p++] = SVPD_LBP; 9408 } 9409 pages->length = p; 9410 9411 ctl_set_success(ctsio); 9412 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9413 ctsio->be_move_done = ctl_config_move_done; 9414 ctl_datamove((union ctl_io *)ctsio); 9415 return (CTL_RETVAL_COMPLETE); 9416 } 9417 9418 /* 9419 * SCSI VPD page 0x80, the Unit Serial Number page. 9420 */ 9421 static int 9422 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9423 { 9424 struct ctl_lun *lun = CTL_LUN(ctsio); 9425 struct scsi_vpd_unit_serial_number *sn_ptr; 9426 int data_len; 9427 9428 data_len = 4 + CTL_SN_LEN; 9429 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9430 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9431 ctsio->kern_rel_offset = 0; 9432 ctsio->kern_sg_entries = 0; 9433 ctsio->kern_data_len = min(data_len, alloc_len); 9434 ctsio->kern_total_len = ctsio->kern_data_len; 9435 9436 /* 9437 * The control device is always connected. The disk device, on the 9438 * other hand, may not be online all the time. Need to change this 9439 * to figure out whether the disk device is actually online or not. 9440 */ 9441 if (lun != NULL) 9442 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9443 lun->be_lun->lun_type; 9444 else 9445 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9446 9447 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9448 sn_ptr->length = CTL_SN_LEN; 9449 /* 9450 * If we don't have a LUN, we just leave the serial number as 9451 * all spaces. 9452 */ 9453 if (lun != NULL) { 9454 strncpy((char *)sn_ptr->serial_num, 9455 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9456 } else 9457 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9458 9459 ctl_set_success(ctsio); 9460 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9461 ctsio->be_move_done = ctl_config_move_done; 9462 ctl_datamove((union ctl_io *)ctsio); 9463 return (CTL_RETVAL_COMPLETE); 9464 } 9465 9466 9467 /* 9468 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
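* It advertises optional features such as the supported task attributes and the maximum sense data length.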
9469 */ 9470 static int 9471 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9472 { 9473 struct ctl_lun *lun = CTL_LUN(ctsio); 9474 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9475 int data_len; 9476 9477 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9478 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9479 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9480 ctsio->kern_sg_entries = 0; 9481 ctsio->kern_rel_offset = 0; 9482 ctsio->kern_data_len = min(data_len, alloc_len); 9483 ctsio->kern_total_len = ctsio->kern_data_len; 9484 9485 /* 9486 * The control device is always connected. The disk device, on the 9487 * other hand, may not be online all the time. 9488 */ 9489 if (lun != NULL) 9490 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9491 lun->be_lun->lun_type; 9492 else 9493 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9494 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9495 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9496 /* 9497 * We support head of queue, ordered and simple tags. 9498 */ 9499 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9500 /* 9501 * Volatile cache supported. 9502 */ 9503 eid_ptr->flags3 = SVPD_EID_V_SUP; 9504 9505 /* 9506 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9507 * attention for a particular IT nexus on all LUNs once we report 9508 * it to that nexus once. This bit is required as of SPC-4. 9509 */ 9510 eid_ptr->flags4 = SVPD_EID_LUICLR; 9511 9512 /* 9513 * We support revert to defaults (RTD) bit in MODE SELECT. 9514 */ 9515 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9516 9517 /* 9518 * XXX KDM in order to correctly answer this, we would need 9519 * information from the SIM to determine how much sense data it 9520 * can send. So this would really be a path inquiry field, most 9521 * likely. This can be set to a maximum of 252 according to SPC-4, 9522 * but the hardware may or may not be able to support that much. 9523 * 0 just means that the maximum sense data length is not reported. 9524 */ 9525 eid_ptr->max_sense_length = 0; 9526 9527 ctl_set_success(ctsio); 9528 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9529 ctsio->be_move_done = ctl_config_move_done; 9530 ctl_datamove((union ctl_io *)ctsio); 9531 return (CTL_RETVAL_COMPLETE); 9532 } 9533 9534 static int 9535 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9536 { 9537 struct ctl_lun *lun = CTL_LUN(ctsio); 9538 struct scsi_vpd_mode_page_policy *mpp_ptr; 9539 int data_len; 9540 9541 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9542 sizeof(struct scsi_vpd_mode_page_policy_descr); 9543 9544 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9545 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9546 ctsio->kern_rel_offset = 0; 9547 ctsio->kern_sg_entries = 0; 9548 ctsio->kern_data_len = min(data_len, alloc_len); 9549 ctsio->kern_total_len = ctsio->kern_data_len; 9550 9551 /* 9552 * The control device is always connected. The disk device, on the 9553 * other hand, may not be online all the time. 
9554 */ 9555 if (lun != NULL) 9556 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9557 lun->be_lun->lun_type; 9558 else 9559 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9560 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9561 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9562 mpp_ptr->descr[0].page_code = 0x3f; 9563 mpp_ptr->descr[0].subpage_code = 0xff; 9564 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9565 9566 ctl_set_success(ctsio); 9567 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9568 ctsio->be_move_done = ctl_config_move_done; 9569 ctl_datamove((union ctl_io *)ctsio); 9570 return (CTL_RETVAL_COMPLETE); 9571 } 9572 9573 /* 9574 * SCSI VPD page 0x83, the Device Identification page. 9575 */ 9576 static int 9577 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9578 { 9579 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9580 struct ctl_port *port = CTL_PORT(ctsio); 9581 struct ctl_lun *lun = CTL_LUN(ctsio); 9582 struct scsi_vpd_device_id *devid_ptr; 9583 struct scsi_vpd_id_descriptor *desc; 9584 int data_len, g; 9585 uint8_t proto; 9586 9587 data_len = sizeof(struct scsi_vpd_device_id) + 9588 sizeof(struct scsi_vpd_id_descriptor) + 9589 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9590 sizeof(struct scsi_vpd_id_descriptor) + 9591 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9592 if (lun && lun->lun_devid) 9593 data_len += lun->lun_devid->len; 9594 if (port && port->port_devid) 9595 data_len += port->port_devid->len; 9596 if (port && port->target_devid) 9597 data_len += port->target_devid->len; 9598 9599 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9600 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9601 ctsio->kern_sg_entries = 0; 9602 ctsio->kern_rel_offset = 0; 9603 ctsio->kern_sg_entries = 0; 9604 ctsio->kern_data_len = min(data_len, alloc_len); 9605 ctsio->kern_total_len = ctsio->kern_data_len; 9606 9607 /* 9608 * The control device is always connected. The disk device, on the 9609 * other hand, may not be online all the time. 9610 */ 9611 if (lun != NULL) 9612 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9613 lun->be_lun->lun_type; 9614 else 9615 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9616 devid_ptr->page_code = SVPD_DEVICE_ID; 9617 scsi_ulto2b(data_len - 4, devid_ptr->length); 9618 9619 if (port && port->port_type == CTL_PORT_FC) 9620 proto = SCSI_PROTO_FC << 4; 9621 else if (port && port->port_type == CTL_PORT_SAS) 9622 proto = SCSI_PROTO_SAS << 4; 9623 else if (port && port->port_type == CTL_PORT_ISCSI) 9624 proto = SCSI_PROTO_ISCSI << 4; 9625 else 9626 proto = SCSI_PROTO_SPI << 4; 9627 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9628 9629 /* 9630 * We're using a LUN association here. i.e., this device ID is a 9631 * per-LUN identifier. 9632 */ 9633 if (lun && lun->lun_devid) { 9634 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9635 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9636 lun->lun_devid->len); 9637 } 9638 9639 /* 9640 * This is for the WWPN which is a port association. 
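 * The port driver provides this designator pre-built in port->port_devid,
 * so it is copied into the page verbatim.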
9641 */ 9642 if (port && port->port_devid) { 9643 memcpy(desc, port->port_devid->data, port->port_devid->len); 9644 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9645 port->port_devid->len); 9646 } 9647 9648 /* 9649 * This is for the Relative Target Port(type 4h) identifier 9650 */ 9651 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9652 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9653 SVPD_ID_TYPE_RELTARG; 9654 desc->length = 4; 9655 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9656 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9657 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9658 9659 /* 9660 * This is for the Target Port Group(type 5h) identifier 9661 */ 9662 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9663 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9664 SVPD_ID_TYPE_TPORTGRP; 9665 desc->length = 4; 9666 if (softc->is_single || 9667 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9668 g = 1; 9669 else 9670 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9671 scsi_ulto2b(g, &desc->identifier[2]); 9672 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9673 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9674 9675 /* 9676 * This is for the Target identifier 9677 */ 9678 if (port && port->target_devid) { 9679 memcpy(desc, port->target_devid->data, port->target_devid->len); 9680 } 9681 9682 ctl_set_success(ctsio); 9683 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9684 ctsio->be_move_done = ctl_config_move_done; 9685 ctl_datamove((union ctl_io *)ctsio); 9686 return (CTL_RETVAL_COMPLETE); 9687 } 9688 9689 static int 9690 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9691 { 9692 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9693 struct ctl_lun *lun = CTL_LUN(ctsio); 9694 struct scsi_vpd_scsi_ports *sp; 9695 struct scsi_vpd_port_designation *pd; 9696 struct scsi_vpd_port_designation_cont *pdc; 9697 struct ctl_port *port; 9698 int data_len, num_target_ports, iid_len, id_len; 9699 9700 num_target_ports = 0; 9701 iid_len = 0; 9702 id_len = 0; 9703 mtx_lock(&softc->ctl_lock); 9704 STAILQ_FOREACH(port, &softc->port_list, links) { 9705 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9706 continue; 9707 if (lun != NULL && 9708 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9709 continue; 9710 num_target_ports++; 9711 if (port->init_devid) 9712 iid_len += port->init_devid->len; 9713 if (port->port_devid) 9714 id_len += port->port_devid->len; 9715 } 9716 mtx_unlock(&softc->ctl_lock); 9717 9718 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9719 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9720 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9721 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9722 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9723 ctsio->kern_sg_entries = 0; 9724 ctsio->kern_rel_offset = 0; 9725 ctsio->kern_sg_entries = 0; 9726 ctsio->kern_data_len = min(data_len, alloc_len); 9727 ctsio->kern_total_len = ctsio->kern_data_len; 9728 9729 /* 9730 * The control device is always connected. The disk device, on the 9731 * other hand, may not be online all the time. Need to change this 9732 * to figure out whether the disk device is actually online or not. 
9733 */ 9734 if (lun != NULL) 9735 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9736 lun->be_lun->lun_type; 9737 else 9738 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9739 9740 sp->page_code = SVPD_SCSI_PORTS; 9741 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9742 sp->page_length); 9743 pd = &sp->design[0]; 9744 9745 mtx_lock(&softc->ctl_lock); 9746 STAILQ_FOREACH(port, &softc->port_list, links) { 9747 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9748 continue; 9749 if (lun != NULL && 9750 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9751 continue; 9752 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9753 if (port->init_devid) { 9754 iid_len = port->init_devid->len; 9755 memcpy(pd->initiator_transportid, 9756 port->init_devid->data, port->init_devid->len); 9757 } else 9758 iid_len = 0; 9759 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9760 pdc = (struct scsi_vpd_port_designation_cont *) 9761 (&pd->initiator_transportid[iid_len]); 9762 if (port->port_devid) { 9763 id_len = port->port_devid->len; 9764 memcpy(pdc->target_port_descriptors, 9765 port->port_devid->data, port->port_devid->len); 9766 } else 9767 id_len = 0; 9768 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9769 pd = (struct scsi_vpd_port_designation *) 9770 ((uint8_t *)pdc->target_port_descriptors + id_len); 9771 } 9772 mtx_unlock(&softc->ctl_lock); 9773 9774 ctl_set_success(ctsio); 9775 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9776 ctsio->be_move_done = ctl_config_move_done; 9777 ctl_datamove((union ctl_io *)ctsio); 9778 return (CTL_RETVAL_COMPLETE); 9779 } 9780 9781 static int 9782 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9783 { 9784 struct ctl_lun *lun = CTL_LUN(ctsio); 9785 struct scsi_vpd_block_limits *bl_ptr; 9786 const char *val; 9787 uint64_t ival; 9788 9789 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9790 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9791 ctsio->kern_sg_entries = 0; 9792 ctsio->kern_rel_offset = 0; 9793 ctsio->kern_sg_entries = 0; 9794 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9795 ctsio->kern_total_len = ctsio->kern_data_len; 9796 9797 /* 9798 * The control device is always connected. The disk device, on the 9799 * other hand, may not be online all the time. Need to change this 9800 * to figure out whether the disk device is actually online or not. 
9801 */ 9802 if (lun != NULL) 9803 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9804 lun->be_lun->lun_type; 9805 else 9806 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9807 9808 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9809 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9810 bl_ptr->max_cmp_write_len = 0xff; 9811 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9812 if (lun != NULL) { 9813 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9814 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9815 ival = 0xffffffff; 9816 val = dnvlist_get_string(lun->be_lun->options, 9817 "unmap_max_lba", NULL); 9818 if (val != NULL) 9819 ctl_expand_number(val, &ival); 9820 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9821 ival = 0xffffffff; 9822 val = dnvlist_get_string(lun->be_lun->options, 9823 "unmap_max_descr", NULL); 9824 if (val != NULL) 9825 ctl_expand_number(val, &ival); 9826 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9827 if (lun->be_lun->ublockexp != 0) { 9828 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9829 bl_ptr->opt_unmap_grain); 9830 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9831 bl_ptr->unmap_grain_align); 9832 } 9833 } 9834 scsi_ulto4b(lun->be_lun->atomicblock, 9835 bl_ptr->max_atomic_transfer_length); 9836 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9837 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9838 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9839 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9840 ival = UINT64_MAX; 9841 val = dnvlist_get_string(lun->be_lun->options, 9842 "write_same_max_lba", NULL); 9843 if (val != NULL) 9844 ctl_expand_number(val, &ival); 9845 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9846 } 9847 9848 ctl_set_success(ctsio); 9849 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9850 ctsio->be_move_done = ctl_config_move_done; 9851 ctl_datamove((union ctl_io *)ctsio); 9852 return (CTL_RETVAL_COMPLETE); 9853 } 9854 9855 static int 9856 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9857 { 9858 struct ctl_lun *lun = CTL_LUN(ctsio); 9859 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9860 const char *value; 9861 u_int i; 9862 9863 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9864 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9865 ctsio->kern_sg_entries = 0; 9866 ctsio->kern_rel_offset = 0; 9867 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9868 ctsio->kern_total_len = ctsio->kern_data_len; 9869 9870 /* 9871 * The control device is always connected. The disk device, on the 9872 * other hand, may not be online all the time. Need to change this 9873 * to figure out whether the disk device is actually online or not. 
9874 */ 9875 if (lun != NULL) 9876 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9877 lun->be_lun->lun_type; 9878 else 9879 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9880 bdc_ptr->page_code = SVPD_BDC; 9881 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9882 if (lun != NULL && 9883 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) 9884 i = strtol(value, NULL, 0); 9885 else 9886 i = CTL_DEFAULT_ROTATION_RATE; 9887 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9888 if (lun != NULL && 9889 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) 9890 i = strtol(value, NULL, 0); 9891 else 9892 i = 0; 9893 bdc_ptr->wab_wac_ff = (i & 0x0f); 9894 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9895 9896 ctl_set_success(ctsio); 9897 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9898 ctsio->be_move_done = ctl_config_move_done; 9899 ctl_datamove((union ctl_io *)ctsio); 9900 return (CTL_RETVAL_COMPLETE); 9901 } 9902 9903 static int 9904 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9905 { 9906 struct ctl_lun *lun = CTL_LUN(ctsio); 9907 struct scsi_vpd_logical_block_prov *lbp_ptr; 9908 const char *value; 9909 9910 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9911 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9912 ctsio->kern_sg_entries = 0; 9913 ctsio->kern_rel_offset = 0; 9914 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9915 ctsio->kern_total_len = ctsio->kern_data_len; 9916 9917 /* 9918 * The control device is always connected. The disk device, on the 9919 * other hand, may not be online all the time. Need to change this 9920 * to figure out whether the disk device is actually online or not. 9921 */ 9922 if (lun != NULL) 9923 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9924 lun->be_lun->lun_type; 9925 else 9926 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9927 9928 lbp_ptr->page_code = SVPD_LBP; 9929 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9930 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9931 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9932 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9933 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9934 value = dnvlist_get_string(lun->be_lun->options, 9935 "provisioning_type", NULL); 9936 if (value != NULL) { 9937 if (strcmp(value, "resource") == 0) 9938 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 9939 else if (strcmp(value, "thin") == 0) 9940 lbp_ptr->prov_type = SVPD_LBP_THIN; 9941 } else 9942 lbp_ptr->prov_type = SVPD_LBP_THIN; 9943 } 9944 9945 ctl_set_success(ctsio); 9946 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9947 ctsio->be_move_done = ctl_config_move_done; 9948 ctl_datamove((union ctl_io *)ctsio); 9949 return (CTL_RETVAL_COMPLETE); 9950 } 9951 9952 /* 9953 * INQUIRY with the EVPD bit set. 
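 * Decode the allocation length and dispatch on the PAGE CODE byte to the
 * per-page handlers above.  Unknown page codes, and block device pages
 * requested for a LUN that is not direct access, are rejected as an
 * invalid field in the CDB.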
9954 */ 9955 static int 9956 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9957 { 9958 struct ctl_lun *lun = CTL_LUN(ctsio); 9959 struct scsi_inquiry *cdb; 9960 int alloc_len, retval; 9961 9962 cdb = (struct scsi_inquiry *)ctsio->cdb; 9963 alloc_len = scsi_2btoul(cdb->length); 9964 9965 switch (cdb->page_code) { 9966 case SVPD_SUPPORTED_PAGES: 9967 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9968 break; 9969 case SVPD_UNIT_SERIAL_NUMBER: 9970 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9971 break; 9972 case SVPD_DEVICE_ID: 9973 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9974 break; 9975 case SVPD_EXTENDED_INQUIRY_DATA: 9976 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9977 break; 9978 case SVPD_MODE_PAGE_POLICY: 9979 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9980 break; 9981 case SVPD_SCSI_PORTS: 9982 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9983 break; 9984 case SVPD_SCSI_TPC: 9985 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9986 break; 9987 case SVPD_BLOCK_LIMITS: 9988 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9989 goto err; 9990 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9991 break; 9992 case SVPD_BDC: 9993 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9994 goto err; 9995 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9996 break; 9997 case SVPD_LBP: 9998 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9999 goto err; 10000 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10001 break; 10002 default: 10003 err: 10004 ctl_set_invalid_field(ctsio, 10005 /*sks_valid*/ 1, 10006 /*command*/ 1, 10007 /*field*/ 2, 10008 /*bit_valid*/ 0, 10009 /*bit*/ 0); 10010 ctl_done((union ctl_io *)ctsio); 10011 retval = CTL_RETVAL_COMPLETE; 10012 break; 10013 } 10014 10015 return (retval); 10016 } 10017 10018 /* 10019 * Standard INQUIRY data. 10020 */ 10021 static int 10022 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10023 { 10024 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10025 struct ctl_port *port = CTL_PORT(ctsio); 10026 struct ctl_lun *lun = CTL_LUN(ctsio); 10027 struct scsi_inquiry_data *inq_ptr; 10028 struct scsi_inquiry *cdb; 10029 const char *val; 10030 uint32_t alloc_len, data_len; 10031 ctl_port_type port_type; 10032 10033 port_type = port->port_type; 10034 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10035 port_type = CTL_PORT_SCSI; 10036 10037 cdb = (struct scsi_inquiry *)ctsio->cdb; 10038 alloc_len = scsi_2btoul(cdb->length); 10039 10040 /* 10041 * We malloc the full inquiry data size here and fill it 10042 * in. If the user only asks for less, we'll give him 10043 * that much. 
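 * (kern_data_len is clamped to the allocation length from the CDB, so a
 * short allocation simply truncates what is returned to the initiator.)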
10044 */ 10045 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10046 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10047 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10048 ctsio->kern_sg_entries = 0; 10049 ctsio->kern_rel_offset = 0; 10050 ctsio->kern_data_len = min(data_len, alloc_len); 10051 ctsio->kern_total_len = ctsio->kern_data_len; 10052 10053 if (lun != NULL) { 10054 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10055 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10056 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10057 lun->be_lun->lun_type; 10058 } else { 10059 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10060 lun->be_lun->lun_type; 10061 } 10062 if (lun->flags & CTL_LUN_REMOVABLE) 10063 inq_ptr->dev_qual2 |= SID_RMB; 10064 } else 10065 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10066 10067 /* RMB in byte 2 is 0 */ 10068 inq_ptr->version = SCSI_REV_SPC5; 10069 10070 /* 10071 * According to SAM-3, even if a device only supports a single 10072 * level of LUN addressing, it should still set the HISUP bit: 10073 * 10074 * 4.9.1 Logical unit numbers overview 10075 * 10076 * All logical unit number formats described in this standard are 10077 * hierarchical in structure even when only a single level in that 10078 * hierarchy is used. The HISUP bit shall be set to one in the 10079 * standard INQUIRY data (see SPC-2) when any logical unit number 10080 * format described in this standard is used. Non-hierarchical 10081 * formats are outside the scope of this standard. 10082 * 10083 * Therefore we set the HiSup bit here. 10084 * 10085 * The response format is 2, per SPC-3. 10086 */ 10087 inq_ptr->response_format = SID_HiSup | 2; 10088 10089 inq_ptr->additional_length = data_len - 10090 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10091 CTL_DEBUG_PRINT(("additional_length = %d\n", 10092 inq_ptr->additional_length)); 10093 10094 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10095 if (port_type == CTL_PORT_SCSI) 10096 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10097 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10098 inq_ptr->flags = SID_CmdQue; 10099 if (port_type == CTL_PORT_SCSI) 10100 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10101 10102 /* 10103 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10104 * We have 8 bytes for the vendor name, and 16 bytes for the device 10105 * name and 4 bytes for the revision. 
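 * Values shorter than a field are space padded; longer values are
 * truncated to the field size.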
10106 */ 10107 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10108 "vendor", NULL)) == NULL) { 10109 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10110 } else { 10111 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10112 strncpy(inq_ptr->vendor, val, 10113 min(sizeof(inq_ptr->vendor), strlen(val))); 10114 } 10115 if (lun == NULL) { 10116 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10117 sizeof(inq_ptr->product)); 10118 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", 10119 NULL)) == NULL) { 10120 switch (lun->be_lun->lun_type) { 10121 case T_DIRECT: 10122 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10123 sizeof(inq_ptr->product)); 10124 break; 10125 case T_PROCESSOR: 10126 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10127 sizeof(inq_ptr->product)); 10128 break; 10129 case T_CDROM: 10130 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10131 sizeof(inq_ptr->product)); 10132 break; 10133 default: 10134 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10135 sizeof(inq_ptr->product)); 10136 break; 10137 } 10138 } else { 10139 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10140 strncpy(inq_ptr->product, val, 10141 min(sizeof(inq_ptr->product), strlen(val))); 10142 } 10143 10144 /* 10145 * XXX make this a macro somewhere so it automatically gets 10146 * incremented when we make changes. 10147 */ 10148 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10149 "revision", NULL)) == NULL) { 10150 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10151 } else { 10152 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10153 strncpy(inq_ptr->revision, val, 10154 min(sizeof(inq_ptr->revision), strlen(val))); 10155 } 10156 10157 /* 10158 * For parallel SCSI, we support double transition and single 10159 * transition clocking. We also support QAS (Quick Arbitration 10160 * and Selection) and Information Unit transfers on both the 10161 * control and array devices. 
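 * These bits are only meaningful on parallel SCSI ports, so they are set
 * only when the port type is CTL_PORT_SCSI.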
10162 */ 10163 if (port_type == CTL_PORT_SCSI) 10164 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10165 SID_SPI_IUS; 10166 10167 /* SAM-6 (no version claimed) */ 10168 scsi_ulto2b(0x00C0, inq_ptr->version1); 10169 /* SPC-5 (no version claimed) */ 10170 scsi_ulto2b(0x05C0, inq_ptr->version2); 10171 if (port_type == CTL_PORT_FC) { 10172 /* FCP-2 ANSI INCITS.350:2003 */ 10173 scsi_ulto2b(0x0917, inq_ptr->version3); 10174 } else if (port_type == CTL_PORT_SCSI) { 10175 /* SPI-4 ANSI INCITS.362:200x */ 10176 scsi_ulto2b(0x0B56, inq_ptr->version3); 10177 } else if (port_type == CTL_PORT_ISCSI) { 10178 /* iSCSI (no version claimed) */ 10179 scsi_ulto2b(0x0960, inq_ptr->version3); 10180 } else if (port_type == CTL_PORT_SAS) { 10181 /* SAS (no version claimed) */ 10182 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10183 } else if (port_type == CTL_PORT_UMASS) { 10184 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10185 scsi_ulto2b(0x1730, inq_ptr->version3); 10186 } 10187 10188 if (lun == NULL) { 10189 /* SBC-4 (no version claimed) */ 10190 scsi_ulto2b(0x0600, inq_ptr->version4); 10191 } else { 10192 switch (lun->be_lun->lun_type) { 10193 case T_DIRECT: 10194 /* SBC-4 (no version claimed) */ 10195 scsi_ulto2b(0x0600, inq_ptr->version4); 10196 break; 10197 case T_PROCESSOR: 10198 break; 10199 case T_CDROM: 10200 /* MMC-6 (no version claimed) */ 10201 scsi_ulto2b(0x04E0, inq_ptr->version4); 10202 break; 10203 default: 10204 break; 10205 } 10206 } 10207 10208 ctl_set_success(ctsio); 10209 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10210 ctsio->be_move_done = ctl_config_move_done; 10211 ctl_datamove((union ctl_io *)ctsio); 10212 return (CTL_RETVAL_COMPLETE); 10213 } 10214 10215 int 10216 ctl_inquiry(struct ctl_scsiio *ctsio) 10217 { 10218 struct scsi_inquiry *cdb; 10219 int retval; 10220 10221 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10222 10223 cdb = (struct scsi_inquiry *)ctsio->cdb; 10224 if (cdb->byte2 & SI_EVPD) 10225 retval = ctl_inquiry_evpd(ctsio); 10226 else if (cdb->page_code == 0) 10227 retval = ctl_inquiry_std(ctsio); 10228 else { 10229 ctl_set_invalid_field(ctsio, 10230 /*sks_valid*/ 1, 10231 /*command*/ 1, 10232 /*field*/ 2, 10233 /*bit_valid*/ 0, 10234 /*bit*/ 0); 10235 ctl_done((union ctl_io *)ctsio); 10236 return (CTL_RETVAL_COMPLETE); 10237 } 10238 10239 return (retval); 10240 } 10241 10242 int 10243 ctl_get_config(struct ctl_scsiio *ctsio) 10244 { 10245 struct ctl_lun *lun = CTL_LUN(ctsio); 10246 struct scsi_get_config_header *hdr; 10247 struct scsi_get_config_feature *feature; 10248 struct scsi_get_config *cdb; 10249 uint32_t alloc_len, data_len; 10250 int rt, starting; 10251 10252 cdb = (struct scsi_get_config *)ctsio->cdb; 10253 rt = (cdb->rt & SGC_RT_MASK); 10254 starting = scsi_2btoul(cdb->starting_feature); 10255 alloc_len = scsi_2btoul(cdb->length); 10256 10257 data_len = sizeof(struct scsi_get_config_header) + 10258 sizeof(struct scsi_get_config_feature) + 8 + 10259 sizeof(struct scsi_get_config_feature) + 8 + 10260 sizeof(struct scsi_get_config_feature) + 4 + 10261 sizeof(struct scsi_get_config_feature) + 4 + 10262 sizeof(struct scsi_get_config_feature) + 8 + 10263 sizeof(struct scsi_get_config_feature) + 10264 sizeof(struct scsi_get_config_feature) + 4 + 10265 sizeof(struct scsi_get_config_feature) + 4 + 10266 sizeof(struct scsi_get_config_feature) + 4 + 10267 sizeof(struct scsi_get_config_feature) + 4 + 10268 sizeof(struct scsi_get_config_feature) + 4 + 10269 sizeof(struct scsi_get_config_feature) + 4; 10270 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10271 ctsio->kern_sg_entries = 0; 10272 ctsio->kern_rel_offset = 0; 10273 10274 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10275 if (lun->flags & CTL_LUN_NO_MEDIA) 10276 scsi_ulto2b(0x0000, hdr->current_profile); 10277 else 10278 scsi_ulto2b(0x0010, hdr->current_profile); 10279 feature = (struct scsi_get_config_feature *)(hdr + 1); 10280 10281 if (starting > 0x003b) 10282 goto done; 10283 if (starting > 0x003a) 10284 goto f3b; 10285 if (starting > 0x002b) 10286 goto f3a; 10287 if (starting > 0x002a) 10288 goto f2b; 10289 if (starting > 0x001f) 10290 goto f2a; 10291 if (starting > 0x001e) 10292 goto f1f; 10293 if (starting > 0x001d) 10294 goto f1e; 10295 if (starting > 0x0010) 10296 goto f1d; 10297 if (starting > 0x0003) 10298 goto f10; 10299 if (starting > 0x0002) 10300 goto f3; 10301 if (starting > 0x0001) 10302 goto f2; 10303 if (starting > 0x0000) 10304 goto f1; 10305 10306 /* Profile List */ 10307 scsi_ulto2b(0x0000, feature->feature_code); 10308 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10309 feature->add_length = 8; 10310 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10311 feature->feature_data[2] = 0x00; 10312 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10313 feature->feature_data[6] = 0x01; 10314 feature = (struct scsi_get_config_feature *) 10315 &feature->feature_data[feature->add_length]; 10316 10317 f1: /* Core */ 10318 scsi_ulto2b(0x0001, feature->feature_code); 10319 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10320 feature->add_length = 8; 10321 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10322 feature->feature_data[4] = 0x03; 10323 feature = (struct scsi_get_config_feature *) 10324 &feature->feature_data[feature->add_length]; 10325 10326 f2: /* Morphing */ 10327 scsi_ulto2b(0x0002, feature->feature_code); 10328 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10329 feature->add_length = 4; 10330 feature->feature_data[0] = 0x02; 10331 feature = (struct scsi_get_config_feature *) 10332 &feature->feature_data[feature->add_length]; 10333 10334 f3: /* Removable Medium */ 10335 scsi_ulto2b(0x0003, feature->feature_code); 10336 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10337 feature->add_length = 4; 10338 feature->feature_data[0] = 0x39; 10339 feature = (struct scsi_get_config_feature *) 10340 &feature->feature_data[feature->add_length]; 10341 10342 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10343 goto done; 10344 10345 f10: /* Random Read */ 10346 scsi_ulto2b(0x0010, feature->feature_code); 10347 feature->flags = 0x00; 10348 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10349 feature->flags |= SGC_F_CURRENT; 10350 feature->add_length = 8; 10351 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10352 scsi_ulto2b(1, &feature->feature_data[4]); 10353 feature->feature_data[6] = 0x00; 10354 feature = (struct scsi_get_config_feature *) 10355 &feature->feature_data[feature->add_length]; 10356 10357 f1d: /* Multi-Read */ 10358 scsi_ulto2b(0x001D, feature->feature_code); 10359 feature->flags = 0x00; 10360 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10361 feature->flags |= SGC_F_CURRENT; 10362 feature->add_length = 0; 10363 feature = (struct scsi_get_config_feature *) 10364 &feature->feature_data[feature->add_length]; 10365 10366 f1e: /* CD Read */ 10367 scsi_ulto2b(0x001E, feature->feature_code); 10368 feature->flags = 0x00; 10369 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10370 feature->flags |= SGC_F_CURRENT; 10371 feature->add_length = 
4; 10372 feature->feature_data[0] = 0x00; 10373 feature = (struct scsi_get_config_feature *) 10374 &feature->feature_data[feature->add_length]; 10375 10376 f1f: /* DVD Read */ 10377 scsi_ulto2b(0x001F, feature->feature_code); 10378 feature->flags = 0x08; 10379 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10380 feature->flags |= SGC_F_CURRENT; 10381 feature->add_length = 4; 10382 feature->feature_data[0] = 0x01; 10383 feature->feature_data[2] = 0x03; 10384 feature = (struct scsi_get_config_feature *) 10385 &feature->feature_data[feature->add_length]; 10386 10387 f2a: /* DVD+RW */ 10388 scsi_ulto2b(0x002A, feature->feature_code); 10389 feature->flags = 0x04; 10390 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10391 feature->flags |= SGC_F_CURRENT; 10392 feature->add_length = 4; 10393 feature->feature_data[0] = 0x00; 10394 feature->feature_data[1] = 0x00; 10395 feature = (struct scsi_get_config_feature *) 10396 &feature->feature_data[feature->add_length]; 10397 10398 f2b: /* DVD+R */ 10399 scsi_ulto2b(0x002B, feature->feature_code); 10400 feature->flags = 0x00; 10401 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10402 feature->flags |= SGC_F_CURRENT; 10403 feature->add_length = 4; 10404 feature->feature_data[0] = 0x00; 10405 feature = (struct scsi_get_config_feature *) 10406 &feature->feature_data[feature->add_length]; 10407 10408 f3a: /* DVD+RW Dual Layer */ 10409 scsi_ulto2b(0x003A, feature->feature_code); 10410 feature->flags = 0x00; 10411 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10412 feature->flags |= SGC_F_CURRENT; 10413 feature->add_length = 4; 10414 feature->feature_data[0] = 0x00; 10415 feature->feature_data[1] = 0x00; 10416 feature = (struct scsi_get_config_feature *) 10417 &feature->feature_data[feature->add_length]; 10418 10419 f3b: /* DVD+R Dual Layer */ 10420 scsi_ulto2b(0x003B, feature->feature_code); 10421 feature->flags = 0x00; 10422 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10423 feature->flags |= SGC_F_CURRENT; 10424 feature->add_length = 4; 10425 feature->feature_data[0] = 0x00; 10426 feature = (struct scsi_get_config_feature *) 10427 &feature->feature_data[feature->add_length]; 10428 10429 done: 10430 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10431 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10432 feature = (struct scsi_get_config_feature *)(hdr + 1); 10433 if (scsi_2btoul(feature->feature_code) == starting) 10434 feature = (struct scsi_get_config_feature *) 10435 &feature->feature_data[feature->add_length]; 10436 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10437 } 10438 scsi_ulto4b(data_len - 4, hdr->data_length); 10439 ctsio->kern_data_len = min(data_len, alloc_len); 10440 ctsio->kern_total_len = ctsio->kern_data_len; 10441 10442 ctl_set_success(ctsio); 10443 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10444 ctsio->be_move_done = ctl_config_move_done; 10445 ctl_datamove((union ctl_io *)ctsio); 10446 return (CTL_RETVAL_COMPLETE); 10447 } 10448 10449 int 10450 ctl_get_event_status(struct ctl_scsiio *ctsio) 10451 { 10452 struct scsi_get_event_status_header *hdr; 10453 struct scsi_get_event_status *cdb; 10454 uint32_t alloc_len, data_len; 10455 int notif_class; 10456 10457 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10458 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10459 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10460 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10461 ctl_done((union ctl_io *)ctsio); 10462 return (CTL_RETVAL_COMPLETE); 10463 } 10464 notif_class = cdb->notif_class; 10465 alloc_len = scsi_2btoul(cdb->length); 10466 10467 data_len = 
sizeof(struct scsi_get_event_status_header); 10468 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10469 ctsio->kern_sg_entries = 0; 10470 ctsio->kern_rel_offset = 0; 10471 ctsio->kern_data_len = min(data_len, alloc_len); 10472 ctsio->kern_total_len = ctsio->kern_data_len; 10473 10474 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10475 scsi_ulto2b(0, hdr->descr_length); 10476 hdr->nea_class = SGESN_NEA; 10477 hdr->supported_class = 0; 10478 10479 ctl_set_success(ctsio); 10480 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10481 ctsio->be_move_done = ctl_config_move_done; 10482 ctl_datamove((union ctl_io *)ctsio); 10483 return (CTL_RETVAL_COMPLETE); 10484 } 10485 10486 int 10487 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10488 { 10489 struct scsi_mechanism_status_header *hdr; 10490 struct scsi_mechanism_status *cdb; 10491 uint32_t alloc_len, data_len; 10492 10493 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10494 alloc_len = scsi_2btoul(cdb->length); 10495 10496 data_len = sizeof(struct scsi_mechanism_status_header); 10497 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10498 ctsio->kern_sg_entries = 0; 10499 ctsio->kern_rel_offset = 0; 10500 ctsio->kern_data_len = min(data_len, alloc_len); 10501 ctsio->kern_total_len = ctsio->kern_data_len; 10502 10503 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10504 hdr->state1 = 0x00; 10505 hdr->state2 = 0xe0; 10506 scsi_ulto3b(0, hdr->lba); 10507 hdr->slots_num = 0; 10508 scsi_ulto2b(0, hdr->slots_length); 10509 10510 ctl_set_success(ctsio); 10511 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10512 ctsio->be_move_done = ctl_config_move_done; 10513 ctl_datamove((union ctl_io *)ctsio); 10514 return (CTL_RETVAL_COMPLETE); 10515 } 10516 10517 static void 10518 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10519 { 10520 10521 lba += 150; 10522 buf[0] = 0; 10523 buf[1] = bin2bcd((lba / 75) / 60); 10524 buf[2] = bin2bcd((lba / 75) % 60); 10525 buf[3] = bin2bcd(lba % 75); 10526 } 10527 10528 int 10529 ctl_read_toc(struct ctl_scsiio *ctsio) 10530 { 10531 struct ctl_lun *lun = CTL_LUN(ctsio); 10532 struct scsi_read_toc_hdr *hdr; 10533 struct scsi_read_toc_type01_descr *descr; 10534 struct scsi_read_toc *cdb; 10535 uint32_t alloc_len, data_len; 10536 int format, msf; 10537 10538 cdb = (struct scsi_read_toc *)ctsio->cdb; 10539 msf = (cdb->byte2 & CD_MSF) != 0; 10540 format = cdb->format; 10541 alloc_len = scsi_2btoul(cdb->data_len); 10542 10543 data_len = sizeof(struct scsi_read_toc_hdr); 10544 if (format == 0) 10545 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10546 else 10547 data_len += sizeof(struct scsi_read_toc_type01_descr); 10548 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10549 ctsio->kern_sg_entries = 0; 10550 ctsio->kern_rel_offset = 0; 10551 ctsio->kern_data_len = min(data_len, alloc_len); 10552 ctsio->kern_total_len = ctsio->kern_data_len; 10553 10554 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10555 if (format == 0) { 10556 scsi_ulto2b(0x12, hdr->data_length); 10557 hdr->first = 1; 10558 hdr->last = 1; 10559 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10560 descr->addr_ctl = 0x14; 10561 descr->track_number = 1; 10562 if (msf) 10563 ctl_ultomsf(0, descr->track_start); 10564 else 10565 scsi_ulto4b(0, descr->track_start); 10566 descr++; 10567 descr->addr_ctl = 0x14; 10568 descr->track_number = 0xaa; 10569 if (msf) 10570 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10571 else 10572 
scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10573 } else { 10574 scsi_ulto2b(0x0a, hdr->data_length); 10575 hdr->first = 1; 10576 hdr->last = 1; 10577 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10578 descr->addr_ctl = 0x14; 10579 descr->track_number = 1; 10580 if (msf) 10581 ctl_ultomsf(0, descr->track_start); 10582 else 10583 scsi_ulto4b(0, descr->track_start); 10584 } 10585 10586 ctl_set_success(ctsio); 10587 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10588 ctsio->be_move_done = ctl_config_move_done; 10589 ctl_datamove((union ctl_io *)ctsio); 10590 return (CTL_RETVAL_COMPLETE); 10591 } 10592 10593 /* 10594 * For known CDB types, parse the LBA and length. 10595 */ 10596 static int 10597 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10598 { 10599 if (io->io_hdr.io_type != CTL_IO_SCSI) 10600 return (1); 10601 10602 switch (io->scsiio.cdb[0]) { 10603 case COMPARE_AND_WRITE: { 10604 struct scsi_compare_and_write *cdb; 10605 10606 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10607 10608 *lba = scsi_8btou64(cdb->addr); 10609 *len = cdb->length; 10610 break; 10611 } 10612 case READ_6: 10613 case WRITE_6: { 10614 struct scsi_rw_6 *cdb; 10615 10616 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10617 10618 *lba = scsi_3btoul(cdb->addr); 10619 /* only 5 bits are valid in the most significant address byte */ 10620 *lba &= 0x1fffff; 10621 *len = cdb->length; 10622 break; 10623 } 10624 case READ_10: 10625 case WRITE_10: { 10626 struct scsi_rw_10 *cdb; 10627 10628 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10629 10630 *lba = scsi_4btoul(cdb->addr); 10631 *len = scsi_2btoul(cdb->length); 10632 break; 10633 } 10634 case WRITE_VERIFY_10: { 10635 struct scsi_write_verify_10 *cdb; 10636 10637 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10638 10639 *lba = scsi_4btoul(cdb->addr); 10640 *len = scsi_2btoul(cdb->length); 10641 break; 10642 } 10643 case READ_12: 10644 case WRITE_12: { 10645 struct scsi_rw_12 *cdb; 10646 10647 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10648 10649 *lba = scsi_4btoul(cdb->addr); 10650 *len = scsi_4btoul(cdb->length); 10651 break; 10652 } 10653 case WRITE_VERIFY_12: { 10654 struct scsi_write_verify_12 *cdb; 10655 10656 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10657 10658 *lba = scsi_4btoul(cdb->addr); 10659 *len = scsi_4btoul(cdb->length); 10660 break; 10661 } 10662 case READ_16: 10663 case WRITE_16: { 10664 struct scsi_rw_16 *cdb; 10665 10666 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10667 10668 *lba = scsi_8btou64(cdb->addr); 10669 *len = scsi_4btoul(cdb->length); 10670 break; 10671 } 10672 case WRITE_ATOMIC_16: { 10673 struct scsi_write_atomic_16 *cdb; 10674 10675 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10676 10677 *lba = scsi_8btou64(cdb->addr); 10678 *len = scsi_2btoul(cdb->length); 10679 break; 10680 } 10681 case WRITE_VERIFY_16: { 10682 struct scsi_write_verify_16 *cdb; 10683 10684 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10685 10686 *lba = scsi_8btou64(cdb->addr); 10687 *len = scsi_4btoul(cdb->length); 10688 break; 10689 } 10690 case WRITE_SAME_10: { 10691 struct scsi_write_same_10 *cdb; 10692 10693 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10694 10695 *lba = scsi_4btoul(cdb->addr); 10696 *len = scsi_2btoul(cdb->length); 10697 break; 10698 } 10699 case WRITE_SAME_16: { 10700 struct scsi_write_same_16 *cdb; 10701 10702 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10703 10704 *lba = scsi_8btou64(cdb->addr); 10705 *len = scsi_4btoul(cdb->length); 10706 break; 10707 } 
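	/*
	 * The VERIFY(10/12/16) CDBs carry the same LBA and transfer length
	 * layout as the corresponding READ/WRITE commands.
	 */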
10708 case VERIFY_10: { 10709 struct scsi_verify_10 *cdb; 10710 10711 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10712 10713 *lba = scsi_4btoul(cdb->addr); 10714 *len = scsi_2btoul(cdb->length); 10715 break; 10716 } 10717 case VERIFY_12: { 10718 struct scsi_verify_12 *cdb; 10719 10720 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10721 10722 *lba = scsi_4btoul(cdb->addr); 10723 *len = scsi_4btoul(cdb->length); 10724 break; 10725 } 10726 case VERIFY_16: { 10727 struct scsi_verify_16 *cdb; 10728 10729 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10730 10731 *lba = scsi_8btou64(cdb->addr); 10732 *len = scsi_4btoul(cdb->length); 10733 break; 10734 } 10735 case UNMAP: { 10736 *lba = 0; 10737 *len = UINT64_MAX; 10738 break; 10739 } 10740 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10741 struct scsi_get_lba_status *cdb; 10742 10743 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10744 *lba = scsi_8btou64(cdb->addr); 10745 *len = UINT32_MAX; 10746 break; 10747 } 10748 default: 10749 return (1); 10750 break; /* NOTREACHED */ 10751 } 10752 10753 return (0); 10754 } 10755 10756 static ctl_action 10757 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10758 bool seq) 10759 { 10760 uint64_t endlba1, endlba2; 10761 10762 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10763 endlba2 = lba2 + len2 - 1; 10764 10765 if ((endlba1 < lba2) || (endlba2 < lba1)) 10766 return (CTL_ACTION_PASS); 10767 else 10768 return (CTL_ACTION_BLOCK); 10769 } 10770 10771 static int 10772 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10773 { 10774 struct ctl_ptr_len_flags *ptrlen; 10775 struct scsi_unmap_desc *buf, *end, *range; 10776 uint64_t lba; 10777 uint32_t len; 10778 10779 /* If not UNMAP -- go other way. */ 10780 if (io->io_hdr.io_type != CTL_IO_SCSI || 10781 io->scsiio.cdb[0] != UNMAP) 10782 return (CTL_ACTION_ERROR); 10783 10784 /* If UNMAP without data -- block and wait for data. */ 10785 ptrlen = (struct ctl_ptr_len_flags *) 10786 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10787 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10788 ptrlen->ptr == NULL) 10789 return (CTL_ACTION_BLOCK); 10790 10791 /* UNMAP with data -- check for collision. 
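 * Walk every (LBA, length) descriptor in the UNMAP parameter list and
 * block the new I/O if any descriptor overlaps the extent being checked.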
*/ 10792 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10793 end = buf + ptrlen->len / sizeof(*buf); 10794 for (range = buf; range < end; range++) { 10795 lba = scsi_8btou64(range->lba); 10796 len = scsi_4btoul(range->length); 10797 if ((lba < lba2 + len2) && (lba + len > lba2)) 10798 return (CTL_ACTION_BLOCK); 10799 } 10800 return (CTL_ACTION_PASS); 10801 } 10802 10803 static ctl_action 10804 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10805 { 10806 uint64_t lba1, lba2; 10807 uint64_t len1, len2; 10808 int retval; 10809 10810 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10811 return (CTL_ACTION_ERROR); 10812 10813 retval = ctl_extent_check_unmap(io1, lba2, len2); 10814 if (retval != CTL_ACTION_ERROR) 10815 return (retval); 10816 10817 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10818 return (CTL_ACTION_ERROR); 10819 10820 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10821 seq = FALSE; 10822 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10823 } 10824 10825 static ctl_action 10826 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10827 { 10828 uint64_t lba1, lba2; 10829 uint64_t len1, len2; 10830 10831 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10832 return (CTL_ACTION_PASS); 10833 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10834 return (CTL_ACTION_ERROR); 10835 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10836 return (CTL_ACTION_ERROR); 10837 10838 if (lba1 + len1 == lba2) 10839 return (CTL_ACTION_BLOCK); 10840 return (CTL_ACTION_PASS); 10841 } 10842 10843 static ctl_action 10844 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10845 union ctl_io *ooa_io) 10846 { 10847 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10848 const ctl_serialize_action *serialize_row; 10849 10850 /* 10851 * The initiator attempted multiple untagged commands at the same 10852 * time. Can't do that. 10853 */ 10854 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10855 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10856 && ((pending_io->io_hdr.nexus.targ_port == 10857 ooa_io->io_hdr.nexus.targ_port) 10858 && (pending_io->io_hdr.nexus.initid == 10859 ooa_io->io_hdr.nexus.initid)) 10860 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10861 CTL_FLAG_STATUS_SENT)) == 0)) 10862 return (CTL_ACTION_OVERLAP); 10863 10864 /* 10865 * The initiator attempted to send multiple tagged commands with 10866 * the same ID. (It's fine if different initiators have the same 10867 * tag ID.) 10868 * 10869 * Even if all of those conditions are true, we don't kill the I/O 10870 * if the command ahead of us has been aborted. We won't end up 10871 * sending it to the FETD, and it's perfectly legal to resend a 10872 * command with the same tag number as long as the previous 10873 * instance of this tag number has been aborted somehow. 10874 */ 10875 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10876 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10877 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10878 && ((pending_io->io_hdr.nexus.targ_port == 10879 ooa_io->io_hdr.nexus.targ_port) 10880 && (pending_io->io_hdr.nexus.initid == 10881 ooa_io->io_hdr.nexus.initid)) 10882 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10883 CTL_FLAG_STATUS_SENT)) == 0)) 10884 return (CTL_ACTION_OVERLAP_TAG); 10885 10886 /* 10887 * If we get a head of queue tag, SAM-3 says that we should 10888 * immediately execute it. 10889 * 10890 * What happens if this command would normally block for some other 10891 * reason? e.g. 
a request sense with a head of queue tag 10892 * immediately after a write. Normally that would block, but this 10893 * will result in its getting executed immediately... 10894 * 10895 * We currently return "pass" instead of "skip", so we'll end up 10896 * going through the rest of the queue to check for overlapped tags. 10897 * 10898 * XXX KDM check for other types of blockage first?? 10899 */ 10900 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10901 return (CTL_ACTION_PASS); 10902 10903 /* 10904 * Ordered tags have to block until all items ahead of them 10905 * have completed. If we get called with an ordered tag, we always 10906 * block, if something else is ahead of us in the queue. 10907 */ 10908 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10909 return (CTL_ACTION_BLOCK); 10910 10911 /* 10912 * Simple tags get blocked until all head of queue and ordered tags 10913 * ahead of them have completed. I'm lumping untagged commands in 10914 * with simple tags here. XXX KDM is that the right thing to do? 10915 */ 10916 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10917 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10918 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10919 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10920 return (CTL_ACTION_BLOCK); 10921 10922 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10923 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 10924 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 10925 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 10926 pending_io->scsiio.cdb[1], pending_io)); 10927 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10928 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 10929 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 10930 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 10931 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 10932 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 10933 ooa_io->scsiio.cdb[1], ooa_io)); 10934 10935 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10936 10937 switch (serialize_row[pending_entry->seridx]) { 10938 case CTL_SER_BLOCK: 10939 return (CTL_ACTION_BLOCK); 10940 case CTL_SER_EXTENT: 10941 return (ctl_extent_check(ooa_io, pending_io, 10942 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10943 case CTL_SER_EXTENTOPT: 10944 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10945 SCP_QUEUE_ALG_UNRESTRICTED) 10946 return (ctl_extent_check(ooa_io, pending_io, 10947 (lun->be_lun && 10948 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10949 return (CTL_ACTION_PASS); 10950 case CTL_SER_EXTENTSEQ: 10951 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10952 return (ctl_extent_check_seq(ooa_io, pending_io)); 10953 return (CTL_ACTION_PASS); 10954 case CTL_SER_PASS: 10955 return (CTL_ACTION_PASS); 10956 case CTL_SER_BLOCKOPT: 10957 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10958 SCP_QUEUE_ALG_UNRESTRICTED) 10959 return (CTL_ACTION_BLOCK); 10960 return (CTL_ACTION_PASS); 10961 case CTL_SER_SKIP: 10962 return (CTL_ACTION_SKIP); 10963 default: 10964 panic("%s: Invalid serialization value %d for %d => %d", 10965 __func__, serialize_row[pending_entry->seridx], 10966 pending_entry->seridx, ooa_entry->seridx); 10967 } 10968 10969 return (CTL_ACTION_ERROR); 10970 } 10971 10972 /* 10973 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 
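 * The pending I/O is checked against each earlier arrival in turn, walking
 * backwards from starting_io toward the head of the queue.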
10974 * Assumptions: 10975 * - pending_io is generally either incoming, or on the blocked queue 10976 * - starting I/O is the I/O we want to start the check with. 10977 */ 10978 static ctl_action 10979 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10980 union ctl_io *starting_io) 10981 { 10982 union ctl_io *ooa_io; 10983 ctl_action action; 10984 10985 mtx_assert(&lun->lun_lock, MA_OWNED); 10986 10987 /* 10988 * Run back along the OOA queue, starting with the current 10989 * blocked I/O and going through every I/O before it on the 10990 * queue. If starting_io is NULL, we'll just end up returning 10991 * CTL_ACTION_PASS. 10992 */ 10993 for (ooa_io = starting_io; ooa_io != NULL; 10994 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10995 ooa_links)){ 10996 10997 /* 10998 * This routine just checks to see whether 10999 * cur_blocked is blocked by ooa_io, which is ahead 11000 * of it in the queue. It doesn't queue/dequeue 11001 * cur_blocked. 11002 */ 11003 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11004 switch (action) { 11005 case CTL_ACTION_BLOCK: 11006 case CTL_ACTION_OVERLAP: 11007 case CTL_ACTION_OVERLAP_TAG: 11008 case CTL_ACTION_SKIP: 11009 case CTL_ACTION_ERROR: 11010 return (action); 11011 break; /* NOTREACHED */ 11012 case CTL_ACTION_PASS: 11013 break; 11014 default: 11015 panic("%s: Invalid action %d\n", __func__, action); 11016 } 11017 } 11018 11019 return (CTL_ACTION_PASS); 11020 } 11021 11022 /* 11023 * Assumptions: 11024 * - An I/O has just completed, and has been removed from the per-LUN OOA 11025 * queue, so some items on the blocked queue may now be unblocked. 11026 */ 11027 static int 11028 ctl_check_blocked(struct ctl_lun *lun) 11029 { 11030 struct ctl_softc *softc = lun->ctl_softc; 11031 union ctl_io *cur_blocked, *next_blocked; 11032 11033 mtx_assert(&lun->lun_lock, MA_OWNED); 11034 11035 /* 11036 * Run forward from the head of the blocked queue, checking each 11037 * entry against the I/Os prior to it on the OOA queue to see if 11038 * there is still any blockage. 11039 * 11040 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 11041 * with our removing a variable on it while it is traversing the 11042 * list. 11043 */ 11044 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11045 cur_blocked != NULL; cur_blocked = next_blocked) { 11046 union ctl_io *prev_ooa; 11047 ctl_action action; 11048 11049 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11050 blocked_links); 11051 11052 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11053 ctl_ooaq, ooa_links); 11054 11055 /* 11056 * If cur_blocked happens to be the first item in the OOA 11057 * queue now, prev_ooa will be NULL, and the action 11058 * returned will just be CTL_ACTION_PASS. 11059 */ 11060 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 11061 11062 switch (action) { 11063 case CTL_ACTION_BLOCK: 11064 /* Nothing to do here, still blocked */ 11065 break; 11066 case CTL_ACTION_OVERLAP: 11067 case CTL_ACTION_OVERLAP_TAG: 11068 /* 11069 * This shouldn't happen! In theory we've already 11070 * checked this command for overlap... 11071 */ 11072 break; 11073 case CTL_ACTION_PASS: 11074 case CTL_ACTION_SKIP: { 11075 const struct ctl_cmd_entry *entry; 11076 11077 /* 11078 * The skip case shouldn't happen, this transaction 11079 * should have never made it onto the blocked queue. 11080 */ 11081 /* 11082 * This I/O is no longer blocked, we can remove it 11083 * from the blocked queue. 
Since this is a TAILQ 11084 * (doubly linked list), we can do O(1) removals 11085 * from any place on the list. 11086 */ 11087 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11088 blocked_links); 11089 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11090 11091 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 11092 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 11093 /* 11094 * Need to send IO back to original side to 11095 * run 11096 */ 11097 union ctl_ha_msg msg_info; 11098 11099 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11100 msg_info.hdr.original_sc = 11101 cur_blocked->io_hdr.original_sc; 11102 msg_info.hdr.serializing_sc = cur_blocked; 11103 msg_info.hdr.msg_type = CTL_MSG_R2R; 11104 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11105 sizeof(msg_info.hdr), M_NOWAIT); 11106 break; 11107 } 11108 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11109 11110 /* 11111 * Check this I/O for LUN state changes that may 11112 * have happened while this command was blocked. 11113 * The LUN state may have been changed by a command 11114 * ahead of us in the queue, so we need to re-check 11115 * for any states that can be caused by SCSI 11116 * commands. 11117 */ 11118 if (ctl_scsiio_lun_check(lun, entry, 11119 &cur_blocked->scsiio) == 0) { 11120 cur_blocked->io_hdr.flags |= 11121 CTL_FLAG_IS_WAS_ON_RTR; 11122 ctl_enqueue_rtr(cur_blocked); 11123 } else 11124 ctl_done(cur_blocked); 11125 break; 11126 } 11127 default: 11128 /* 11129 * This probably shouldn't happen -- we shouldn't 11130 * get CTL_ACTION_ERROR, or anything else. 11131 */ 11132 break; 11133 } 11134 } 11135 11136 return (CTL_RETVAL_COMPLETE); 11137 } 11138 11139 /* 11140 * This routine (with one exception) checks LUN flags that can be set by 11141 * commands ahead of us in the OOA queue. These flags have to be checked 11142 * when a command initially comes in, and when we pull a command off the 11143 * blocked queue and are preparing to execute it. The reason we have to 11144 * check these flags for commands on the blocked queue is that the LUN 11145 * state may have been changed by a command ahead of us while we're on the 11146 * blocked queue. 11147 * 11148 * Ordering is somewhat important with these checks, so please pay 11149 * careful attention to the placement of any new checks. 11150 */ 11151 static int 11152 ctl_scsiio_lun_check(struct ctl_lun *lun, 11153 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11154 { 11155 struct ctl_softc *softc = lun->ctl_softc; 11156 int retval; 11157 uint32_t residx; 11158 11159 retval = 0; 11160 11161 mtx_assert(&lun->lun_lock, MA_OWNED); 11162 11163 /* 11164 * If this shelf is a secondary shelf controller, we may have to 11165 * reject some commands disallowed by HA mode and link state. 
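 * An offline HA link makes the LUN unavailable, a peer that is not yet
 * primary reports it as transitioning, and active/standby mode reports
 * standby -- unless the command is explicitly flagged as allowed in the
 * given state.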
11166 */ 11167 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11168 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11169 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11170 ctl_set_lun_unavail(ctsio); 11171 retval = 1; 11172 goto bailout; 11173 } 11174 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11175 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11176 ctl_set_lun_transit(ctsio); 11177 retval = 1; 11178 goto bailout; 11179 } 11180 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11181 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11182 ctl_set_lun_standby(ctsio); 11183 retval = 1; 11184 goto bailout; 11185 } 11186 11187 /* The rest of checks are only done on executing side */ 11188 if (softc->ha_mode == CTL_HA_MODE_XFER) 11189 goto bailout; 11190 } 11191 11192 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11193 if (lun->be_lun && 11194 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11195 ctl_set_hw_write_protected(ctsio); 11196 retval = 1; 11197 goto bailout; 11198 } 11199 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11200 ctl_set_sense(ctsio, /*current_error*/ 1, 11201 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11202 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11203 retval = 1; 11204 goto bailout; 11205 } 11206 } 11207 11208 /* 11209 * Check for a reservation conflict. If this command isn't allowed 11210 * even on reserved LUNs, and if this initiator isn't the one who 11211 * reserved us, reject the command with a reservation conflict. 11212 */ 11213 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11214 if ((lun->flags & CTL_LUN_RESERVED) 11215 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11216 if (lun->res_idx != residx) { 11217 ctl_set_reservation_conflict(ctsio); 11218 retval = 1; 11219 goto bailout; 11220 } 11221 } 11222 11223 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11224 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11225 /* No reservation or command is allowed. */; 11226 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11227 (lun->pr_res_type == SPR_TYPE_WR_EX || 11228 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11229 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11230 /* The command is allowed for Write Exclusive resv. */; 11231 } else { 11232 /* 11233 * if we aren't registered or it's a res holder type 11234 * reservation and this isn't the res holder then set a 11235 * conflict. 
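 * (Reservation type values below 4 are the single-holder Write Exclusive
 * and Exclusive Access types; for the registrants-only and all-registrants
 * variants, being registered is sufficient here.)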
11236 */ 11237 if (ctl_get_prkey(lun, residx) == 0 || 11238 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11239 ctl_set_reservation_conflict(ctsio); 11240 retval = 1; 11241 goto bailout; 11242 } 11243 } 11244 11245 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11246 if (lun->flags & CTL_LUN_EJECTED) 11247 ctl_set_lun_ejected(ctsio); 11248 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11249 if (lun->flags & CTL_LUN_REMOVABLE) 11250 ctl_set_lun_no_media(ctsio); 11251 else 11252 ctl_set_lun_int_reqd(ctsio); 11253 } else if (lun->flags & CTL_LUN_STOPPED) 11254 ctl_set_lun_stopped(ctsio); 11255 else 11256 goto bailout; 11257 retval = 1; 11258 goto bailout; 11259 } 11260 11261 bailout: 11262 return (retval); 11263 } 11264 11265 static void 11266 ctl_failover_io(union ctl_io *io, int have_lock) 11267 { 11268 ctl_set_busy(&io->scsiio); 11269 ctl_done(io); 11270 } 11271 11272 static void 11273 ctl_failover_lun(union ctl_io *rio) 11274 { 11275 struct ctl_softc *softc = CTL_SOFTC(rio); 11276 struct ctl_lun *lun; 11277 struct ctl_io_hdr *io, *next_io; 11278 uint32_t targ_lun; 11279 11280 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11281 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); 11282 11283 /* Find and lock the LUN. */ 11284 mtx_lock(&softc->ctl_lock); 11285 if (targ_lun >= ctl_max_luns || 11286 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11287 mtx_unlock(&softc->ctl_lock); 11288 return; 11289 } 11290 mtx_lock(&lun->lun_lock); 11291 mtx_unlock(&softc->ctl_lock); 11292 if (lun->flags & CTL_LUN_DISABLED) { 11293 mtx_unlock(&lun->lun_lock); 11294 return; 11295 } 11296 11297 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11298 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11299 /* We are master */ 11300 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11301 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11302 io->flags |= CTL_FLAG_ABORT; 11303 io->flags |= CTL_FLAG_FAILOVER; 11304 } else { /* This can be only due to DATAMOVE */ 11305 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11306 io->flags &= ~CTL_FLAG_DMA_INPROG; 11307 io->flags |= CTL_FLAG_IO_ACTIVE; 11308 io->port_status = 31340; 11309 ctl_enqueue_isc((union ctl_io *)io); 11310 } 11311 } 11312 /* We are slave */ 11313 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11314 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11315 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11316 io->flags |= CTL_FLAG_FAILOVER; 11317 } else { 11318 ctl_set_busy(&((union ctl_io *)io)-> 11319 scsiio); 11320 ctl_done((union ctl_io *)io); 11321 } 11322 } 11323 } 11324 } else { /* SERIALIZE modes */ 11325 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11326 next_io) { 11327 /* We are master */ 11328 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11329 TAILQ_REMOVE(&lun->blocked_queue, io, 11330 blocked_links); 11331 io->flags &= ~CTL_FLAG_BLOCKED; 11332 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11333 ctl_free_io((union ctl_io *)io); 11334 } 11335 } 11336 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11337 /* We are master */ 11338 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11339 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11340 ctl_free_io((union ctl_io *)io); 11341 } 11342 /* We are slave */ 11343 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11344 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11345 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11346 ctl_set_busy(&((union ctl_io *)io)-> 11347 scsiio); 11348 ctl_done((union ctl_io *)io); 11349 } 11350 } 11351 } 11352 ctl_check_blocked(lun); 11353 } 11354 mtx_unlock(&lun->lun_lock); 11355 } 11356 11357 static int
11358 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11359 { 11360 struct ctl_lun *lun; 11361 const struct ctl_cmd_entry *entry; 11362 uint32_t initidx, targ_lun; 11363 int retval = 0; 11364 11365 lun = NULL; 11366 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11367 if (targ_lun < ctl_max_luns) 11368 lun = softc->ctl_luns[targ_lun]; 11369 if (lun) { 11370 /* 11371 * If the LUN is invalid, pretend that it doesn't exist. 11372 * It will go away as soon as all pending I/O has been 11373 * completed. 11374 */ 11375 mtx_lock(&lun->lun_lock); 11376 if (lun->flags & CTL_LUN_DISABLED) { 11377 mtx_unlock(&lun->lun_lock); 11378 lun = NULL; 11379 } 11380 } 11381 CTL_LUN(ctsio) = lun; 11382 if (lun) { 11383 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11384 11385 /* 11386 * Every I/O goes into the OOA queue for a particular LUN, 11387 * and stays there until completion. 11388 */ 11389 #ifdef CTL_TIME_IO 11390 if (TAILQ_EMPTY(&lun->ooa_queue)) 11391 lun->idle_time += getsbinuptime() - lun->last_busy; 11392 #endif 11393 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11394 } 11395 11396 /* Get command entry and return error if it is unsupported. */ 11397 entry = ctl_validate_command(ctsio); 11398 if (entry == NULL) { 11399 if (lun) 11400 mtx_unlock(&lun->lun_lock); 11401 return (retval); 11402 } 11403 11404 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11405 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11406 11407 /* 11408 * Check to see whether we can send this command to LUNs that don't 11409 * exist. This should pretty much only be the case for inquiry 11410 * and request sense. Further checks, below, really require having 11411 * a LUN, so we can't really check the command anymore. Just put 11412 * it on the rtr queue. 11413 */ 11414 if (lun == NULL) { 11415 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11416 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11417 ctl_enqueue_rtr((union ctl_io *)ctsio); 11418 return (retval); 11419 } 11420 11421 ctl_set_unsupported_lun(ctsio); 11422 ctl_done((union ctl_io *)ctsio); 11423 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11424 return (retval); 11425 } else { 11426 /* 11427 * Make sure we support this particular command on this LUN. 11428 * e.g., we don't support writes to the control LUN. 11429 */ 11430 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11431 mtx_unlock(&lun->lun_lock); 11432 ctl_set_invalid_opcode(ctsio); 11433 ctl_done((union ctl_io *)ctsio); 11434 return (retval); 11435 } 11436 } 11437 11438 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11439 11440 /* 11441 * If we've got a request sense, it'll clear the contingent 11442 * allegiance condition. Otherwise, if we have a CA condition for 11443 * this initiator, clear it, because it sent down a command other 11444 * than request sense. 11445 */ 11446 if (ctsio->cdb[0] != REQUEST_SENSE) { 11447 struct scsi_sense_data *ps; 11448 11449 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 11450 if (ps != NULL) 11451 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; 11452 } 11453 11454 /* 11455 * If the command has this flag set, it handles its own unit 11456 * attention reporting, so we shouldn't do anything. Otherwise we 11457 * check for any pending unit attentions, and send them back to the 11458 * initiator. We only do this when a command initially comes in, 11459 * not when we pull it off the blocked queue.
11460 * 11461 * According to SAM-3, section 5.3.2, the order that things get 11462 * presented back to the host is basically unit attentions caused 11463 * by some sort of reset event, busy status, reservation conflicts 11464 * or task set full, and finally any other status. 11465 * 11466 * One issue here is that some of the unit attentions we report 11467 * don't fall into the "reset" category (e.g. "reported luns data 11468 * has changed"). So reporting it here, before the reservation 11469 * check, may be technically wrong. I guess the only thing to do 11470 * would be to check for and report the reset events here, and then 11471 * check for the other unit attention types after we check for a 11472 * reservation conflict. 11473 * 11474 * XXX KDM need to fix this 11475 */ 11476 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11477 ctl_ua_type ua_type; 11478 u_int sense_len = 0; 11479 11480 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11481 &sense_len, SSD_TYPE_NONE); 11482 if (ua_type != CTL_UA_NONE) { 11483 mtx_unlock(&lun->lun_lock); 11484 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11485 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11486 ctsio->sense_len = sense_len; 11487 ctl_done((union ctl_io *)ctsio); 11488 return (retval); 11489 } 11490 } 11491 11492 11493 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11494 mtx_unlock(&lun->lun_lock); 11495 ctl_done((union ctl_io *)ctsio); 11496 return (retval); 11497 } 11498 11499 /* 11500 * XXX CHD this is where we want to send IO to the other side if 11501 * this LUN is secondary on this SC. We will need to make a copy 11502 * of the IO and flag the IO on this side as SENT_2OTHER and flag 11503 * the copy we send as FROM_OTHER. 11504 * We also need to stuff the address of the original IO so we can 11505 * find it easily. Something similar will need to be done on the other 11506 * side so when we are done we can find the copy.
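* The block below implements this: when the LUN is secondary here, primary on the peer, and the command is not marked to run locally, the I/O is flagged SENT_2OTHER_SC and a CTL_MSG_SERIALIZE message carrying the nexus, tag and CDB is sent to the peer; original_sc in that message lets the peer hand status back to this I/O.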
11507 */ 11508 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11509 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11510 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11511 union ctl_ha_msg msg_info; 11512 int isc_retval; 11513 11514 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11515 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11516 mtx_unlock(&lun->lun_lock); 11517 11518 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11519 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11520 msg_info.hdr.serializing_sc = NULL; 11521 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11522 msg_info.scsi.tag_num = ctsio->tag_num; 11523 msg_info.scsi.tag_type = ctsio->tag_type; 11524 msg_info.scsi.cdb_len = ctsio->cdb_len; 11525 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11526 11527 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11528 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11529 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11530 ctl_set_busy(ctsio); 11531 ctl_done((union ctl_io *)ctsio); 11532 return (retval); 11533 } 11534 return (retval); 11535 } 11536 11537 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11538 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11539 ctl_ooaq, ooa_links))) { 11540 case CTL_ACTION_BLOCK: 11541 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11542 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11543 blocked_links); 11544 mtx_unlock(&lun->lun_lock); 11545 return (retval); 11546 case CTL_ACTION_PASS: 11547 case CTL_ACTION_SKIP: 11548 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11549 mtx_unlock(&lun->lun_lock); 11550 ctl_enqueue_rtr((union ctl_io *)ctsio); 11551 break; 11552 case CTL_ACTION_OVERLAP: 11553 mtx_unlock(&lun->lun_lock); 11554 ctl_set_overlapped_cmd(ctsio); 11555 ctl_done((union ctl_io *)ctsio); 11556 break; 11557 case CTL_ACTION_OVERLAP_TAG: 11558 mtx_unlock(&lun->lun_lock); 11559 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11560 ctl_done((union ctl_io *)ctsio); 11561 break; 11562 case CTL_ACTION_ERROR: 11563 default: 11564 mtx_unlock(&lun->lun_lock); 11565 ctl_set_internal_failure(ctsio, 11566 /*sks_valid*/ 0, 11567 /*retry_count*/ 0); 11568 ctl_done((union ctl_io *)ctsio); 11569 break; 11570 } 11571 return (retval); 11572 } 11573 11574 const struct ctl_cmd_entry * 11575 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11576 { 11577 const struct ctl_cmd_entry *entry; 11578 int service_action; 11579 11580 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11581 if (sa) 11582 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11583 if (entry->flags & CTL_CMD_FLAG_SA5) { 11584 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11585 entry = &((const struct ctl_cmd_entry *) 11586 entry->execute)[service_action]; 11587 } 11588 return (entry); 11589 } 11590 11591 const struct ctl_cmd_entry * 11592 ctl_validate_command(struct ctl_scsiio *ctsio) 11593 { 11594 const struct ctl_cmd_entry *entry; 11595 int i, sa; 11596 uint8_t diff; 11597 11598 entry = ctl_get_cmd_entry(ctsio, &sa); 11599 if (entry->execute == NULL) { 11600 if (sa) 11601 ctl_set_invalid_field(ctsio, 11602 /*sks_valid*/ 1, 11603 /*command*/ 1, 11604 /*field*/ 1, 11605 /*bit_valid*/ 1, 11606 /*bit*/ 4); 11607 else 11608 ctl_set_invalid_opcode(ctsio); 11609 ctl_done((union ctl_io *)ctsio); 11610 return (NULL); 11611 } 11612 KASSERT(entry->length > 0, 11613 ("Not defined length for command 0x%02x/0x%02x", 11614 ctsio->cdb[0], ctsio->cdb[1])); 11615 for (i = 1; i < entry->length; i++) { 11616 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11617 if (diff == 0) 11618 continue; 11619 
ctl_set_invalid_field(ctsio, 11620 /*sks_valid*/ 1, 11621 /*command*/ 1, 11622 /*field*/ i, 11623 /*bit_valid*/ 1, 11624 /*bit*/ fls(diff) - 1); 11625 ctl_done((union ctl_io *)ctsio); 11626 return (NULL); 11627 } 11628 return (entry); 11629 } 11630 11631 static int 11632 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11633 { 11634 11635 switch (lun_type) { 11636 case T_DIRECT: 11637 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11638 return (0); 11639 break; 11640 case T_PROCESSOR: 11641 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11642 return (0); 11643 break; 11644 case T_CDROM: 11645 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11646 return (0); 11647 break; 11648 default: 11649 return (0); 11650 } 11651 return (1); 11652 } 11653 11654 static int 11655 ctl_scsiio(struct ctl_scsiio *ctsio) 11656 { 11657 int retval; 11658 const struct ctl_cmd_entry *entry; 11659 11660 retval = CTL_RETVAL_COMPLETE; 11661 11662 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11663 11664 entry = ctl_get_cmd_entry(ctsio, NULL); 11665 11666 /* 11667 * If this I/O has been aborted, just send it straight to 11668 * ctl_done() without executing it. 11669 */ 11670 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11671 ctl_done((union ctl_io *)ctsio); 11672 goto bailout; 11673 } 11674 11675 /* 11676 * All the checks should have been handled by ctl_scsiio_precheck(). 11677 * We should be clear now to just execute the I/O. 11678 */ 11679 retval = entry->execute(ctsio); 11680 11681 bailout: 11682 return (retval); 11683 } 11684 11685 static int 11686 ctl_target_reset(union ctl_io *io) 11687 { 11688 struct ctl_softc *softc = CTL_SOFTC(io); 11689 struct ctl_port *port = CTL_PORT(io); 11690 struct ctl_lun *lun; 11691 uint32_t initidx; 11692 ctl_ua_type ua_type; 11693 11694 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11695 union ctl_ha_msg msg_info; 11696 11697 msg_info.hdr.nexus = io->io_hdr.nexus; 11698 msg_info.task.task_action = io->taskio.task_action; 11699 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11700 msg_info.hdr.original_sc = NULL; 11701 msg_info.hdr.serializing_sc = NULL; 11702 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11703 sizeof(msg_info.task), M_WAITOK); 11704 } 11705 11706 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11707 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11708 ua_type = CTL_UA_TARG_RESET; 11709 else 11710 ua_type = CTL_UA_BUS_RESET; 11711 mtx_lock(&softc->ctl_lock); 11712 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11713 if (port != NULL && 11714 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11715 continue; 11716 ctl_do_lun_reset(lun, initidx, ua_type); 11717 } 11718 mtx_unlock(&softc->ctl_lock); 11719 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11720 return (0); 11721 } 11722 11723 /* 11724 * The LUN should always be set. The I/O is optional, and is used to 11725 * distinguish between I/Os sent by this initiator, and by other 11726 * initiators. We set unit attention for initiators other than this one. 11727 * SAM-3 is vague on this point. It does say that a unit attention should 11728 * be established for other initiators when a LUN is reset (see section 11729 * 5.7.3), but it doesn't specifically say that the unit attention should 11730 * be established for this particular initiator when a LUN is reset. 
Here 11731 * is the relevant text, from SAM-3 rev 8: 11732 * 11733 * 5.7.2 When a SCSI initiator port aborts its own tasks 11734 * 11735 * When a SCSI initiator port causes its own task(s) to be aborted, no 11736 * notification that the task(s) have been aborted shall be returned to 11737 * the SCSI initiator port other than the completion response for the 11738 * command or task management function action that caused the task(s) to 11739 * be aborted and notification(s) associated with related effects of the 11740 * action (e.g., a reset unit attention condition). 11741 * 11742 * XXX KDM for now, we're setting unit attention for all initiators. 11743 */ 11744 static void 11745 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11746 { 11747 union ctl_io *xio; 11748 int i; 11749 11750 mtx_lock(&lun->lun_lock); 11751 /* Abort tasks. */ 11752 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11753 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11754 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11755 } 11756 /* Clear CA. */ 11757 for (i = 0; i < ctl_max_ports; i++) { 11758 free(lun->pending_sense[i], M_CTL); 11759 lun->pending_sense[i] = NULL; 11760 } 11761 /* Clear reservation. */ 11762 lun->flags &= ~CTL_LUN_RESERVED; 11763 /* Clear prevent media removal. */ 11764 if (lun->prevent) { 11765 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11766 ctl_clear_mask(lun->prevent, i); 11767 lun->prevent_count = 0; 11768 } 11769 /* Clear TPC status */ 11770 ctl_tpc_lun_clear(lun, -1); 11771 /* Establish UA. */ 11772 #if 0 11773 ctl_est_ua_all(lun, initidx, ua_type); 11774 #else 11775 ctl_est_ua_all(lun, -1, ua_type); 11776 #endif 11777 mtx_unlock(&lun->lun_lock); 11778 } 11779 11780 static int 11781 ctl_lun_reset(union ctl_io *io) 11782 { 11783 struct ctl_softc *softc = CTL_SOFTC(io); 11784 struct ctl_lun *lun; 11785 uint32_t targ_lun, initidx; 11786 11787 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11788 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11789 mtx_lock(&softc->ctl_lock); 11790 if (targ_lun >= ctl_max_luns || 11791 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11792 mtx_unlock(&softc->ctl_lock); 11793 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11794 return (1); 11795 } 11796 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11797 mtx_unlock(&softc->ctl_lock); 11798 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11799 11800 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11801 union ctl_ha_msg msg_info; 11802 11803 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11804 msg_info.hdr.nexus = io->io_hdr.nexus; 11805 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11806 msg_info.hdr.original_sc = NULL; 11807 msg_info.hdr.serializing_sc = NULL; 11808 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11809 sizeof(msg_info.task), M_WAITOK); 11810 } 11811 return (0); 11812 } 11813 11814 static void 11815 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11816 int other_sc) 11817 { 11818 union ctl_io *xio; 11819 11820 mtx_assert(&lun->lun_lock, MA_OWNED); 11821 11822 /* 11823 * Run through the OOA queue and attempt to find the given I/O. 11824 * The target port, initiator ID, tag type and tag number have to 11825 * match the values that we got from the initiator. If we have an 11826 * untagged command to abort, simply abort the first untagged command 11827 * we come to. We only allow one untagged command at a time of course. 
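* (In this helper only the target port and initiator ID are actually matched; UINT32_MAX in either field acts as a wildcard.)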
11828 */ 11829 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11830 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11831 11832 if ((targ_port == UINT32_MAX || 11833 targ_port == xio->io_hdr.nexus.targ_port) && 11834 (init_id == UINT32_MAX || 11835 init_id == xio->io_hdr.nexus.initid)) { 11836 if (targ_port != xio->io_hdr.nexus.targ_port || 11837 init_id != xio->io_hdr.nexus.initid) 11838 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11839 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11840 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11841 union ctl_ha_msg msg_info; 11842 11843 msg_info.hdr.nexus = xio->io_hdr.nexus; 11844 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11845 msg_info.task.tag_num = xio->scsiio.tag_num; 11846 msg_info.task.tag_type = xio->scsiio.tag_type; 11847 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11848 msg_info.hdr.original_sc = NULL; 11849 msg_info.hdr.serializing_sc = NULL; 11850 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11851 sizeof(msg_info.task), M_NOWAIT); 11852 } 11853 } 11854 } 11855 } 11856 11857 static int 11858 ctl_abort_task_set(union ctl_io *io) 11859 { 11860 struct ctl_softc *softc = CTL_SOFTC(io); 11861 struct ctl_lun *lun; 11862 uint32_t targ_lun; 11863 11864 /* 11865 * Look up the LUN. 11866 */ 11867 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11868 mtx_lock(&softc->ctl_lock); 11869 if (targ_lun >= ctl_max_luns || 11870 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11871 mtx_unlock(&softc->ctl_lock); 11872 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11873 return (1); 11874 } 11875 11876 mtx_lock(&lun->lun_lock); 11877 mtx_unlock(&softc->ctl_lock); 11878 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11879 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11880 io->io_hdr.nexus.initid, 11881 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11882 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11883 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11884 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11885 } 11886 mtx_unlock(&lun->lun_lock); 11887 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11888 return (0); 11889 } 11890 11891 static void 11892 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11893 ctl_ua_type ua_type) 11894 { 11895 struct ctl_lun *lun; 11896 struct scsi_sense_data *ps; 11897 uint32_t p, i; 11898 11899 p = initidx / CTL_MAX_INIT_PER_PORT; 11900 i = initidx % CTL_MAX_INIT_PER_PORT; 11901 mtx_lock(&softc->ctl_lock); 11902 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11903 mtx_lock(&lun->lun_lock); 11904 /* Abort tasks. */ 11905 ctl_abort_tasks_lun(lun, p, i, 1); 11906 /* Clear CA. */ 11907 ps = lun->pending_sense[p]; 11908 if (ps != NULL) 11909 ps[i].error_code = 0; 11910 /* Clear reservation. */ 11911 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11912 lun->flags &= ~CTL_LUN_RESERVED; 11913 /* Clear prevent media removal. */ 11914 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11915 ctl_clear_mask(lun->prevent, initidx); 11916 lun->prevent_count--; 11917 } 11918 /* Clear TPC status */ 11919 ctl_tpc_lun_clear(lun, initidx); 11920 /* Establish UA. 
*/ 11921 ctl_est_ua(lun, initidx, ua_type); 11922 mtx_unlock(&lun->lun_lock); 11923 } 11924 mtx_unlock(&softc->ctl_lock); 11925 } 11926 11927 static int 11928 ctl_i_t_nexus_reset(union ctl_io *io) 11929 { 11930 struct ctl_softc *softc = CTL_SOFTC(io); 11931 uint32_t initidx; 11932 11933 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11934 union ctl_ha_msg msg_info; 11935 11936 msg_info.hdr.nexus = io->io_hdr.nexus; 11937 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11938 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11939 msg_info.hdr.original_sc = NULL; 11940 msg_info.hdr.serializing_sc = NULL; 11941 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11942 sizeof(msg_info.task), M_WAITOK); 11943 } 11944 11945 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11946 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 11947 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11948 return (0); 11949 } 11950 11951 static int 11952 ctl_abort_task(union ctl_io *io) 11953 { 11954 struct ctl_softc *softc = CTL_SOFTC(io); 11955 union ctl_io *xio; 11956 struct ctl_lun *lun; 11957 #if 0 11958 struct sbuf sb; 11959 char printbuf[128]; 11960 #endif 11961 int found; 11962 uint32_t targ_lun; 11963 11964 found = 0; 11965 11966 /* 11967 * Look up the LUN. 11968 */ 11969 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11970 mtx_lock(&softc->ctl_lock); 11971 if (targ_lun >= ctl_max_luns || 11972 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11973 mtx_unlock(&softc->ctl_lock); 11974 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11975 return (1); 11976 } 11977 11978 #if 0 11979 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11980 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11981 #endif 11982 11983 mtx_lock(&lun->lun_lock); 11984 mtx_unlock(&softc->ctl_lock); 11985 /* 11986 * Run through the OOA queue and attempt to find the given I/O. 11987 * The target port, initiator ID, tag type and tag number have to 11988 * match the values that we got from the initiator. If we have an 11989 * untagged command to abort, simply abort the first untagged command 11990 * we come to. We only allow one untagged command at a time of course. 11991 */ 11992 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11993 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11994 #if 0 11995 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11996 11997 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11998 lun->lun, xio->scsiio.tag_num, 11999 xio->scsiio.tag_type, 12000 (xio->io_hdr.blocked_links.tqe_prev 12001 == NULL) ? "" : " BLOCKED", 12002 (xio->io_hdr.flags & 12003 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 12004 (xio->io_hdr.flags & 12005 CTL_FLAG_ABORT) ? " ABORT" : "", 12006 (xio->io_hdr.flags & 12007 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 12008 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 12009 sbuf_finish(&sb); 12010 printf("%s\n", sbuf_data(&sb)); 12011 #endif 12012 12013 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12014 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12015 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12016 continue; 12017 12018 /* 12019 * If the abort says that the task is untagged, the 12020 * task in the queue must be untagged. Otherwise, 12021 * we just check to see whether the tag numbers 12022 * match. This is because the QLogic firmware 12023 * doesn't pass back the tag type in an abort 12024 * request. 
12025 */ 12026 #if 0 12027 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12028 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12029 || (xio->scsiio.tag_num == io->taskio.tag_num)) 12030 #endif 12031 /* 12032 * XXX KDM we've got problems with FC, because it 12033 * doesn't send down a tag type with aborts. So we 12034 * can only really go by the tag number... 12035 * This may cause problems with parallel SCSI. 12036 * Need to figure that out!! 12037 */ 12038 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12039 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12040 found = 1; 12041 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12042 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12043 union ctl_ha_msg msg_info; 12044 12045 msg_info.hdr.nexus = io->io_hdr.nexus; 12046 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12047 msg_info.task.tag_num = io->taskio.tag_num; 12048 msg_info.task.tag_type = io->taskio.tag_type; 12049 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12050 msg_info.hdr.original_sc = NULL; 12051 msg_info.hdr.serializing_sc = NULL; 12052 #if 0 12053 printf("Sent Abort to other side\n"); 12054 #endif 12055 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12056 sizeof(msg_info.task), M_NOWAIT); 12057 } 12058 #if 0 12059 printf("ctl_abort_task: found I/O to abort\n"); 12060 #endif 12061 } 12062 } 12063 mtx_unlock(&lun->lun_lock); 12064 12065 if (found == 0) { 12066 /* 12067 * This isn't really an error. It's entirely possible for 12068 * the abort and command completion to cross on the wire. 12069 * This is more of an informative/diagnostic error. 12070 */ 12071 #if 0 12072 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12073 "%u:%u:%u tag %d type %d\n", 12074 io->io_hdr.nexus.initid, 12075 io->io_hdr.nexus.targ_port, 12076 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12077 io->taskio.tag_type); 12078 #endif 12079 } 12080 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12081 return (0); 12082 } 12083 12084 static int 12085 ctl_query_task(union ctl_io *io, int task_set) 12086 { 12087 struct ctl_softc *softc = CTL_SOFTC(io); 12088 union ctl_io *xio; 12089 struct ctl_lun *lun; 12090 int found = 0; 12091 uint32_t targ_lun; 12092 12093 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12094 mtx_lock(&softc->ctl_lock); 12095 if (targ_lun >= ctl_max_luns || 12096 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12097 mtx_unlock(&softc->ctl_lock); 12098 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12099 return (1); 12100 } 12101 mtx_lock(&lun->lun_lock); 12102 mtx_unlock(&softc->ctl_lock); 12103 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12104 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12105 12106 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12107 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12108 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12109 continue; 12110 12111 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12112 found = 1; 12113 break; 12114 } 12115 } 12116 mtx_unlock(&lun->lun_lock); 12117 if (found) 12118 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12119 else 12120 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12121 return (0); 12122 } 12123 12124 static int 12125 ctl_query_async_event(union ctl_io *io) 12126 { 12127 struct ctl_softc *softc = CTL_SOFTC(io); 12128 struct ctl_lun *lun; 12129 ctl_ua_type ua; 12130 uint32_t targ_lun, initidx; 12131 12132 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12133 mtx_lock(&softc->ctl_lock); 12134 if (targ_lun >= ctl_max_luns || 12135 
(lun = softc->ctl_luns[targ_lun]) == NULL) { 12136 mtx_unlock(&softc->ctl_lock); 12137 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12138 return (1); 12139 } 12140 mtx_lock(&lun->lun_lock); 12141 mtx_unlock(&softc->ctl_lock); 12142 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12143 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12144 mtx_unlock(&lun->lun_lock); 12145 if (ua != CTL_UA_NONE) 12146 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12147 else 12148 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12149 return (0); 12150 } 12151 12152 static void 12153 ctl_run_task(union ctl_io *io) 12154 { 12155 int retval = 1; 12156 12157 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12158 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12159 ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type)); 12160 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12161 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12162 switch (io->taskio.task_action) { 12163 case CTL_TASK_ABORT_TASK: 12164 retval = ctl_abort_task(io); 12165 break; 12166 case CTL_TASK_ABORT_TASK_SET: 12167 case CTL_TASK_CLEAR_TASK_SET: 12168 retval = ctl_abort_task_set(io); 12169 break; 12170 case CTL_TASK_CLEAR_ACA: 12171 break; 12172 case CTL_TASK_I_T_NEXUS_RESET: 12173 retval = ctl_i_t_nexus_reset(io); 12174 break; 12175 case CTL_TASK_LUN_RESET: 12176 retval = ctl_lun_reset(io); 12177 break; 12178 case CTL_TASK_TARGET_RESET: 12179 case CTL_TASK_BUS_RESET: 12180 retval = ctl_target_reset(io); 12181 break; 12182 case CTL_TASK_PORT_LOGIN: 12183 break; 12184 case CTL_TASK_PORT_LOGOUT: 12185 break; 12186 case CTL_TASK_QUERY_TASK: 12187 retval = ctl_query_task(io, 0); 12188 break; 12189 case CTL_TASK_QUERY_TASK_SET: 12190 retval = ctl_query_task(io, 1); 12191 break; 12192 case CTL_TASK_QUERY_ASYNC_EVENT: 12193 retval = ctl_query_async_event(io); 12194 break; 12195 default: 12196 printf("%s: got unknown task management event %d\n", 12197 __func__, io->taskio.task_action); 12198 break; 12199 } 12200 if (retval == 0) 12201 io->io_hdr.status = CTL_SUCCESS; 12202 else 12203 io->io_hdr.status = CTL_ERROR; 12204 ctl_done(io); 12205 } 12206 12207 /* 12208 * For HA operation. Handle commands that come in from the other 12209 * controller. 12210 */ 12211 static void 12212 ctl_handle_isc(union ctl_io *io) 12213 { 12214 struct ctl_softc *softc = CTL_SOFTC(io); 12215 struct ctl_lun *lun; 12216 const struct ctl_cmd_entry *entry; 12217 uint32_t targ_lun; 12218 12219 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12220 switch (io->io_hdr.msg_type) { 12221 case CTL_MSG_SERIALIZE: 12222 ctl_serialize_other_sc_cmd(&io->scsiio); 12223 break; 12224 case CTL_MSG_R2R: /* Only used in SER_ONLY mode.
*/ 12225 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12226 if (targ_lun >= ctl_max_luns || 12227 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12228 ctl_done(io); 12229 break; 12230 } 12231 mtx_lock(&lun->lun_lock); 12232 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12233 mtx_unlock(&lun->lun_lock); 12234 ctl_done(io); 12235 break; 12236 } 12237 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12238 mtx_unlock(&lun->lun_lock); 12239 ctl_enqueue_rtr(io); 12240 break; 12241 case CTL_MSG_FINISH_IO: 12242 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12243 ctl_done(io); 12244 break; 12245 } 12246 if (targ_lun >= ctl_max_luns || 12247 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12248 ctl_free_io(io); 12249 break; 12250 } 12251 mtx_lock(&lun->lun_lock); 12252 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12253 ctl_check_blocked(lun); 12254 mtx_unlock(&lun->lun_lock); 12255 ctl_free_io(io); 12256 break; 12257 case CTL_MSG_PERS_ACTION: 12258 ctl_hndl_per_res_out_on_other_sc(io); 12259 ctl_free_io(io); 12260 break; 12261 case CTL_MSG_BAD_JUJU: 12262 ctl_done(io); 12263 break; 12264 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12265 ctl_datamove_remote(io); 12266 break; 12267 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12268 io->scsiio.be_move_done(io); 12269 break; 12270 case CTL_MSG_FAILOVER: 12271 ctl_failover_lun(io); 12272 ctl_free_io(io); 12273 break; 12274 default: 12275 printf("%s: Invalid message type %d\n", 12276 __func__, io->io_hdr.msg_type); 12277 ctl_free_io(io); 12278 break; 12279 } 12280 12281 } 12282 12283 12284 /* 12285 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12286 * there is no match. 12287 */ 12288 static ctl_lun_error_pattern 12289 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12290 { 12291 const struct ctl_cmd_entry *entry; 12292 ctl_lun_error_pattern filtered_pattern, pattern; 12293 12294 pattern = desc->error_pattern; 12295 12296 /* 12297 * XXX KDM we need more data passed into this function to match a 12298 * custom pattern, and we actually need to implement custom pattern 12299 * matching. 12300 */ 12301 if (pattern & CTL_LUN_PAT_CMD) 12302 return (CTL_LUN_PAT_CMD); 12303 12304 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12305 return (CTL_LUN_PAT_ANY); 12306 12307 entry = ctl_get_cmd_entry(ctsio, NULL); 12308 12309 filtered_pattern = entry->pattern & pattern; 12310 12311 /* 12312 * If the user requested specific flags in the pattern (e.g. 12313 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12314 * flags. 12315 * 12316 * If the user did not specify any flags, it doesn't matter whether 12317 * or not the command supports the flags. 12318 */ 12319 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12320 (pattern & ~CTL_LUN_PAT_MASK)) 12321 return (CTL_LUN_PAT_NONE); 12322 12323 /* 12324 * If the user asked for a range check, see if the requested LBA 12325 * range overlaps with this command's LBA range. 12326 */ 12327 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12328 uint64_t lba1; 12329 uint64_t len1; 12330 ctl_action action; 12331 int retval; 12332 12333 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12334 if (retval != 0) 12335 return (CTL_LUN_PAT_NONE); 12336 12337 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12338 desc->lba_range.len, FALSE); 12339 /* 12340 * A "pass" means that the LBA ranges don't overlap, so 12341 * this doesn't match the user's range criteria. 
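* Any other return from ctl_extent_check_lba() means the ranges overlap, so the filtered pattern is returned below.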
12342 */ 12343 if (action == CTL_ACTION_PASS) 12344 return (CTL_LUN_PAT_NONE); 12345 } 12346 12347 return (filtered_pattern); 12348 } 12349 12350 static void 12351 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12352 { 12353 struct ctl_error_desc *desc, *desc2; 12354 12355 mtx_assert(&lun->lun_lock, MA_OWNED); 12356 12357 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12358 ctl_lun_error_pattern pattern; 12359 /* 12360 * Check to see whether this particular command matches 12361 * the pattern in the descriptor. 12362 */ 12363 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12364 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12365 continue; 12366 12367 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12368 case CTL_LUN_INJ_ABORTED: 12369 ctl_set_aborted(&io->scsiio); 12370 break; 12371 case CTL_LUN_INJ_MEDIUM_ERR: 12372 ctl_set_medium_error(&io->scsiio, 12373 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12374 CTL_FLAG_DATA_OUT); 12375 break; 12376 case CTL_LUN_INJ_UA: 12377 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12378 * OCCURRED */ 12379 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12380 break; 12381 case CTL_LUN_INJ_CUSTOM: 12382 /* 12383 * We're assuming the user knows what he is doing. 12384 * Just copy the sense information without doing 12385 * checks. 12386 */ 12387 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12388 MIN(sizeof(desc->custom_sense), 12389 sizeof(io->scsiio.sense_data))); 12390 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12391 io->scsiio.sense_len = SSD_FULL_SIZE; 12392 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12393 break; 12394 case CTL_LUN_INJ_NONE: 12395 default: 12396 /* 12397 * If this is an error injection type we don't know 12398 * about, clear the continuous flag (if it is set) 12399 * so it will get deleted below. 12400 */ 12401 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12402 break; 12403 } 12404 /* 12405 * By default, each error injection action is a one-shot 12406 */ 12407 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12408 continue; 12409 12410 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12411 12412 free(desc, M_CTL); 12413 } 12414 } 12415 12416 #ifdef CTL_IO_DELAY 12417 static void 12418 ctl_datamove_timer_wakeup(void *arg) 12419 { 12420 union ctl_io *io; 12421 12422 io = (union ctl_io *)arg; 12423 12424 ctl_datamove(io); 12425 } 12426 #endif /* CTL_IO_DELAY */ 12427 12428 void 12429 ctl_datamove(union ctl_io *io) 12430 { 12431 void (*fe_datamove)(union ctl_io *io); 12432 12433 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12434 12435 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12436 12437 /* No data transferred yet. Frontend must update this when done. 
*/ 12438 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12439 12440 #ifdef CTL_TIME_IO 12441 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12442 char str[256]; 12443 char path_str[64]; 12444 struct sbuf sb; 12445 12446 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12447 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12448 12449 sbuf_cat(&sb, path_str); 12450 switch (io->io_hdr.io_type) { 12451 case CTL_IO_SCSI: 12452 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12453 sbuf_printf(&sb, "\n"); 12454 sbuf_cat(&sb, path_str); 12455 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12456 io->scsiio.tag_num, io->scsiio.tag_type); 12457 break; 12458 case CTL_IO_TASK: 12459 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12460 "Tag Type: %d\n", io->taskio.task_action, 12461 io->taskio.tag_num, io->taskio.tag_type); 12462 break; 12463 default: 12464 panic("%s: Invalid CTL I/O type %d\n", 12465 __func__, io->io_hdr.io_type); 12466 } 12467 sbuf_cat(&sb, path_str); 12468 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12469 (intmax_t)time_uptime - io->io_hdr.start_time); 12470 sbuf_finish(&sb); 12471 printf("%s", sbuf_data(&sb)); 12472 } 12473 #endif /* CTL_TIME_IO */ 12474 12475 #ifdef CTL_IO_DELAY 12476 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12477 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12478 } else { 12479 struct ctl_lun *lun; 12480 12481 lun = CTL_LUN(io); 12482 if ((lun != NULL) 12483 && (lun->delay_info.datamove_delay > 0)) { 12484 12485 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12486 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12487 callout_reset(&io->io_hdr.delay_callout, 12488 lun->delay_info.datamove_delay * hz, 12489 ctl_datamove_timer_wakeup, io); 12490 if (lun->delay_info.datamove_type == 12491 CTL_DELAY_TYPE_ONESHOT) 12492 lun->delay_info.datamove_delay = 0; 12493 return; 12494 } 12495 } 12496 #endif 12497 12498 /* 12499 * This command has been aborted. Set the port status, so we fail 12500 * the data move. 12501 */ 12502 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12503 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12504 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12505 io->io_hdr.nexus.targ_port, 12506 io->io_hdr.nexus.targ_lun); 12507 io->io_hdr.port_status = 31337; 12508 /* 12509 * Note that the backend, in this case, will get the 12510 * callback in its context. In other cases it may get 12511 * called in the frontend's interrupt thread context. 12512 */ 12513 io->scsiio.be_move_done(io); 12514 return; 12515 } 12516 12517 /* Don't confuse frontend with zero length data move. 
*/ 12518 if (io->scsiio.kern_data_len == 0) { 12519 io->scsiio.be_move_done(io); 12520 return; 12521 } 12522 12523 fe_datamove = CTL_PORT(io)->fe_datamove; 12524 fe_datamove(io); 12525 } 12526 12527 static void 12528 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12529 { 12530 union ctl_ha_msg msg; 12531 #ifdef CTL_TIME_IO 12532 struct bintime cur_bt; 12533 #endif 12534 12535 memset(&msg, 0, sizeof(msg)); 12536 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12537 msg.hdr.original_sc = io; 12538 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12539 msg.hdr.nexus = io->io_hdr.nexus; 12540 msg.hdr.status = io->io_hdr.status; 12541 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12542 msg.scsi.tag_num = io->scsiio.tag_num; 12543 msg.scsi.tag_type = io->scsiio.tag_type; 12544 msg.scsi.scsi_status = io->scsiio.scsi_status; 12545 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12546 io->scsiio.sense_len); 12547 msg.scsi.sense_len = io->scsiio.sense_len; 12548 msg.scsi.port_status = io->io_hdr.port_status; 12549 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12550 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12551 ctl_failover_io(io, /*have_lock*/ have_lock); 12552 return; 12553 } 12554 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12555 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12556 msg.scsi.sense_len, M_WAITOK); 12557 12558 #ifdef CTL_TIME_IO 12559 getbinuptime(&cur_bt); 12560 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12561 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12562 #endif 12563 io->io_hdr.num_dmas++; 12564 } 12565 12566 /* 12567 * The DMA to the remote side is done, now we need to tell the other side 12568 * we're done so it can continue with its data movement. 12569 */ 12570 static void 12571 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12572 { 12573 union ctl_io *io; 12574 uint32_t i; 12575 12576 io = rq->context; 12577 12578 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12579 printf("%s: ISC DMA write failed with error %d\n", __func__, 12580 rq->ret); 12581 ctl_set_internal_failure(&io->scsiio, 12582 /*sks_valid*/ 1, 12583 /*retry_count*/ rq->ret); 12584 } 12585 12586 ctl_dt_req_free(rq); 12587 12588 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12589 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12590 free(io->io_hdr.remote_sglist, M_CTL); 12591 io->io_hdr.remote_sglist = NULL; 12592 io->io_hdr.local_sglist = NULL; 12593 12594 /* 12595 * The data is in local and remote memory, so now we need to send 12596 * status (good or bad) back to the other side. 12597 */ 12598 ctl_send_datamove_done(io, /*have_lock*/ 0); 12599 } 12600 12601 /* 12602 * We've moved the data from the host/controller into local memory. Now we 12603 * need to push it over to the remote controller's memory. 12604 */ 12605 static int 12606 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12607 { 12608 int retval; 12609 12610 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12611 ctl_datamove_remote_write_cb); 12612 return (retval); 12613 } 12614 12615 static void 12616 ctl_datamove_remote_write(union ctl_io *io) 12617 { 12618 int retval; 12619 void (*fe_datamove)(union ctl_io *io); 12620 12621 /* 12622 * - Get the data from the host/HBA into local memory. 12623 * - DMA memory from the local controller to the remote controller. 12624 * - Send status back to the remote controller.
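* The first step happens here: the local S/G list is set up and handed to the FETD via fe_datamove(); the other two steps run from ctl_datamove_remote_dm_write_cb() and ctl_datamove_remote_write_cb() once that local transfer completes.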
12625 */ 12626 12627 retval = ctl_datamove_remote_sgl_setup(io); 12628 if (retval != 0) 12629 return; 12630 12631 /* Switch the pointer over so the FETD knows what to do */ 12632 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12633 12634 /* 12635 * Use a custom move done callback, since we need to send completion 12636 * back to the other controller, not to the backend on this side. 12637 */ 12638 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12639 12640 fe_datamove = CTL_PORT(io)->fe_datamove; 12641 fe_datamove(io); 12642 } 12643 12644 static int 12645 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12646 { 12647 #if 0 12648 char str[256]; 12649 char path_str[64]; 12650 struct sbuf sb; 12651 #endif 12652 uint32_t i; 12653 12654 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12655 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12656 free(io->io_hdr.remote_sglist, M_CTL); 12657 io->io_hdr.remote_sglist = NULL; 12658 io->io_hdr.local_sglist = NULL; 12659 12660 #if 0 12661 scsi_path_string(io, path_str, sizeof(path_str)); 12662 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12663 sbuf_cat(&sb, path_str); 12664 scsi_command_string(&io->scsiio, NULL, &sb); 12665 sbuf_printf(&sb, "\n"); 12666 sbuf_cat(&sb, path_str); 12667 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12668 io->scsiio.tag_num, io->scsiio.tag_type); 12669 sbuf_cat(&sb, path_str); 12670 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12671 io->io_hdr.flags, io->io_hdr.status); 12672 sbuf_finish(&sb); 12673 printk("%s", sbuf_data(&sb)); 12674 #endif 12675 12676 12677 /* 12678 * The read is done, now we need to send status (good or bad) back 12679 * to the other side. 12680 */ 12681 ctl_send_datamove_done(io, /*have_lock*/ 0); 12682 12683 return (0); 12684 } 12685 12686 static void 12687 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12688 { 12689 union ctl_io *io; 12690 void (*fe_datamove)(union ctl_io *io); 12691 12692 io = rq->context; 12693 12694 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12695 printf("%s: ISC DMA read failed with error %d\n", __func__, 12696 rq->ret); 12697 ctl_set_internal_failure(&io->scsiio, 12698 /*sks_valid*/ 1, 12699 /*retry_count*/ rq->ret); 12700 } 12701 12702 ctl_dt_req_free(rq); 12703 12704 /* Switch the pointer over so the FETD knows what to do */ 12705 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12706 12707 /* 12708 * Use a custom move done callback, since we need to send completion 12709 * back to the other controller, not to the backend on this side. 12710 */ 12711 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12712 12713 /* XXX KDM add checks like the ones in ctl_datamove? */ 12714 12715 fe_datamove = CTL_PORT(io)->fe_datamove; 12716 fe_datamove(io); 12717 } 12718 12719 static int 12720 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12721 { 12722 struct ctl_sg_entry *local_sglist; 12723 uint32_t len_to_go; 12724 int retval; 12725 int i; 12726 12727 retval = 0; 12728 local_sglist = io->io_hdr.local_sglist; 12729 len_to_go = io->scsiio.kern_data_len; 12730 12731 /* 12732 * The difficult thing here is that the size of the various 12733 * S/G segments may be different than the size from the 12734 * remote controller. That'll make it harder when DMAing 12735 * the data back to the other side. 
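* To keep it simple, local segments are allocated in CTL_HA_DATAMOVE_SEGMENT sized chunks (the last one trimmed to the remaining length); ctl_datamove_remote_xfer() later walks both lists and moves the overlap of the current local and remote segments each time.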
12736 */ 12737 for (i = 0; len_to_go > 0; i++) { 12738 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12739 local_sglist[i].addr = 12740 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12741 12742 len_to_go -= local_sglist[i].len; 12743 } 12744 /* 12745 * Reset the number of S/G entries accordingly. The original 12746 * number of S/G entries is available in rem_sg_entries. 12747 */ 12748 io->scsiio.kern_sg_entries = i; 12749 12750 #if 0 12751 printf("%s: kern_sg_entries = %d\n", __func__, 12752 io->scsiio.kern_sg_entries); 12753 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12754 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12755 local_sglist[i].addr, local_sglist[i].len); 12756 #endif 12757 12758 return (retval); 12759 } 12760 12761 static int 12762 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12763 ctl_ha_dt_cb callback) 12764 { 12765 struct ctl_ha_dt_req *rq; 12766 struct ctl_sg_entry *remote_sglist, *local_sglist; 12767 uint32_t local_used, remote_used, total_used; 12768 int i, j, isc_ret; 12769 12770 rq = ctl_dt_req_alloc(); 12771 12772 /* 12773 * If we failed to allocate the request, and if the DMA didn't fail 12774 * anyway, set busy status. This is just a resource allocation 12775 * failure. 12776 */ 12777 if ((rq == NULL) 12778 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12779 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12780 ctl_set_busy(&io->scsiio); 12781 12782 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12783 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12784 12785 if (rq != NULL) 12786 ctl_dt_req_free(rq); 12787 12788 /* 12789 * The data move failed. We need to return status back 12790 * to the other controller. No point in trying to DMA 12791 * data to the remote controller. 12792 */ 12793 12794 ctl_send_datamove_done(io, /*have_lock*/ 0); 12795 12796 return (1); 12797 } 12798 12799 local_sglist = io->io_hdr.local_sglist; 12800 remote_sglist = io->io_hdr.remote_sglist; 12801 local_used = 0; 12802 remote_used = 0; 12803 total_used = 0; 12804 12805 /* 12806 * Pull/push the data over the wire from/to the other controller. 12807 * This takes into account the possibility that the local and 12808 * remote sglists may not be identical in terms of the size of 12809 * the elements and the number of elements. 12810 * 12811 * One fundamental assumption here is that the length allocated for 12812 * both the local and remote sglists is identical. Otherwise, we've 12813 * essentially got a coding error of some sort. 12814 */ 12815 isc_ret = CTL_HA_STATUS_SUCCESS; 12816 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12817 uint32_t cur_len; 12818 uint8_t *tmp_ptr; 12819 12820 rq->command = command; 12821 rq->context = io; 12822 12823 /* 12824 * Both pointers should be aligned. But it is possible 12825 * that the allocation length is not. They should both 12826 * also have enough slack left over at the end, though, 12827 * to round up to the next 8 byte boundary. 
12828 */ 12829 cur_len = MIN(local_sglist[i].len - local_used, 12830 remote_sglist[j].len - remote_used); 12831 rq->size = cur_len; 12832 12833 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12834 tmp_ptr += local_used; 12835 12836 #if 0 12837 /* Use physical addresses when talking to ISC hardware */ 12838 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12839 /* XXX KDM use busdma */ 12840 rq->local = vtophys(tmp_ptr); 12841 } else 12842 rq->local = tmp_ptr; 12843 #else 12844 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12845 ("HA does not support BUS_ADDR")); 12846 rq->local = tmp_ptr; 12847 #endif 12848 12849 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12850 tmp_ptr += remote_used; 12851 rq->remote = tmp_ptr; 12852 12853 rq->callback = NULL; 12854 12855 local_used += cur_len; 12856 if (local_used >= local_sglist[i].len) { 12857 i++; 12858 local_used = 0; 12859 } 12860 12861 remote_used += cur_len; 12862 if (remote_used >= remote_sglist[j].len) { 12863 j++; 12864 remote_used = 0; 12865 } 12866 total_used += cur_len; 12867 12868 if (total_used >= io->scsiio.kern_data_len) 12869 rq->callback = callback; 12870 12871 #if 0 12872 printf("%s: %s: local %p remote %p size %d\n", __func__, 12873 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12874 rq->local, rq->remote, rq->size); 12875 #endif 12876 12877 isc_ret = ctl_dt_single(rq); 12878 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12879 break; 12880 } 12881 if (isc_ret != CTL_HA_STATUS_WAIT) { 12882 rq->ret = isc_ret; 12883 callback(rq); 12884 } 12885 12886 return (0); 12887 } 12888 12889 static void 12890 ctl_datamove_remote_read(union ctl_io *io) 12891 { 12892 int retval; 12893 uint32_t i; 12894 12895 /* 12896 * This will send an error to the other controller in the case of a 12897 * failure. 12898 */ 12899 retval = ctl_datamove_remote_sgl_setup(io); 12900 if (retval != 0) 12901 return; 12902 12903 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12904 ctl_datamove_remote_read_cb); 12905 if (retval != 0) { 12906 /* 12907 * Make sure we free memory if there was an error.. The 12908 * ctl_datamove_remote_xfer() function will send the 12909 * datamove done message, or call the callback with an 12910 * error if there is a problem. 12911 */ 12912 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12913 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12914 free(io->io_hdr.remote_sglist, M_CTL); 12915 io->io_hdr.remote_sglist = NULL; 12916 io->io_hdr.local_sglist = NULL; 12917 } 12918 } 12919 12920 /* 12921 * Process a datamove request from the other controller. This is used for 12922 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12923 * first. Once that is complete, the data gets DMAed into the remote 12924 * controller's memory. For reads, we DMA from the remote controller's 12925 * memory into our memory first, and then move it out to the FETD. 12926 */ 12927 static void 12928 ctl_datamove_remote(union ctl_io *io) 12929 { 12930 12931 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12932 12933 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12934 ctl_failover_io(io, /*have_lock*/ 0); 12935 return; 12936 } 12937 12938 /* 12939 * Note that we look for an aborted I/O here, but don't do some of 12940 * the other checks that ctl_datamove() normally does. 12941 * We don't need to run the datamove delay code, since that should 12942 * have been done if need be on the other controller. 
12943 */ 12944 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12945 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12946 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12947 io->io_hdr.nexus.targ_port, 12948 io->io_hdr.nexus.targ_lun); 12949 io->io_hdr.port_status = 31338; 12950 ctl_send_datamove_done(io, /*have_lock*/ 0); 12951 return; 12952 } 12953 12954 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12955 ctl_datamove_remote_write(io); 12956 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12957 ctl_datamove_remote_read(io); 12958 else { 12959 io->io_hdr.port_status = 31339; 12960 ctl_send_datamove_done(io, /*have_lock*/ 0); 12961 } 12962 } 12963 12964 static void 12965 ctl_process_done(union ctl_io *io) 12966 { 12967 struct ctl_softc *softc = CTL_SOFTC(io); 12968 struct ctl_port *port = CTL_PORT(io); 12969 struct ctl_lun *lun = CTL_LUN(io); 12970 void (*fe_done)(union ctl_io *io); 12971 union ctl_ha_msg msg; 12972 12973 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12974 fe_done = port->fe_done; 12975 12976 #ifdef CTL_TIME_IO 12977 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12978 char str[256]; 12979 char path_str[64]; 12980 struct sbuf sb; 12981 12982 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12983 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12984 12985 sbuf_cat(&sb, path_str); 12986 switch (io->io_hdr.io_type) { 12987 case CTL_IO_SCSI: 12988 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12989 sbuf_printf(&sb, "\n"); 12990 sbuf_cat(&sb, path_str); 12991 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12992 io->scsiio.tag_num, io->scsiio.tag_type); 12993 break; 12994 case CTL_IO_TASK: 12995 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12996 "Tag Type: %d\n", io->taskio.task_action, 12997 io->taskio.tag_num, io->taskio.tag_type); 12998 break; 12999 default: 13000 panic("%s: Invalid CTL I/O type %d\n", 13001 __func__, io->io_hdr.io_type); 13002 } 13003 sbuf_cat(&sb, path_str); 13004 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13005 (intmax_t)time_uptime - io->io_hdr.start_time); 13006 sbuf_finish(&sb); 13007 printf("%s", sbuf_data(&sb)); 13008 } 13009 #endif /* CTL_TIME_IO */ 13010 13011 switch (io->io_hdr.io_type) { 13012 case CTL_IO_SCSI: 13013 break; 13014 case CTL_IO_TASK: 13015 if (ctl_debug & CTL_DEBUG_INFO) 13016 ctl_io_error_print(io, NULL); 13017 fe_done(io); 13018 return; 13019 default: 13020 panic("%s: Invalid CTL I/O type %d\n", 13021 __func__, io->io_hdr.io_type); 13022 } 13023 13024 if (lun == NULL) { 13025 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13026 io->io_hdr.nexus.targ_mapped_lun)); 13027 goto bailout; 13028 } 13029 13030 mtx_lock(&lun->lun_lock); 13031 13032 /* 13033 * Check to see if we have any informational exception and status 13034 * of this command can be modified to report it in form of either 13035 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
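* The substitution below only happens when the command completed with CTL_SUCCESS, no status has been sent yet, and the command entry does not carry CTL_CMD_FLAG_NO_SENSE.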
	 */
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			     SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

#ifdef CTL_LEGACY_STATS
		uint32_t targ_port = port->targ_port;
		lun->legacy_stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->legacy_stats.ports[targ_port].operations[type]++;
		lun->legacy_stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type],
		    &io->io_hdr.dma_bt);
		bintime_add(&lun->legacy_stats.ports[targ_port].time[type],
		    &bt);
#endif
#endif /* CTL_LEGACY_STATS */

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type]++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type]++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}

	/*
	 * Remove this from the OOA queue.
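	 * The OOA ("order of arrival") queue tracks every command currently
	 * outstanding on this LUN; dropping this entry is what allows
	 * ctl_check_blocked() below to release any commands that were
	 * serialized behind it, and is also what lets an invalidated LUN
	 * finally be freed once its queue drains.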
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
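	 * Sense data is stashed per initiator: pending_sense[] is indexed
	 * by port slot (initidx / CTL_MAX_INIT_PER_PORT), and each slot is
	 * a lazily allocated array with one scsi_sense_data entry per
	 * initiator (initidx % CTL_MAX_INIT_PER_PORT).  If the M_NOWAIT
	 * allocation fails, the sense data is silently dropped.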
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	p = initidx / CTL_MAX_INIT_PER_PORT;
	if (lun->pending_sense[p] == NULL) {
		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
		    M_CTL, M_NOWAIT | M_ZERO);
	}
	if ((ps = lun->pending_sense[p]) != NULL) {
		ps += initidx % CTL_MAX_INIT_PER_PORT;
		memset(ps, 0, sizeof(*ps));
		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
	}
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	if (lun->be_lun == NULL ||
	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
		return;
	mtx_lock(&lun->lun_lock);
	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%u:%u:%u tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    io->io_hdr.io_type,
		    io->io_hdr.msg_type,
		    io->scsiio.cdb[0],
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    (io->io_hdr.io_type ==
		     CTL_IO_TASK) ?
		     io->taskio.tag_num :
		     io->scsiio.tag_num,
		    io->io_hdr.flags,
		    io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
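	 * Otherwise, when CTL_IO_DELAY is compiled in, completion may be
	 * deferred below: if the LUN has a "done" delay configured, a
	 * callout is armed and ctl_done_timer_wakeup() re-enters ctl_done()
	 * after done_delay seconds (one-shot delays clear themselves once
	 * armed) before the I/O is finally queued via ctl_enqueue_done().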
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/*
		 * Sleep until we have something to do.
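		 * The sleep drops ctl_lock (PDROP) and waits on
		 * &softc->pending_lun_queue, so queueing a new backend LUN
		 * and issuing a wakeup() on that channel is what gets this
		 * thread running again.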
		 */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
	softc->lun_thread = NULL;
	kthread_exit();
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/*
				 * Send msg to other side.
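				 * The message mirrors the local change on
				 * the peer controller: ua_set says whether
				 * the threshold UA is being established or
				 * cleared, ua_all applies it to every
				 * initiator, and ua_info carries the same
				 * descriptor offset recorded in
				 * lun->ua_tpt_info above.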
				 */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}

static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */