/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_PER,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/SMS_VER_PER,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC,
	/*mrie*/SIEP_MRIE_NO,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
	    SIEP_FLAGS_LOGERR,
	/*mrie*/0x0f,
	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
	/*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/SLBPP_SITUA,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
#ifdef CTL_TIME_IO
static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
    &ctl_time_io_secs, 0, "Log requests taking more seconds");
#endif

/*
 * Maximum number of LUNs we support.  MUST be a power of 2.
 */
#define	CTL_DEFAULT_MAX_LUNS	1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define	CTL_DEFAULT_MAX_PORTS	256
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");

/*
 * Maximum number of initiators we support.
 */
#define	CTL_MAX_INITIATORS	(CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
    int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr,
    struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
    struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
    uint64_t res_key, uint64_t sa_res_key,
    uint8_t type, uint32_t residx,
    struct ctl_scsiio *ctsio,
    struct scsi_per_res_out *cdb,
    struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
    union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
    int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
    int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry,
    struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
    struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union ctl_io *io);
static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
    ctl_ua_type ua_type);
static int ctl_lun_reset(union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
    ctl_ua_type ua_type);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);
static int ctl_ha_init(void);
static int ctl_ha_shutdown(void);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun,
    uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
	.init = ctl_ha_init,
	.shutdown = ctl_ha_shutdown,
};

static int
ctl_ha_init(void)
{
	struct ctl_softc *softc = control_softc;

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	    &softc->othersc_pool) != 0)
		return (ENOMEM);
	if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
	    != CTL_HA_STATUS_SUCCESS) {
		ctl_ha_msg_destroy(softc);
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	return (0);
};

static int
ctl_ha_shutdown(void)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	ctl_ha_msg_shutdown(softc);
	if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	ctl_pool_free(softc->othersc_pool);
	while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
		ctl_port_deregister(port);
		free(port->port_name, M_CTL);
		free(port, M_CTL);
	}
	return (0);
};

static void
ctl_ha_datamove(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_sg_entry *sgl;
	union ctl_ha_msg msg;
	uint32_t sg_entries_sent;
	int do_sg_copy, i, j;

	memset(&msg.dt, 0, sizeof(msg.dt));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
	msg.hdr.original_sc = io->io_hdr.remote_io;
	msg.hdr.serializing_sc = io;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.dt.flags = io->io_hdr.flags;

	/*
	 * We convert everything into a S/G list here.  We can't
	 * pass by reference, only by value between controllers.
	 * So we can't pass a pointer to the S/G list, only as many
	 * S/G entries as we can fit in here.  If it's possible for
	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
	 * then we need to break this up into multiple transfers.
	 */
	if (io->scsiio.kern_sg_entries == 0) {
		msg.dt.kern_sg_entries = 1;
#if 0
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
		} else {
			/* XXX KDM use busdma here! */
			msg.dt.sg_list[0].addr =
			    (void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			io->scsiio.be_move_done(io);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand the request over from us to the peer.
	 * If failover has just happened, then we must return an error.
	 * If failover happens just after this point, then it is not our
	 * problem.
	 */
	if (lun)
		mtx_lock(&lun->lun_lock);
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		io->io_hdr.port_status = 31342;
		io->scsiio.be_move_done(io);
		return;
	}
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	if (lun)
		mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
	union ctl_ha_msg msg;

	if (io->io_hdr.io_type == CTL_IO_SCSI) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		    io->scsiio.sense_len);
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
	}
	ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	    msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->pr_generation;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->pr_res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += port->lun_map_size * sizeof(uint32_t);
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg msg;
	u_int i;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;

	/* Don't try to replicate pages not present on this device. */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	bzero(&msg.mode, sizeof(msg.mode));
	msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.targ_lun = lun->lun;
	msg.hdr.nexus.targ_mapped_lun = lun->lun;
	msg.mode.page_code = page;
	msg.mode.subpage = subpage;
	msg.mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
	    msg.mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
	    M_WAITOK);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/* Announce this node's parameters to the peer for validation. */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = ctl_max_luns;
	msg.login.max_ports = ctl_max_ports;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ?
	    lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	if (port->wwpn_iid[iid].in_use != 0 &&
	    msg->iid.in_use == 0)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
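		/*
		 * Identical ha_id values on both nodes indicate a
		 * misconfigured HA pair, so drop the HA link below.
		 */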
ha_id %d\n", msg->login.ha_id); 1361 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1362 return; 1363 } 1364 if (msg->login.max_luns != ctl_max_luns || 1365 msg->login.max_ports != ctl_max_ports || 1366 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { 1367 printf("CTL HA peers have different limits\n"); 1368 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1369 return; 1370 } 1371 } 1372 1373 static void 1374 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1375 { 1376 struct ctl_lun *lun; 1377 u_int i; 1378 uint32_t initidx, targ_lun; 1379 1380 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1381 mtx_lock(&softc->ctl_lock); 1382 if (targ_lun >= ctl_max_luns || 1383 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1384 mtx_unlock(&softc->ctl_lock); 1385 return; 1386 } 1387 mtx_lock(&lun->lun_lock); 1388 mtx_unlock(&softc->ctl_lock); 1389 if (lun->flags & CTL_LUN_DISABLED) { 1390 mtx_unlock(&lun->lun_lock); 1391 return; 1392 } 1393 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1394 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1395 msg->mode.page_code && 1396 lun->mode_pages.index[i].subpage == msg->mode.subpage) 1397 break; 1398 } 1399 if (i == CTL_NUM_MODE_PAGES) { 1400 mtx_unlock(&lun->lun_lock); 1401 return; 1402 } 1403 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, 1404 lun->mode_pages.index[i].page_len); 1405 initidx = ctl_get_initindex(&msg->hdr.nexus); 1406 if (initidx != -1) 1407 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 1408 mtx_unlock(&lun->lun_lock); 1409 } 1410 1411 /* 1412 * ISC (Inter Shelf Communication) event handler. Events from the HA 1413 * subsystem come in here. 1414 */ 1415 static void 1416 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 1417 { 1418 struct ctl_softc *softc = control_softc; 1419 union ctl_io *io; 1420 struct ctl_prio *presio; 1421 ctl_ha_status isc_status; 1422 1423 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 1424 if (event == CTL_HA_EVT_MSG_RECV) { 1425 union ctl_ha_msg *msg, msgbuf; 1426 1427 if (param > sizeof(msgbuf)) 1428 msg = malloc(param, M_CTL, M_WAITOK); 1429 else 1430 msg = &msgbuf; 1431 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1432 M_WAITOK); 1433 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1434 printf("%s: Error receiving message: %d\n", 1435 __func__, isc_status); 1436 if (msg != &msgbuf) 1437 free(msg, M_CTL); 1438 return; 1439 } 1440 1441 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 1442 switch (msg->hdr.msg_type) { 1443 case CTL_MSG_SERIALIZE: 1444 io = ctl_alloc_io(softc->othersc_pool); 1445 ctl_zero_io(io); 1446 // populate ctsio from msg 1447 io->io_hdr.io_type = CTL_IO_SCSI; 1448 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1449 io->io_hdr.remote_io = msg->hdr.original_sc; 1450 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1451 CTL_FLAG_IO_ACTIVE; 1452 /* 1453 * If we're in serialization-only mode, we don't 1454 * want to go through full done processing. Thus 1455 * the COPY flag. 1456 * 1457 * XXX KDM add another flag that is more specific. 
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			    CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
				    entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
				getbinuptime(&io->io_hdr.dma_start_bt);
#endif
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				CTL_RSGL(io) = sgl;
				CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
				    msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
				    msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
				    msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
				    msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
				    io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				    __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg->hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg->hdr.nexus;
			taskio->task_action = msg->task.task_action;
			taskio->tag_num = msg->task.tag_num;
			taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbinuptime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			presio->io_hdr.nexus = msg->hdr.nexus;
			presio->pr_msg = msg->pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_UA:
			ctl_isc_ua(softc, msg, param);
			break;
		case CTL_MSG_PORT_SYNC:
			ctl_isc_port_sync(softc, msg, param);
			break;
		case CTL_MSG_LUN_SYNC:
			ctl_isc_lun_sync(softc, msg, param);
			break;
		case CTL_MSG_IID_SYNC:
			ctl_isc_iid_sync(softc, msg, param);
			break;
		case CTL_MSG_LOGIN:
			ctl_isc_login(softc, msg, param);
			break;
		case CTL_MSG_MODE_SYNC:
			ctl_isc_mode_sync(softc, msg, param);
			break;
		default:
			printf("Received HA message of unknown type %d\n",
			    msg->hdr.msg_type);
			ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
			break;
		}
		if (msg != &msgbuf)
			free(msg, M_CTL);
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		printf("CTL: HA link status changed from %d to %d\n",
		    softc->ha_link, param);
		if (param == softc->ha_link)
			return;
		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
			softc->ha_link = param;
			ctl_isc_ha_link_down(softc);
		} else {
			softc->ha_link = param;
			if (softc->ha_link == CTL_HA_LINK_ONLINE)
				ctl_isc_ha_link_up(softc);
		}
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

	memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
	    src->scsi.sense_len);
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
	    src->scsiio.sense_len);
	dest->scsi.scsi_status = src->scsiio.scsi_status;
	dest->scsi.sense_len = src->scsiio.sense_len;
	dest->hdr.status = src->io_hdr.status;
}

void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >=
softc->init_max) 1739 return; 1740 mtx_assert(&lun->lun_lock, MA_OWNED); 1741 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1742 if (pu == NULL) 1743 return; 1744 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1745 } 1746 1747 void 1748 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1749 { 1750 int i; 1751 1752 mtx_assert(&lun->lun_lock, MA_OWNED); 1753 if (lun->pending_ua[port] == NULL) 1754 return; 1755 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1756 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1757 continue; 1758 lun->pending_ua[port][i] |= ua; 1759 } 1760 } 1761 1762 void 1763 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1764 { 1765 struct ctl_softc *softc = lun->ctl_softc; 1766 int i; 1767 1768 mtx_assert(&lun->lun_lock, MA_OWNED); 1769 for (i = softc->port_min; i < softc->port_max; i++) 1770 ctl_est_ua_port(lun, i, except, ua); 1771 } 1772 1773 void 1774 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1775 { 1776 struct ctl_softc *softc = lun->ctl_softc; 1777 ctl_ua_type *pu; 1778 1779 if (initidx < softc->init_min || initidx >= softc->init_max) 1780 return; 1781 mtx_assert(&lun->lun_lock, MA_OWNED); 1782 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1783 if (pu == NULL) 1784 return; 1785 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1786 } 1787 1788 void 1789 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1790 { 1791 struct ctl_softc *softc = lun->ctl_softc; 1792 int i, j; 1793 1794 mtx_assert(&lun->lun_lock, MA_OWNED); 1795 for (i = softc->port_min; i < softc->port_max; i++) { 1796 if (lun->pending_ua[i] == NULL) 1797 continue; 1798 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1799 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1800 continue; 1801 lun->pending_ua[i][j] &= ~ua; 1802 } 1803 } 1804 } 1805 1806 void 1807 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1808 ctl_ua_type ua_type) 1809 { 1810 struct ctl_lun *lun; 1811 1812 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1813 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1814 mtx_lock(&lun->lun_lock); 1815 ctl_clr_ua(lun, initidx, ua_type); 1816 mtx_unlock(&lun->lun_lock); 1817 } 1818 } 1819 1820 static int 1821 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1822 { 1823 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1824 struct ctl_lun *lun; 1825 struct ctl_lun_req ireq; 1826 int error, value; 1827 1828 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1829 error = sysctl_handle_int(oidp, &value, 0, req); 1830 if ((error != 0) || (req->newptr == NULL)) 1831 return (error); 1832 1833 mtx_lock(&softc->ctl_lock); 1834 if (value == 0) 1835 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1836 else 1837 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1838 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1839 mtx_unlock(&softc->ctl_lock); 1840 bzero(&ireq, sizeof(ireq)); 1841 ireq.reqtype = CTL_LUNREQ_MODIFY; 1842 ireq.reqdata.modify.lun_id = lun->lun; 1843 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1844 curthread); 1845 if (ireq.status != CTL_LUN_OK) { 1846 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1847 __func__, ireq.status, ireq.error_str); 1848 } 1849 mtx_lock(&softc->ctl_lock); 1850 } 1851 mtx_unlock(&softc->ctl_lock); 1852 return (0); 1853 } 1854 1855 static int 1856 ctl_init(void) 1857 { 1858 struct make_dev_args args; 1859 struct ctl_softc *softc; 1860 int i, error; 1861 1862 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1863 M_WAITOK | M_ZERO); 1864 1865 make_dev_args_init(&args); 1866 args.mda_devsw = &ctl_cdevsw; 1867 args.mda_uid = UID_ROOT; 1868 args.mda_gid = GID_OPERATOR; 1869 args.mda_mode = 0600; 1870 args.mda_si_drv1 = softc; 1871 args.mda_si_drv2 = NULL; 1872 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1873 if (error != 0) { 1874 free(softc, M_DEVBUF); 1875 control_softc = NULL; 1876 return (error); 1877 } 1878 1879 sysctl_ctx_init(&softc->sysctl_ctx); 1880 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1881 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1882 CTLFLAG_RD, 0, "CAM Target Layer"); 1883 1884 if (softc->sysctl_tree == NULL) { 1885 printf("%s: unable to allocate sysctl tree\n", __func__); 1886 destroy_dev(softc->dev); 1887 free(softc, M_DEVBUF); 1888 control_softc = NULL; 1889 return (ENOMEM); 1890 } 1891 1892 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1893 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1894 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1895 softc->flags = 0; 1896 1897 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1898 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1899 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1900 1901 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1902 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1903 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1904 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1905 } 1906 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1907 M_DEVBUF, M_WAITOK | M_ZERO); 1908 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1909 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1910 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1911 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1912 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1913 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1914 } 1915 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1916 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1917 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1918 M_DEVBUF, M_WAITOK | M_ZERO); 1919 1920 1921 /* 1922 * In Copan's HA scheme, the "master" and "slave" roles are 1923 * figured out through the slot the controller is in. Although it 1924 * is an active/active system, someone has to be in charge. 
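 * With a nonzero ha_id the port and initiator index spaces are simply
 * partitioned below: each head owns ctl_max_ports / NUM_HA_SHELVES ports
 * starting at (ha_id - 1) * port_cnt, and the initiator index range is
 * that port range scaled by CTL_MAX_INIT_PER_PORT.  For example (assuming
 * NUM_HA_SHELVES is 2 and ctl_max_ports is 256), head 1 would own ports
 * 0-127 and head 2 would own ports 128-255.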
1925 */ 1926 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1927 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1928 "HA head ID (0 - no HA)"); 1929 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1930 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1931 softc->is_single = 1; 1932 softc->port_cnt = ctl_max_ports; 1933 softc->port_min = 0; 1934 } else { 1935 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 1936 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1937 } 1938 softc->port_max = softc->port_min + softc->port_cnt; 1939 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1940 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1941 1942 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1943 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1944 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1945 1946 STAILQ_INIT(&softc->lun_list); 1947 STAILQ_INIT(&softc->pending_lun_queue); 1948 STAILQ_INIT(&softc->fe_list); 1949 STAILQ_INIT(&softc->port_list); 1950 STAILQ_INIT(&softc->be_list); 1951 ctl_tpc_init(softc); 1952 1953 if (worker_threads <= 0) 1954 worker_threads = max(1, mp_ncpus / 4); 1955 if (worker_threads > CTL_MAX_THREADS) 1956 worker_threads = CTL_MAX_THREADS; 1957 1958 for (i = 0; i < worker_threads; i++) { 1959 struct ctl_thread *thr = &softc->threads[i]; 1960 1961 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1962 thr->ctl_softc = softc; 1963 STAILQ_INIT(&thr->incoming_queue); 1964 STAILQ_INIT(&thr->rtr_queue); 1965 STAILQ_INIT(&thr->done_queue); 1966 STAILQ_INIT(&thr->isc_queue); 1967 1968 error = kproc_kthread_add(ctl_work_thread, thr, 1969 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1970 if (error != 0) { 1971 printf("error creating CTL work thread!\n"); 1972 return (error); 1973 } 1974 } 1975 error = kproc_kthread_add(ctl_lun_thread, softc, 1976 &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun"); 1977 if (error != 0) { 1978 printf("error creating CTL lun thread!\n"); 1979 return (error); 1980 } 1981 error = kproc_kthread_add(ctl_thresh_thread, softc, 1982 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1983 if (error != 0) { 1984 printf("error creating CTL threshold thread!\n"); 1985 return (error); 1986 } 1987 1988 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1989 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1990 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1991 1992 if (softc->is_single == 0) { 1993 if (ctl_frontend_register(&ha_frontend) != 0) 1994 softc->is_single = 1; 1995 } 1996 return (0); 1997 } 1998 1999 static int 2000 ctl_shutdown(void) 2001 { 2002 struct ctl_softc *softc = control_softc; 2003 int i; 2004 2005 if (softc->is_single == 0) 2006 ctl_frontend_deregister(&ha_frontend); 2007 2008 destroy_dev(softc->dev); 2009 2010 /* Shutdown CTL threads. 
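   The handshake below: raise softc->shutdown, wake each thread, and pause
   until the exiting thread clears its pointer (thr->thread, lun_thread,
   thresh_thread) before the queue locks and the I/O zone are torn down.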
*/ 2011 softc->shutdown = 1; 2012 for (i = 0; i < worker_threads; i++) { 2013 struct ctl_thread *thr = &softc->threads[i]; 2014 while (thr->thread != NULL) { 2015 wakeup(thr); 2016 if (thr->thread != NULL) 2017 pause("CTL thr shutdown", 1); 2018 } 2019 mtx_destroy(&thr->queue_lock); 2020 } 2021 while (softc->lun_thread != NULL) { 2022 wakeup(&softc->pending_lun_queue); 2023 if (softc->lun_thread != NULL) 2024 pause("CTL thr shutdown", 1); 2025 } 2026 while (softc->thresh_thread != NULL) { 2027 wakeup(softc->thresh_thread); 2028 if (softc->thresh_thread != NULL) 2029 pause("CTL thr shutdown", 1); 2030 } 2031 2032 ctl_tpc_shutdown(softc); 2033 uma_zdestroy(softc->io_zone); 2034 mtx_destroy(&softc->ctl_lock); 2035 2036 free(softc->ctl_luns, M_DEVBUF); 2037 free(softc->ctl_lun_mask, M_DEVBUF); 2038 free(softc->ctl_port_mask, M_DEVBUF); 2039 free(softc->ctl_ports, M_DEVBUF); 2040 2041 sysctl_ctx_free(&softc->sysctl_ctx); 2042 2043 free(softc, M_DEVBUF); 2044 control_softc = NULL; 2045 return (0); 2046 } 2047 2048 static int 2049 ctl_module_event_handler(module_t mod, int what, void *arg) 2050 { 2051 2052 switch (what) { 2053 case MOD_LOAD: 2054 return (ctl_init()); 2055 case MOD_UNLOAD: 2056 return (ctl_shutdown()); 2057 default: 2058 return (EOPNOTSUPP); 2059 } 2060 } 2061 2062 /* 2063 * XXX KDM should we do some access checks here? Bump a reference count to 2064 * prevent a CTL module from being unloaded while someone has it open? 2065 */ 2066 static int 2067 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2068 { 2069 return (0); 2070 } 2071 2072 static int 2073 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2074 { 2075 return (0); 2076 } 2077 2078 /* 2079 * Remove an initiator by port number and initiator ID. 2080 * Returns 0 for success, -1 for failure. 2081 */ 2082 int 2083 ctl_remove_initiator(struct ctl_port *port, int iid) 2084 { 2085 struct ctl_softc *softc = port->ctl_softc; 2086 int last; 2087 2088 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2089 2090 if (iid > CTL_MAX_INIT_PER_PORT) { 2091 printf("%s: initiator ID %u > maximum %u!\n", 2092 __func__, iid, CTL_MAX_INIT_PER_PORT); 2093 return (-1); 2094 } 2095 2096 mtx_lock(&softc->ctl_lock); 2097 last = (--port->wwpn_iid[iid].in_use == 0); 2098 port->wwpn_iid[iid].last_use = time_uptime; 2099 mtx_unlock(&softc->ctl_lock); 2100 if (last) 2101 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2102 ctl_isc_announce_iid(port, iid); 2103 2104 return (0); 2105 } 2106 2107 /* 2108 * Add an initiator to the initiator map. 2109 * Returns iid for success, < 0 for failure.
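 * The slot is chosen in this order: an existing entry matching the given
 * WWPN or name, then the first completely free entry, then the least
 * recently used entry that is no longer in use; if none is available the
 * call fails with -2.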
2110 */ 2111 int 2112 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2113 { 2114 struct ctl_softc *softc = port->ctl_softc; 2115 time_t best_time; 2116 int i, best; 2117 2118 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2119 2120 if (iid >= CTL_MAX_INIT_PER_PORT) { 2121 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2122 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2123 free(name, M_CTL); 2124 return (-1); 2125 } 2126 2127 mtx_lock(&softc->ctl_lock); 2128 2129 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2130 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2131 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2132 iid = i; 2133 break; 2134 } 2135 if (name != NULL && port->wwpn_iid[i].name != NULL && 2136 strcmp(name, port->wwpn_iid[i].name) == 0) { 2137 iid = i; 2138 break; 2139 } 2140 } 2141 } 2142 2143 if (iid < 0) { 2144 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2145 if (port->wwpn_iid[i].in_use == 0 && 2146 port->wwpn_iid[i].wwpn == 0 && 2147 port->wwpn_iid[i].name == NULL) { 2148 iid = i; 2149 break; 2150 } 2151 } 2152 } 2153 2154 if (iid < 0) { 2155 best = -1; 2156 best_time = INT32_MAX; 2157 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2158 if (port->wwpn_iid[i].in_use == 0) { 2159 if (port->wwpn_iid[i].last_use < best_time) { 2160 best = i; 2161 best_time = port->wwpn_iid[i].last_use; 2162 } 2163 } 2164 } 2165 iid = best; 2166 } 2167 2168 if (iid < 0) { 2169 mtx_unlock(&softc->ctl_lock); 2170 free(name, M_CTL); 2171 return (-2); 2172 } 2173 2174 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2175 /* 2176 * This is not an error yet. 2177 */ 2178 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2179 #if 0 2180 printf("%s: port %d iid %u WWPN %#jx arrived" 2181 " again\n", __func__, port->targ_port, 2182 iid, (uintmax_t)wwpn); 2183 #endif 2184 goto take; 2185 } 2186 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2187 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2188 #if 0 2189 printf("%s: port %d iid %u name '%s' arrived" 2190 " again\n", __func__, port->targ_port, 2191 iid, name); 2192 #endif 2193 goto take; 2194 } 2195 2196 /* 2197 * This is an error, but what do we do about it? The 2198 * driver is telling us we have a new WWPN for this 2199 * initiator ID, so we pretty much need to use it. 
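 * Log the conflict below and then fall through to "take:" to adopt the
 * new WWPN/name for this slot anyway.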
2200 */ 2201 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2202 " but WWPN %#jx '%s' is still at that address\n", 2203 __func__, port->targ_port, iid, wwpn, name, 2204 (uintmax_t)port->wwpn_iid[iid].wwpn, 2205 port->wwpn_iid[iid].name); 2206 } 2207 take: 2208 free(port->wwpn_iid[iid].name, M_CTL); 2209 port->wwpn_iid[iid].name = name; 2210 port->wwpn_iid[iid].wwpn = wwpn; 2211 port->wwpn_iid[iid].in_use++; 2212 mtx_unlock(&softc->ctl_lock); 2213 ctl_isc_announce_iid(port, iid); 2214 2215 return (iid); 2216 } 2217 2218 static int 2219 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2220 { 2221 int len; 2222 2223 switch (port->port_type) { 2224 case CTL_PORT_FC: 2225 { 2226 struct scsi_transportid_fcp *id = 2227 (struct scsi_transportid_fcp *)buf; 2228 if (port->wwpn_iid[iid].wwpn == 0) 2229 return (0); 2230 memset(id, 0, sizeof(*id)); 2231 id->format_protocol = SCSI_PROTO_FC; 2232 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2233 return (sizeof(*id)); 2234 } 2235 case CTL_PORT_ISCSI: 2236 { 2237 struct scsi_transportid_iscsi_port *id = 2238 (struct scsi_transportid_iscsi_port *)buf; 2239 if (port->wwpn_iid[iid].name == NULL) 2240 return (0); 2241 memset(id, 0, 256); 2242 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2243 SCSI_PROTO_ISCSI; 2244 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2245 len = roundup2(min(len, 252), 4); 2246 scsi_ulto2b(len, id->additional_length); 2247 return (sizeof(*id) + len); 2248 } 2249 case CTL_PORT_SAS: 2250 { 2251 struct scsi_transportid_sas *id = 2252 (struct scsi_transportid_sas *)buf; 2253 if (port->wwpn_iid[iid].wwpn == 0) 2254 return (0); 2255 memset(id, 0, sizeof(*id)); 2256 id->format_protocol = SCSI_PROTO_SAS; 2257 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2258 return (sizeof(*id)); 2259 } 2260 default: 2261 { 2262 struct scsi_transportid_spi *id = 2263 (struct scsi_transportid_spi *)buf; 2264 memset(id, 0, sizeof(*id)); 2265 id->format_protocol = SCSI_PROTO_SPI; 2266 scsi_ulto2b(iid, id->scsi_addr); 2267 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2268 return (sizeof(*id)); 2269 } 2270 } 2271 } 2272 2273 /* 2274 * Serialize a command that went down the "wrong" side, and so was sent to 2275 * this controller for execution. The logic is a little different than the 2276 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2277 * sent back to the other side, but in the success case, we execute the 2278 * command on this side (XFER mode) or tell the other side to execute it 2279 * (SER_ONLY mode). 2280 */ 2281 static void 2282 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2283 { 2284 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2285 struct ctl_port *port = CTL_PORT(ctsio); 2286 union ctl_ha_msg msg_info; 2287 struct ctl_lun *lun; 2288 const struct ctl_cmd_entry *entry; 2289 union ctl_io *bio; 2290 uint32_t targ_lun; 2291 2292 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2293 2294 /* Make sure that we know about this port. */ 2295 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2296 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2297 /*retry_count*/ 1); 2298 goto badjuju; 2299 } 2300 2301 /* Make sure that we know about this LUN. */ 2302 mtx_lock(&softc->ctl_lock); 2303 if (targ_lun >= ctl_max_luns || 2304 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2305 mtx_unlock(&softc->ctl_lock); 2306 2307 /* 2308 * The other node would not send this request to us unless 2309 * received announce that we are primary node for this LUN. 
2310 * If this LUN does not exist now, it is probably result of 2311 * a race, so respond to initiator in the most opaque way. 2312 */ 2313 ctl_set_busy(ctsio); 2314 goto badjuju; 2315 } 2316 mtx_lock(&lun->lun_lock); 2317 mtx_unlock(&softc->ctl_lock); 2318 2319 /* 2320 * If the LUN is invalid, pretend that it doesn't exist. 2321 * It will go away as soon as all pending I/Os completed. 2322 */ 2323 if (lun->flags & CTL_LUN_DISABLED) { 2324 mtx_unlock(&lun->lun_lock); 2325 ctl_set_busy(ctsio); 2326 goto badjuju; 2327 } 2328 2329 entry = ctl_get_cmd_entry(ctsio, NULL); 2330 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2331 mtx_unlock(&lun->lun_lock); 2332 goto badjuju; 2333 } 2334 2335 CTL_LUN(ctsio) = lun; 2336 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2337 2338 /* 2339 * Every I/O goes into the OOA queue for a 2340 * particular LUN, and stays there until completion. 2341 */ 2342 #ifdef CTL_TIME_IO 2343 if (TAILQ_EMPTY(&lun->ooa_queue)) 2344 lun->idle_time += getsbinuptime() - lun->last_busy; 2345 #endif 2346 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2347 2348 bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); 2349 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 2350 case CTL_ACTION_BLOCK: 2351 ctsio->io_hdr.blocker = bio; 2352 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 2353 blocked_links); 2354 mtx_unlock(&lun->lun_lock); 2355 break; 2356 case CTL_ACTION_PASS: 2357 case CTL_ACTION_SKIP: 2358 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2359 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2360 ctl_enqueue_rtr((union ctl_io *)ctsio); 2361 mtx_unlock(&lun->lun_lock); 2362 } else { 2363 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2364 mtx_unlock(&lun->lun_lock); 2365 2366 /* send msg back to other side */ 2367 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2368 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2369 msg_info.hdr.msg_type = CTL_MSG_R2R; 2370 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2371 sizeof(msg_info.hdr), M_WAITOK); 2372 } 2373 break; 2374 case CTL_ACTION_OVERLAP: 2375 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2376 mtx_unlock(&lun->lun_lock); 2377 ctl_set_overlapped_cmd(ctsio); 2378 goto badjuju; 2379 case CTL_ACTION_OVERLAP_TAG: 2380 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2381 mtx_unlock(&lun->lun_lock); 2382 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2383 goto badjuju; 2384 case CTL_ACTION_ERROR: 2385 default: 2386 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2387 mtx_unlock(&lun->lun_lock); 2388 2389 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2390 /*retry_count*/ 0); 2391 badjuju: 2392 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2393 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2394 msg_info.hdr.serializing_sc = NULL; 2395 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2396 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2397 sizeof(msg_info.scsi), M_WAITOK); 2398 ctl_free_io((union ctl_io *)ctsio); 2399 break; 2400 } 2401 } 2402 2403 /* 2404 * Returns 0 for success, errno for failure. 
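 * (As written the helper below returns nothing: it fills kern_entries and
 * keeps advancing *cur_fill_num past ooa_hdr->alloc_num so the caller can
 * report how many entries were dropped.)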
2405 */ 2406 static void 2407 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2408 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2409 { 2410 union ctl_io *io; 2411 2412 mtx_lock(&lun->lun_lock); 2413 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2414 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2415 ooa_links)) { 2416 struct ctl_ooa_entry *entry; 2417 2418 /* 2419 * If we've got more than we can fit, just count the 2420 * remaining entries. 2421 */ 2422 if (*cur_fill_num >= ooa_hdr->alloc_num) 2423 continue; 2424 2425 entry = &kern_entries[*cur_fill_num]; 2426 2427 entry->tag_num = io->scsiio.tag_num; 2428 entry->lun_num = lun->lun; 2429 #ifdef CTL_TIME_IO 2430 entry->start_bt = io->io_hdr.start_bt; 2431 #endif 2432 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2433 entry->cdb_len = io->scsiio.cdb_len; 2434 if (io->io_hdr.blocker != NULL) 2435 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2436 2437 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2438 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2439 2440 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2441 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2442 2443 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2444 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2445 2446 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2447 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2448 } 2449 mtx_unlock(&lun->lun_lock); 2450 } 2451 2452 /* 2453 * Escape characters that are illegal or not recommended in XML. 2454 */ 2455 int 2456 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2457 { 2458 char *end = str + size; 2459 int retval; 2460 2461 retval = 0; 2462 2463 for (; *str && str < end; str++) { 2464 switch (*str) { 2465 case '&': 2466 retval = sbuf_printf(sb, "&amp;"); 2467 break; 2468 case '>': 2469 retval = sbuf_printf(sb, "&gt;"); 2470 break; 2471 case '<': 2472 retval = sbuf_printf(sb, "&lt;"); 2473 break; 2474 default: 2475 retval = sbuf_putc(sb, *str); 2476 break; 2477 } 2478 2479 if (retval != 0) 2480 break; 2481 2482 } 2483 2484 return (retval); 2485 } 2486 2487 static void 2488 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2489 { 2490 struct scsi_vpd_id_descriptor *desc; 2491 int i; 2492 2493 if (id == NULL || id->len < 4) 2494 return; 2495 desc = (struct scsi_vpd_id_descriptor *)id->data; 2496 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2497 case SVPD_ID_TYPE_T10: 2498 sbuf_printf(sb, "t10."); 2499 break; 2500 case SVPD_ID_TYPE_EUI64: 2501 sbuf_printf(sb, "eui."); 2502 break; 2503 case SVPD_ID_TYPE_NAA: 2504 sbuf_printf(sb, "naa."); 2505 break; 2506 case SVPD_ID_TYPE_SCSI_NAME: 2507 break; 2508 } 2509 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2510 case SVPD_ID_CODESET_BINARY: 2511 for (i = 0; i < desc->length; i++) 2512 sbuf_printf(sb, "%02x", desc->identifier[i]); 2513 break; 2514 case SVPD_ID_CODESET_ASCII: 2515 sbuf_printf(sb, "%.*s", (int)desc->length, 2516 (char *)desc->identifier); 2517 break; 2518 case SVPD_ID_CODESET_UTF8: 2519 sbuf_printf(sb, "%s", (char *)desc->identifier); 2520 break; 2521 } 2522 } 2523 2524 static int 2525 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2526 struct thread *td) 2527 { 2528 struct ctl_softc *softc = dev->si_drv1; 2529 struct ctl_port *port; 2530 struct ctl_lun *lun; 2531 int retval; 2532 2533 retval = 0; 2534 2535 switch (cmd) { 2536 case CTL_IO: 2537 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2538 break; 2539 case CTL_ENABLE_PORT: 2540 case CTL_DISABLE_PORT: 2541 case CTL_SET_PORT_WWNS: { 2542 struct ctl_port
*port; 2543 struct ctl_port_entry *entry; 2544 2545 entry = (struct ctl_port_entry *)addr; 2546 2547 mtx_lock(&softc->ctl_lock); 2548 STAILQ_FOREACH(port, &softc->port_list, links) { 2549 int action, done; 2550 2551 if (port->targ_port < softc->port_min || 2552 port->targ_port >= softc->port_max) 2553 continue; 2554 2555 action = 0; 2556 done = 0; 2557 if ((entry->port_type == CTL_PORT_NONE) 2558 && (entry->targ_port == port->targ_port)) { 2559 /* 2560 * If the user only wants to enable or 2561 * disable or set WWNs on a specific port, 2562 * do the operation and we're done. 2563 */ 2564 action = 1; 2565 done = 1; 2566 } else if (entry->port_type & port->port_type) { 2567 /* 2568 * Compare the user's type mask with the 2569 * particular frontend type to see if we 2570 * have a match. 2571 */ 2572 action = 1; 2573 done = 0; 2574 2575 /* 2576 * Make sure the user isn't trying to set 2577 * WWNs on multiple ports at the same time. 2578 */ 2579 if (cmd == CTL_SET_PORT_WWNS) { 2580 printf("%s: Can't set WWNs on " 2581 "multiple ports\n", __func__); 2582 retval = EINVAL; 2583 break; 2584 } 2585 } 2586 if (action == 0) 2587 continue; 2588 2589 /* 2590 * XXX KDM we have to drop the lock here, because 2591 * the online/offline operations can potentially 2592 * block. We need to reference count the frontends 2593 * so they can't go away, 2594 */ 2595 if (cmd == CTL_ENABLE_PORT) { 2596 mtx_unlock(&softc->ctl_lock); 2597 ctl_port_online(port); 2598 mtx_lock(&softc->ctl_lock); 2599 } else if (cmd == CTL_DISABLE_PORT) { 2600 mtx_unlock(&softc->ctl_lock); 2601 ctl_port_offline(port); 2602 mtx_lock(&softc->ctl_lock); 2603 } else if (cmd == CTL_SET_PORT_WWNS) { 2604 ctl_port_set_wwns(port, 2605 (entry->flags & CTL_PORT_WWNN_VALID) ? 2606 1 : 0, entry->wwnn, 2607 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2608 1 : 0, entry->wwpn); 2609 } 2610 if (done != 0) 2611 break; 2612 } 2613 mtx_unlock(&softc->ctl_lock); 2614 break; 2615 } 2616 case CTL_GET_OOA: { 2617 struct ctl_ooa *ooa_hdr; 2618 struct ctl_ooa_entry *entries; 2619 uint32_t cur_fill_num; 2620 2621 ooa_hdr = (struct ctl_ooa *)addr; 2622 2623 if ((ooa_hdr->alloc_len == 0) 2624 || (ooa_hdr->alloc_num == 0)) { 2625 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2626 "must be non-zero\n", __func__, 2627 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2628 retval = EINVAL; 2629 break; 2630 } 2631 2632 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2633 sizeof(struct ctl_ooa_entry))) { 2634 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2635 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2636 __func__, ooa_hdr->alloc_len, 2637 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2638 retval = EINVAL; 2639 break; 2640 } 2641 2642 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2643 if (entries == NULL) { 2644 printf("%s: could not allocate %d bytes for OOA " 2645 "dump\n", __func__, ooa_hdr->alloc_len); 2646 retval = ENOMEM; 2647 break; 2648 } 2649 2650 mtx_lock(&softc->ctl_lock); 2651 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2652 (ooa_hdr->lun_num >= ctl_max_luns || 2653 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2654 mtx_unlock(&softc->ctl_lock); 2655 free(entries, M_CTL); 2656 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2657 __func__, (uintmax_t)ooa_hdr->lun_num); 2658 retval = EINVAL; 2659 break; 2660 } 2661 2662 cur_fill_num = 0; 2663 2664 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2665 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2666 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2667 ooa_hdr, entries); 2668 } 2669 } else { 2670 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2671 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2672 entries); 2673 } 2674 mtx_unlock(&softc->ctl_lock); 2675 2676 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2677 ooa_hdr->fill_len = ooa_hdr->fill_num * 2678 sizeof(struct ctl_ooa_entry); 2679 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2680 if (retval != 0) { 2681 printf("%s: error copying out %d bytes for OOA dump\n", 2682 __func__, ooa_hdr->fill_len); 2683 } 2684 2685 getbinuptime(&ooa_hdr->cur_bt); 2686 2687 if (cur_fill_num > ooa_hdr->alloc_num) { 2688 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2689 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2690 } else { 2691 ooa_hdr->dropped_num = 0; 2692 ooa_hdr->status = CTL_OOA_OK; 2693 } 2694 2695 free(entries, M_CTL); 2696 break; 2697 } 2698 case CTL_DELAY_IO: { 2699 struct ctl_io_delay_info *delay_info; 2700 2701 delay_info = (struct ctl_io_delay_info *)addr; 2702 2703 #ifdef CTL_IO_DELAY 2704 mtx_lock(&softc->ctl_lock); 2705 if (delay_info->lun_id >= ctl_max_luns || 2706 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2707 mtx_unlock(&softc->ctl_lock); 2708 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2709 break; 2710 } 2711 mtx_lock(&lun->lun_lock); 2712 mtx_unlock(&softc->ctl_lock); 2713 delay_info->status = CTL_DELAY_STATUS_OK; 2714 switch (delay_info->delay_type) { 2715 case CTL_DELAY_TYPE_CONT: 2716 case CTL_DELAY_TYPE_ONESHOT: 2717 break; 2718 default: 2719 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2720 break; 2721 } 2722 switch (delay_info->delay_loc) { 2723 case CTL_DELAY_LOC_DATAMOVE: 2724 lun->delay_info.datamove_type = delay_info->delay_type; 2725 lun->delay_info.datamove_delay = delay_info->delay_secs; 2726 break; 2727 case CTL_DELAY_LOC_DONE: 2728 
lun->delay_info.done_type = delay_info->delay_type; 2729 lun->delay_info.done_delay = delay_info->delay_secs; 2730 break; 2731 default: 2732 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2733 break; 2734 } 2735 mtx_unlock(&lun->lun_lock); 2736 #else 2737 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2738 #endif /* CTL_IO_DELAY */ 2739 break; 2740 } 2741 case CTL_ERROR_INJECT: { 2742 struct ctl_error_desc *err_desc, *new_err_desc; 2743 2744 err_desc = (struct ctl_error_desc *)addr; 2745 2746 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2747 M_WAITOK | M_ZERO); 2748 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2749 2750 mtx_lock(&softc->ctl_lock); 2751 if (err_desc->lun_id >= ctl_max_luns || 2752 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2753 mtx_unlock(&softc->ctl_lock); 2754 free(new_err_desc, M_CTL); 2755 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2756 __func__, (uintmax_t)err_desc->lun_id); 2757 retval = EINVAL; 2758 break; 2759 } 2760 mtx_lock(&lun->lun_lock); 2761 mtx_unlock(&softc->ctl_lock); 2762 2763 /* 2764 * We could do some checking here to verify the validity 2765 * of the request, but given the complexity of error 2766 * injection requests, the checking logic would be fairly 2767 * complex. 2768 * 2769 * For now, if the request is invalid, it just won't get 2770 * executed and might get deleted. 2771 */ 2772 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2773 2774 /* 2775 * XXX KDM check to make sure the serial number is unique, 2776 * in case we somehow manage to wrap. That shouldn't 2777 * happen for a very long time, but it's the right thing to 2778 * do. 2779 */ 2780 new_err_desc->serial = lun->error_serial; 2781 err_desc->serial = lun->error_serial; 2782 lun->error_serial++; 2783 2784 mtx_unlock(&lun->lun_lock); 2785 break; 2786 } 2787 case CTL_ERROR_INJECT_DELETE: { 2788 struct ctl_error_desc *delete_desc, *desc, *desc2; 2789 int delete_done; 2790 2791 delete_desc = (struct ctl_error_desc *)addr; 2792 delete_done = 0; 2793 2794 mtx_lock(&softc->ctl_lock); 2795 if (delete_desc->lun_id >= ctl_max_luns || 2796 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2797 mtx_unlock(&softc->ctl_lock); 2798 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2799 __func__, (uintmax_t)delete_desc->lun_id); 2800 retval = EINVAL; 2801 break; 2802 } 2803 mtx_lock(&lun->lun_lock); 2804 mtx_unlock(&softc->ctl_lock); 2805 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2806 if (desc->serial != delete_desc->serial) 2807 continue; 2808 2809 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2810 links); 2811 free(desc, M_CTL); 2812 delete_done = 1; 2813 } 2814 mtx_unlock(&lun->lun_lock); 2815 if (delete_done == 0) { 2816 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2817 "error serial %ju on LUN %u\n", __func__, 2818 delete_desc->serial, delete_desc->lun_id); 2819 retval = EINVAL; 2820 break; 2821 } 2822 break; 2823 } 2824 case CTL_DUMP_STRUCTS: { 2825 int j, k; 2826 struct ctl_port *port; 2827 struct ctl_frontend *fe; 2828 2829 mtx_lock(&softc->ctl_lock); 2830 printf("CTL Persistent Reservation information start:\n"); 2831 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2832 mtx_lock(&lun->lun_lock); 2833 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2834 mtx_unlock(&lun->lun_lock); 2835 continue; 2836 } 2837 2838 for (j = 0; j < ctl_max_ports; j++) { 2839 if (lun->pr_keys[j] == NULL) 2840 continue; 2841 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2842 if (lun->pr_keys[j][k] == 0) 2843 continue; 2844 
printf(" LUN %ju port %d iid %d key " 2845 "%#jx\n", lun->lun, j, k, 2846 (uintmax_t)lun->pr_keys[j][k]); 2847 } 2848 } 2849 mtx_unlock(&lun->lun_lock); 2850 } 2851 printf("CTL Persistent Reservation information end\n"); 2852 printf("CTL Ports:\n"); 2853 STAILQ_FOREACH(port, &softc->port_list, links) { 2854 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2855 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2856 port->frontend->name, port->port_type, 2857 port->physical_port, port->virtual_port, 2858 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2859 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2860 if (port->wwpn_iid[j].in_use == 0 && 2861 port->wwpn_iid[j].wwpn == 0 && 2862 port->wwpn_iid[j].name == NULL) 2863 continue; 2864 2865 printf(" iid %u use %d WWPN %#jx '%s'\n", 2866 j, port->wwpn_iid[j].in_use, 2867 (uintmax_t)port->wwpn_iid[j].wwpn, 2868 port->wwpn_iid[j].name); 2869 } 2870 } 2871 printf("CTL Port information end\n"); 2872 mtx_unlock(&softc->ctl_lock); 2873 /* 2874 * XXX KDM calling this without a lock. We'd likely want 2875 * to drop the lock before calling the frontend's dump 2876 * routine anyway. 2877 */ 2878 printf("CTL Frontends:\n"); 2879 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2880 printf(" Frontend '%s'\n", fe->name); 2881 if (fe->fe_dump != NULL) 2882 fe->fe_dump(); 2883 } 2884 printf("CTL Frontend information end\n"); 2885 break; 2886 } 2887 case CTL_LUN_REQ: { 2888 struct ctl_lun_req *lun_req; 2889 struct ctl_backend_driver *backend; 2890 void *packed; 2891 nvlist_t *tmp_args_nvl; 2892 size_t packed_len; 2893 2894 lun_req = (struct ctl_lun_req *)addr; 2895 tmp_args_nvl = lun_req->args_nvl; 2896 2897 backend = ctl_backend_find(lun_req->backend); 2898 if (backend == NULL) { 2899 lun_req->status = CTL_LUN_ERROR; 2900 snprintf(lun_req->error_str, 2901 sizeof(lun_req->error_str), 2902 "Backend \"%s\" not found.", 2903 lun_req->backend); 2904 break; 2905 } 2906 2907 if (lun_req->args != NULL) { 2908 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); 2909 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { 2910 free(packed, M_CTL); 2911 lun_req->status = CTL_LUN_ERROR; 2912 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2913 "Cannot copyin args."); 2914 break; 2915 } 2916 lun_req->args_nvl = nvlist_unpack(packed, 2917 lun_req->args_len, 0); 2918 free(packed, M_CTL); 2919 2920 if (lun_req->args_nvl == NULL) { 2921 lun_req->status = CTL_LUN_ERROR; 2922 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2923 "Cannot unpack args nvlist."); 2924 break; 2925 } 2926 } else 2927 lun_req->args_nvl = nvlist_create(0); 2928 2929 retval = backend->ioctl(dev, cmd, addr, flag, td); 2930 nvlist_destroy(lun_req->args_nvl); 2931 lun_req->args_nvl = tmp_args_nvl; 2932 2933 if (lun_req->result_nvl != NULL) { 2934 if (lun_req->result != NULL) { 2935 packed = nvlist_pack(lun_req->result_nvl, 2936 &packed_len); 2937 if (packed == NULL) { 2938 lun_req->status = CTL_LUN_ERROR; 2939 snprintf(lun_req->error_str, 2940 sizeof(lun_req->error_str), 2941 "Cannot pack result nvlist."); 2942 break; 2943 } 2944 2945 if (packed_len > lun_req->result_len) { 2946 lun_req->status = CTL_LUN_ERROR; 2947 snprintf(lun_req->error_str, 2948 sizeof(lun_req->error_str), 2949 "Result nvlist too large."); 2950 free(packed, M_NVLIST); 2951 break; 2952 } 2953 2954 if (copyout(packed, lun_req->result, packed_len)) { 2955 lun_req->status = CTL_LUN_ERROR; 2956 snprintf(lun_req->error_str, 2957 sizeof(lun_req->error_str), 2958 "Cannot copyout() the result."); 2959 
free(packed, M_NVLIST); 2960 break; 2961 } 2962 2963 lun_req->result_len = packed_len; 2964 free(packed, M_NVLIST); 2965 } 2966 2967 nvlist_destroy(lun_req->result_nvl); 2968 } 2969 break; 2970 } 2971 case CTL_LUN_LIST: { 2972 struct sbuf *sb; 2973 struct ctl_lun_list *list; 2974 const char *name, *value; 2975 void *cookie; 2976 int type; 2977 2978 list = (struct ctl_lun_list *)addr; 2979 2980 /* 2981 * Allocate a fixed length sbuf here, based on the length 2982 * of the user's buffer. We could allocate an auto-extending 2983 * buffer, and then tell the user how much larger our 2984 * amount of data is than his buffer, but that presents 2985 * some problems: 2986 * 2987 * 1. The sbuf(9) routines use a blocking malloc, and so 2988 * we can't hold a lock while calling them with an 2989 * auto-extending buffer. 2990 * 2991 * 2. There is not currently a LUN reference counting 2992 * mechanism, outside of outstanding transactions on 2993 * the LUN's OOA queue. So a LUN could go away on us 2994 * while we're getting the LUN number, backend-specific 2995 * information, etc. Thus, given the way things 2996 * currently work, we need to hold the CTL lock while 2997 * grabbing LUN information. 2998 * 2999 * So, from the user's standpoint, the best thing to do is 3000 * allocate what he thinks is a reasonable buffer length, 3001 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3002 * double the buffer length and try again. (And repeat 3003 * that until he succeeds.) 3004 */ 3005 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3006 if (sb == NULL) { 3007 list->status = CTL_LUN_LIST_ERROR; 3008 snprintf(list->error_str, sizeof(list->error_str), 3009 "Unable to allocate %d bytes for LUN list", 3010 list->alloc_len); 3011 break; 3012 } 3013 3014 sbuf_printf(sb, "<ctllunlist>\n"); 3015 3016 mtx_lock(&softc->ctl_lock); 3017 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3018 mtx_lock(&lun->lun_lock); 3019 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3020 (uintmax_t)lun->lun); 3021 3022 /* 3023 * Bail out as soon as we see that we've overfilled 3024 * the buffer. 3025 */ 3026 if (retval != 0) 3027 break; 3028 3029 retval = sbuf_printf(sb, "\t<backend_type>%s" 3030 "</backend_type>\n", 3031 (lun->backend == NULL) ? "none" : 3032 lun->backend->name); 3033 3034 if (retval != 0) 3035 break; 3036 3037 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3038 lun->be_lun->lun_type); 3039 3040 if (retval != 0) 3041 break; 3042 3043 if (lun->backend == NULL) { 3044 retval = sbuf_printf(sb, "</lun>\n"); 3045 if (retval != 0) 3046 break; 3047 continue; 3048 } 3049 3050 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3051 (lun->be_lun->maxlba > 0) ? 
3052 lun->be_lun->maxlba + 1 : 0); 3053 3054 if (retval != 0) 3055 break; 3056 3057 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3058 lun->be_lun->blocksize); 3059 3060 if (retval != 0) 3061 break; 3062 3063 retval = sbuf_printf(sb, "\t<serial_number>"); 3064 3065 if (retval != 0) 3066 break; 3067 3068 retval = ctl_sbuf_printf_esc(sb, 3069 lun->be_lun->serial_num, 3070 sizeof(lun->be_lun->serial_num)); 3071 3072 if (retval != 0) 3073 break; 3074 3075 retval = sbuf_printf(sb, "</serial_number>\n"); 3076 3077 if (retval != 0) 3078 break; 3079 3080 retval = sbuf_printf(sb, "\t<device_id>"); 3081 3082 if (retval != 0) 3083 break; 3084 3085 retval = ctl_sbuf_printf_esc(sb, 3086 lun->be_lun->device_id, 3087 sizeof(lun->be_lun->device_id)); 3088 3089 if (retval != 0) 3090 break; 3091 3092 retval = sbuf_printf(sb, "</device_id>\n"); 3093 3094 if (retval != 0) 3095 break; 3096 3097 if (lun->backend->lun_info != NULL) { 3098 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3099 if (retval != 0) 3100 break; 3101 } 3102 3103 cookie = NULL; 3104 while ((name = nvlist_next(lun->be_lun->options, &type, 3105 &cookie)) != NULL) { 3106 sbuf_printf(sb, "\t<%s>", name); 3107 3108 if (type == NV_TYPE_STRING) { 3109 value = dnvlist_get_string( 3110 lun->be_lun->options, name, NULL); 3111 if (value != NULL) 3112 sbuf_printf(sb, "%s", value); 3113 } 3114 3115 sbuf_printf(sb, "</%s>\n", name); 3116 } 3117 3118 retval = sbuf_printf(sb, "</lun>\n"); 3119 3120 if (retval != 0) 3121 break; 3122 mtx_unlock(&lun->lun_lock); 3123 } 3124 if (lun != NULL) 3125 mtx_unlock(&lun->lun_lock); 3126 mtx_unlock(&softc->ctl_lock); 3127 3128 if ((retval != 0) 3129 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3130 retval = 0; 3131 sbuf_delete(sb); 3132 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3133 snprintf(list->error_str, sizeof(list->error_str), 3134 "Out of space, %d bytes is too small", 3135 list->alloc_len); 3136 break; 3137 } 3138 3139 sbuf_finish(sb); 3140 3141 retval = copyout(sbuf_data(sb), list->lun_xml, 3142 sbuf_len(sb) + 1); 3143 3144 list->fill_len = sbuf_len(sb) + 1; 3145 list->status = CTL_LUN_LIST_OK; 3146 sbuf_delete(sb); 3147 break; 3148 } 3149 case CTL_ISCSI: { 3150 struct ctl_iscsi *ci; 3151 struct ctl_frontend *fe; 3152 3153 ci = (struct ctl_iscsi *)addr; 3154 3155 fe = ctl_frontend_find("iscsi"); 3156 if (fe == NULL) { 3157 ci->status = CTL_ISCSI_ERROR; 3158 snprintf(ci->error_str, sizeof(ci->error_str), 3159 "Frontend \"iscsi\" not found."); 3160 break; 3161 } 3162 3163 retval = fe->ioctl(dev, cmd, addr, flag, td); 3164 break; 3165 } 3166 case CTL_PORT_REQ: { 3167 struct ctl_req *req; 3168 struct ctl_frontend *fe; 3169 void *packed; 3170 nvlist_t *tmp_args_nvl; 3171 size_t packed_len; 3172 3173 req = (struct ctl_req *)addr; 3174 tmp_args_nvl = req->args_nvl; 3175 3176 fe = ctl_frontend_find(req->driver); 3177 if (fe == NULL) { 3178 req->status = CTL_LUN_ERROR; 3179 snprintf(req->error_str, sizeof(req->error_str), 3180 "Frontend \"%s\" not found.", req->driver); 3181 break; 3182 } 3183 3184 if (req->args != NULL) { 3185 packed = malloc(req->args_len, M_CTL, M_WAITOK); 3186 if (copyin(req->args, packed, req->args_len) != 0) { 3187 free(packed, M_CTL); 3188 req->status = CTL_LUN_ERROR; 3189 snprintf(req->error_str, sizeof(req->error_str), 3190 "Cannot copyin args."); 3191 break; 3192 } 3193 req->args_nvl = nvlist_unpack(packed, 3194 req->args_len, 0); 3195 free(packed, M_CTL); 3196 3197 if (req->args_nvl == NULL) { 3198 req->status = CTL_LUN_ERROR; 3199 
snprintf(req->error_str, sizeof(req->error_str), 3200 "Cannot unpack args nvlist."); 3201 break; 3202 } 3203 } else 3204 req->args_nvl = nvlist_create(0); 3205 3206 if (fe->ioctl) 3207 retval = fe->ioctl(dev, cmd, addr, flag, td); 3208 else 3209 retval = ENODEV; 3210 3211 nvlist_destroy(req->args_nvl); 3212 req->args_nvl = tmp_args_nvl; 3213 3214 if (req->result_nvl != NULL) { 3215 if (req->result != NULL) { 3216 packed = nvlist_pack(req->result_nvl, 3217 &packed_len); 3218 if (packed == NULL) { 3219 req->status = CTL_LUN_ERROR; 3220 snprintf(req->error_str, 3221 sizeof(req->error_str), 3222 "Cannot pack result nvlist."); 3223 break; 3224 } 3225 3226 if (packed_len > req->result_len) { 3227 req->status = CTL_LUN_ERROR; 3228 snprintf(req->error_str, 3229 sizeof(req->error_str), 3230 "Result nvlist too large."); 3231 free(packed, M_NVLIST); 3232 break; 3233 } 3234 3235 if (copyout(packed, req->result, packed_len)) { 3236 req->status = CTL_LUN_ERROR; 3237 snprintf(req->error_str, 3238 sizeof(req->error_str), 3239 "Cannot copyout() the result."); 3240 free(packed, M_NVLIST); 3241 break; 3242 } 3243 3244 req->result_len = packed_len; 3245 free(packed, M_NVLIST); 3246 } 3247 3248 nvlist_destroy(req->result_nvl); 3249 } 3250 break; 3251 } 3252 case CTL_PORT_LIST: { 3253 struct sbuf *sb; 3254 struct ctl_port *port; 3255 struct ctl_lun_list *list; 3256 const char *name, *value; 3257 void *cookie; 3258 int j, type; 3259 uint32_t plun; 3260 3261 list = (struct ctl_lun_list *)addr; 3262 3263 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3264 if (sb == NULL) { 3265 list->status = CTL_LUN_LIST_ERROR; 3266 snprintf(list->error_str, sizeof(list->error_str), 3267 "Unable to allocate %d bytes for LUN list", 3268 list->alloc_len); 3269 break; 3270 } 3271 3272 sbuf_printf(sb, "<ctlportlist>\n"); 3273 3274 mtx_lock(&softc->ctl_lock); 3275 STAILQ_FOREACH(port, &softc->port_list, links) { 3276 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3277 (uintmax_t)port->targ_port); 3278 3279 /* 3280 * Bail out as soon as we see that we've overfilled 3281 * the buffer. 3282 */ 3283 if (retval != 0) 3284 break; 3285 3286 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3287 "</frontend_type>\n", port->frontend->name); 3288 if (retval != 0) 3289 break; 3290 3291 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3292 port->port_type); 3293 if (retval != 0) 3294 break; 3295 3296 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3297 (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); 3298 if (retval != 0) 3299 break; 3300 3301 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3302 port->port_name); 3303 if (retval != 0) 3304 break; 3305 3306 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3307 port->physical_port); 3308 if (retval != 0) 3309 break; 3310 3311 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3312 port->virtual_port); 3313 if (retval != 0) 3314 break; 3315 3316 if (port->target_devid != NULL) { 3317 sbuf_printf(sb, "\t<target>"); 3318 ctl_id_sbuf(port->target_devid, sb); 3319 sbuf_printf(sb, "</target>\n"); 3320 } 3321 3322 if (port->port_devid != NULL) { 3323 sbuf_printf(sb, "\t<port>"); 3324 ctl_id_sbuf(port->port_devid, sb); 3325 sbuf_printf(sb, "</port>\n"); 3326 } 3327 3328 if (port->port_info != NULL) { 3329 retval = port->port_info(port->onoff_arg, sb); 3330 if (retval != 0) 3331 break; 3332 } 3333 3334 cookie = NULL; 3335 while ((name = nvlist_next(port->options, &type, 3336 &cookie)) != NULL) { 3337 sbuf_printf(sb, "\t<%s>", name); 3338 3339 if (type == NV_TYPE_STRING) { 3340 value = dnvlist_get_string(port->options, 3341 name, NULL); 3342 if (value != NULL) 3343 sbuf_printf(sb, "%s", value); 3344 } 3345 3346 sbuf_printf(sb, "</%s>\n", name); 3347 } 3348 3349 if (port->lun_map != NULL) { 3350 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3351 for (j = 0; j < port->lun_map_size; j++) { 3352 plun = ctl_lun_map_from_port(port, j); 3353 if (plun == UINT32_MAX) 3354 continue; 3355 sbuf_printf(sb, 3356 "\t<lun id=\"%u\">%u</lun>\n", 3357 j, plun); 3358 } 3359 } 3360 3361 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3362 if (port->wwpn_iid[j].in_use == 0 || 3363 (port->wwpn_iid[j].wwpn == 0 && 3364 port->wwpn_iid[j].name == NULL)) 3365 continue; 3366 3367 if (port->wwpn_iid[j].name != NULL) 3368 retval = sbuf_printf(sb, 3369 "\t<initiator id=\"%u\">%s</initiator>\n", 3370 j, port->wwpn_iid[j].name); 3371 else 3372 retval = sbuf_printf(sb, 3373 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3374 j, port->wwpn_iid[j].wwpn); 3375 if (retval != 0) 3376 break; 3377 } 3378 if (retval != 0) 3379 break; 3380 3381 retval = sbuf_printf(sb, "</targ_port>\n"); 3382 if (retval != 0) 3383 break; 3384 } 3385 mtx_unlock(&softc->ctl_lock); 3386 3387 if ((retval != 0) 3388 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3389 retval = 0; 3390 sbuf_delete(sb); 3391 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3392 snprintf(list->error_str, sizeof(list->error_str), 3393 "Out of space, %d bytes is too small", 3394 list->alloc_len); 3395 break; 3396 } 3397 3398 sbuf_finish(sb); 3399 3400 retval = copyout(sbuf_data(sb), list->lun_xml, 3401 sbuf_len(sb) + 1); 3402 3403 list->fill_len = sbuf_len(sb) + 1; 3404 list->status = CTL_LUN_LIST_OK; 3405 sbuf_delete(sb); 3406 break; 3407 } 3408 case CTL_LUN_MAP: { 3409 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3410 struct ctl_port *port; 3411 3412 mtx_lock(&softc->ctl_lock); 3413 if (lm->port < softc->port_min || 3414 lm->port >= softc->port_max || 3415 (port = softc->ctl_ports[lm->port]) == NULL) { 3416 mtx_unlock(&softc->ctl_lock); 3417 return (ENXIO); 3418 } 3419 if (port->status & CTL_PORT_STATUS_ONLINE) { 3420 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3421 if (ctl_lun_map_to_port(port, lun->lun) == 3422 UINT32_MAX) 3423 continue; 3424 mtx_lock(&lun->lun_lock); 3425 ctl_est_ua_port(lun, lm->port, -1, 3426 CTL_UA_LUN_CHANGE); 3427 mtx_unlock(&lun->lun_lock); 3428 } 3429 } 3430 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3431 if 
(lm->plun != UINT32_MAX) { 3432 if (lm->lun == UINT32_MAX) 3433 retval = ctl_lun_map_unset(port, lm->plun); 3434 else if (lm->lun < ctl_max_luns && 3435 softc->ctl_luns[lm->lun] != NULL) 3436 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3437 else 3438 return (ENXIO); 3439 } else { 3440 if (lm->lun == UINT32_MAX) 3441 retval = ctl_lun_map_deinit(port); 3442 else 3443 retval = ctl_lun_map_init(port); 3444 } 3445 if (port->status & CTL_PORT_STATUS_ONLINE) 3446 ctl_isc_announce_port(port); 3447 break; 3448 } 3449 case CTL_GET_LUN_STATS: { 3450 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3451 int i; 3452 3453 /* 3454 * XXX KDM no locking here. If the LUN list changes, 3455 * things can blow up. 3456 */ 3457 i = 0; 3458 stats->status = CTL_SS_OK; 3459 stats->fill_len = 0; 3460 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3461 if (lun->lun < stats->first_item) 3462 continue; 3463 if (stats->fill_len + sizeof(lun->stats) > 3464 stats->alloc_len) { 3465 stats->status = CTL_SS_NEED_MORE_SPACE; 3466 break; 3467 } 3468 retval = copyout(&lun->stats, &stats->stats[i++], 3469 sizeof(lun->stats)); 3470 if (retval != 0) 3471 break; 3472 stats->fill_len += sizeof(lun->stats); 3473 } 3474 stats->num_items = softc->num_luns; 3475 stats->flags = CTL_STATS_FLAG_NONE; 3476 #ifdef CTL_TIME_IO 3477 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3478 #endif 3479 getnanouptime(&stats->timestamp); 3480 break; 3481 } 3482 case CTL_GET_PORT_STATS: { 3483 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3484 int i; 3485 3486 /* 3487 * XXX KDM no locking here. If the LUN list changes, 3488 * things can blow up. 3489 */ 3490 i = 0; 3491 stats->status = CTL_SS_OK; 3492 stats->fill_len = 0; 3493 STAILQ_FOREACH(port, &softc->port_list, links) { 3494 if (port->targ_port < stats->first_item) 3495 continue; 3496 if (stats->fill_len + sizeof(port->stats) > 3497 stats->alloc_len) { 3498 stats->status = CTL_SS_NEED_MORE_SPACE; 3499 break; 3500 } 3501 retval = copyout(&port->stats, &stats->stats[i++], 3502 sizeof(port->stats)); 3503 if (retval != 0) 3504 break; 3505 stats->fill_len += sizeof(port->stats); 3506 } 3507 stats->num_items = softc->num_ports; 3508 stats->flags = CTL_STATS_FLAG_NONE; 3509 #ifdef CTL_TIME_IO 3510 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3511 #endif 3512 getnanouptime(&stats->timestamp); 3513 break; 3514 } 3515 default: { 3516 /* XXX KDM should we fix this? */ 3517 #if 0 3518 struct ctl_backend_driver *backend; 3519 unsigned int type; 3520 int found; 3521 3522 found = 0; 3523 3524 /* 3525 * We encode the backend type as the ioctl type for backend 3526 * ioctls. So parse it out here, and then search for a 3527 * backend of this type. 
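 * (This lookup is currently compiled out with #if 0; unknown ioctls simply
 * return ENOTTY below.)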
3528 */ 3529 type = _IOC_TYPE(cmd); 3530 3531 STAILQ_FOREACH(backend, &softc->be_list, links) { 3532 if (backend->type == type) { 3533 found = 1; 3534 break; 3535 } 3536 } 3537 if (found == 0) { 3538 printf("ctl: unknown ioctl command %#lx or backend " 3539 "%d\n", cmd, type); 3540 retval = EINVAL; 3541 break; 3542 } 3543 retval = backend->ioctl(dev, cmd, addr, flag, td); 3544 #endif 3545 retval = ENOTTY; 3546 break; 3547 } 3548 } 3549 return (retval); 3550 } 3551 3552 uint32_t 3553 ctl_get_initindex(struct ctl_nexus *nexus) 3554 { 3555 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3556 } 3557 3558 int 3559 ctl_lun_map_init(struct ctl_port *port) 3560 { 3561 struct ctl_softc *softc = port->ctl_softc; 3562 struct ctl_lun *lun; 3563 int size = ctl_lun_map_size; 3564 uint32_t i; 3565 3566 if (port->lun_map == NULL || port->lun_map_size < size) { 3567 port->lun_map_size = 0; 3568 free(port->lun_map, M_CTL); 3569 port->lun_map = malloc(size * sizeof(uint32_t), 3570 M_CTL, M_NOWAIT); 3571 } 3572 if (port->lun_map == NULL) 3573 return (ENOMEM); 3574 for (i = 0; i < size; i++) 3575 port->lun_map[i] = UINT32_MAX; 3576 port->lun_map_size = size; 3577 if (port->status & CTL_PORT_STATUS_ONLINE) { 3578 if (port->lun_disable != NULL) { 3579 STAILQ_FOREACH(lun, &softc->lun_list, links) 3580 port->lun_disable(port->targ_lun_arg, lun->lun); 3581 } 3582 ctl_isc_announce_port(port); 3583 } 3584 return (0); 3585 } 3586 3587 int 3588 ctl_lun_map_deinit(struct ctl_port *port) 3589 { 3590 struct ctl_softc *softc = port->ctl_softc; 3591 struct ctl_lun *lun; 3592 3593 if (port->lun_map == NULL) 3594 return (0); 3595 port->lun_map_size = 0; 3596 free(port->lun_map, M_CTL); 3597 port->lun_map = NULL; 3598 if (port->status & CTL_PORT_STATUS_ONLINE) { 3599 if (port->lun_enable != NULL) { 3600 STAILQ_FOREACH(lun, &softc->lun_list, links) 3601 port->lun_enable(port->targ_lun_arg, lun->lun); 3602 } 3603 ctl_isc_announce_port(port); 3604 } 3605 return (0); 3606 } 3607 3608 int 3609 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3610 { 3611 int status; 3612 uint32_t old; 3613 3614 if (port->lun_map == NULL) { 3615 status = ctl_lun_map_init(port); 3616 if (status != 0) 3617 return (status); 3618 } 3619 if (plun >= port->lun_map_size) 3620 return (EINVAL); 3621 old = port->lun_map[plun]; 3622 port->lun_map[plun] = glun; 3623 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3624 if (port->lun_enable != NULL) 3625 port->lun_enable(port->targ_lun_arg, plun); 3626 ctl_isc_announce_port(port); 3627 } 3628 return (0); 3629 } 3630 3631 int 3632 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3633 { 3634 uint32_t old; 3635 3636 if (port->lun_map == NULL || plun >= port->lun_map_size) 3637 return (0); 3638 old = port->lun_map[plun]; 3639 port->lun_map[plun] = UINT32_MAX; 3640 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3641 if (port->lun_disable != NULL) 3642 port->lun_disable(port->targ_lun_arg, plun); 3643 ctl_isc_announce_port(port); 3644 } 3645 return (0); 3646 } 3647 3648 uint32_t 3649 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3650 { 3651 3652 if (port == NULL) 3653 return (UINT32_MAX); 3654 if (port->lun_map == NULL) 3655 return (lun_id); 3656 if (lun_id > port->lun_map_size) 3657 return (UINT32_MAX); 3658 return (port->lun_map[lun_id]); 3659 } 3660 3661 uint32_t 3662 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3663 { 3664 uint32_t i; 3665 3666 if (port == NULL) 3667 return (UINT32_MAX); 3668 if 
(port->lun_map == NULL) 3669 return (lun_id); 3670 for (i = 0; i < port->lun_map_size; i++) { 3671 if (port->lun_map[i] == lun_id) 3672 return (i); 3673 } 3674 return (UINT32_MAX); 3675 } 3676 3677 uint32_t 3678 ctl_decode_lun(uint64_t encoded) 3679 { 3680 uint8_t lun[8]; 3681 uint32_t result = 0xffffffff; 3682 3683 be64enc(lun, encoded); 3684 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3685 case RPL_LUNDATA_ATYP_PERIPH: 3686 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3687 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3688 result = lun[1]; 3689 break; 3690 case RPL_LUNDATA_ATYP_FLAT: 3691 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3692 lun[6] == 0 && lun[7] == 0) 3693 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3694 break; 3695 case RPL_LUNDATA_ATYP_EXTLUN: 3696 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3697 case 0x02: 3698 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3699 case 0x00: 3700 result = lun[1]; 3701 break; 3702 case 0x10: 3703 result = (lun[1] << 16) + (lun[2] << 8) + 3704 lun[3]; 3705 break; 3706 case 0x20: 3707 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3708 result = (lun[2] << 24) + 3709 (lun[3] << 16) + (lun[4] << 8) + 3710 lun[5]; 3711 break; 3712 } 3713 break; 3714 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3715 result = 0xffffffff; 3716 break; 3717 } 3718 break; 3719 } 3720 return (result); 3721 } 3722 3723 uint64_t 3724 ctl_encode_lun(uint32_t decoded) 3725 { 3726 uint64_t l = decoded; 3727 3728 if (l <= 0xff) 3729 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3730 if (l <= 0x3fff) 3731 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3732 if (l <= 0xffffff) 3733 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3734 (l << 32)); 3735 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3736 } 3737 3738 int 3739 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3740 { 3741 int i; 3742 3743 for (i = first; i < last; i++) { 3744 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3745 return (i); 3746 } 3747 return (-1); 3748 } 3749 3750 int 3751 ctl_set_mask(uint32_t *mask, uint32_t bit) 3752 { 3753 uint32_t chunk, piece; 3754 3755 chunk = bit >> 5; 3756 piece = bit % (sizeof(uint32_t) * 8); 3757 3758 if ((mask[chunk] & (1 << piece)) != 0) 3759 return (-1); 3760 else 3761 mask[chunk] |= (1 << piece); 3762 3763 return (0); 3764 } 3765 3766 int 3767 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3768 { 3769 uint32_t chunk, piece; 3770 3771 chunk = bit >> 5; 3772 piece = bit % (sizeof(uint32_t) * 8); 3773 3774 if ((mask[chunk] & (1 << piece)) == 0) 3775 return (-1); 3776 else 3777 mask[chunk] &= ~(1 << piece); 3778 3779 return (0); 3780 } 3781 3782 int 3783 ctl_is_set(uint32_t *mask, uint32_t bit) 3784 { 3785 uint32_t chunk, piece; 3786 3787 chunk = bit >> 5; 3788 piece = bit % (sizeof(uint32_t) * 8); 3789 3790 if ((mask[chunk] & (1 << piece)) == 0) 3791 return (0); 3792 else 3793 return (1); 3794 } 3795 3796 static uint64_t 3797 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3798 { 3799 uint64_t *t; 3800 3801 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3802 if (t == NULL) 3803 return (0); 3804 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3805 } 3806 3807 static void 3808 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3809 { 3810 uint64_t *t; 3811 3812 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3813 if (t == NULL) 3814 return; 3815 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3816 } 3817 3818 static void 3819 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3820 
{ 3821 uint64_t *p; 3822 u_int i; 3823 3824 i = residx/CTL_MAX_INIT_PER_PORT; 3825 if (lun->pr_keys[i] != NULL) 3826 return; 3827 mtx_unlock(&lun->lun_lock); 3828 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3829 M_WAITOK | M_ZERO); 3830 mtx_lock(&lun->lun_lock); 3831 if (lun->pr_keys[i] == NULL) 3832 lun->pr_keys[i] = p; 3833 else 3834 free(p, M_CTL); 3835 } 3836 3837 static void 3838 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3839 { 3840 uint64_t *t; 3841 3842 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3843 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3844 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3845 } 3846 3847 /* 3848 * ctl_softc, pool_name, total_ctl_io are passed in. 3849 * npool is passed out. 3850 */ 3851 int 3852 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3853 uint32_t total_ctl_io, void **npool) 3854 { 3855 struct ctl_io_pool *pool; 3856 3857 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3858 M_NOWAIT | M_ZERO); 3859 if (pool == NULL) 3860 return (ENOMEM); 3861 3862 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3863 pool->ctl_softc = ctl_softc; 3864 #ifdef IO_POOLS 3865 pool->zone = uma_zsecond_create(pool->name, NULL, 3866 NULL, NULL, NULL, ctl_softc->io_zone); 3867 /* uma_prealloc(pool->zone, total_ctl_io); */ 3868 #else 3869 pool->zone = ctl_softc->io_zone; 3870 #endif 3871 3872 *npool = pool; 3873 return (0); 3874 } 3875 3876 void 3877 ctl_pool_free(struct ctl_io_pool *pool) 3878 { 3879 3880 if (pool == NULL) 3881 return; 3882 3883 #ifdef IO_POOLS 3884 uma_zdestroy(pool->zone); 3885 #endif 3886 free(pool, M_CTL); 3887 } 3888 3889 union ctl_io * 3890 ctl_alloc_io(void *pool_ref) 3891 { 3892 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3893 union ctl_io *io; 3894 3895 io = uma_zalloc(pool->zone, M_WAITOK); 3896 if (io != NULL) { 3897 io->io_hdr.pool = pool_ref; 3898 CTL_SOFTC(io) = pool->ctl_softc; 3899 TAILQ_INIT(&io->io_hdr.blocked_queue); 3900 } 3901 return (io); 3902 } 3903 3904 union ctl_io * 3905 ctl_alloc_io_nowait(void *pool_ref) 3906 { 3907 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3908 union ctl_io *io; 3909 3910 io = uma_zalloc(pool->zone, M_NOWAIT); 3911 if (io != NULL) { 3912 io->io_hdr.pool = pool_ref; 3913 CTL_SOFTC(io) = pool->ctl_softc; 3914 TAILQ_INIT(&io->io_hdr.blocked_queue); 3915 } 3916 return (io); 3917 } 3918 3919 void 3920 ctl_free_io(union ctl_io *io) 3921 { 3922 struct ctl_io_pool *pool; 3923 3924 if (io == NULL) 3925 return; 3926 3927 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3928 uma_zfree(pool->zone, io); 3929 } 3930 3931 void 3932 ctl_zero_io(union ctl_io *io) 3933 { 3934 struct ctl_io_pool *pool; 3935 3936 if (io == NULL) 3937 return; 3938 3939 /* 3940 * May need to preserve linked list pointers at some point too. 
3941 */ 3942 pool = io->io_hdr.pool; 3943 memset(io, 0, sizeof(*io)); 3944 io->io_hdr.pool = pool; 3945 CTL_SOFTC(io) = pool->ctl_softc; 3946 TAILQ_INIT(&io->io_hdr.blocked_queue); 3947 } 3948 3949 int 3950 ctl_expand_number(const char *buf, uint64_t *num) 3951 { 3952 char *endptr; 3953 uint64_t number; 3954 unsigned shift; 3955 3956 number = strtoq(buf, &endptr, 0); 3957 3958 switch (tolower((unsigned char)*endptr)) { 3959 case 'e': 3960 shift = 60; 3961 break; 3962 case 'p': 3963 shift = 50; 3964 break; 3965 case 't': 3966 shift = 40; 3967 break; 3968 case 'g': 3969 shift = 30; 3970 break; 3971 case 'm': 3972 shift = 20; 3973 break; 3974 case 'k': 3975 shift = 10; 3976 break; 3977 case 'b': 3978 case '\0': /* No unit. */ 3979 *num = number; 3980 return (0); 3981 default: 3982 /* Unrecognized unit. */ 3983 return (-1); 3984 } 3985 3986 if ((number << shift) >> shift != number) { 3987 /* Overflow */ 3988 return (-1); 3989 } 3990 *num = number << shift; 3991 return (0); 3992 } 3993 3994 3995 /* 3996 * This routine could be used in the future to load default and/or saved 3997 * mode page parameters for a particuar lun. 3998 */ 3999 static int 4000 ctl_init_page_index(struct ctl_lun *lun) 4001 { 4002 int i, page_code; 4003 struct ctl_page_index *page_index; 4004 const char *value; 4005 uint64_t ival; 4006 4007 memcpy(&lun->mode_pages.index, page_index_template, 4008 sizeof(page_index_template)); 4009 4010 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4011 4012 page_index = &lun->mode_pages.index[i]; 4013 if (lun->be_lun->lun_type == T_DIRECT && 4014 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4015 continue; 4016 if (lun->be_lun->lun_type == T_PROCESSOR && 4017 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4018 continue; 4019 if (lun->be_lun->lun_type == T_CDROM && 4020 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4021 continue; 4022 4023 page_code = page_index->page_code & SMPH_PC_MASK; 4024 switch (page_code) { 4025 case SMS_RW_ERROR_RECOVERY_PAGE: { 4026 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4027 ("subpage %#x for page %#x is incorrect!", 4028 page_index->subpage, page_code)); 4029 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4030 &rw_er_page_default, 4031 sizeof(rw_er_page_default)); 4032 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4033 &rw_er_page_changeable, 4034 sizeof(rw_er_page_changeable)); 4035 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4036 &rw_er_page_default, 4037 sizeof(rw_er_page_default)); 4038 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4039 &rw_er_page_default, 4040 sizeof(rw_er_page_default)); 4041 page_index->page_data = 4042 (uint8_t *)lun->mode_pages.rw_er_page; 4043 break; 4044 } 4045 case SMS_FORMAT_DEVICE_PAGE: { 4046 struct scsi_format_page *format_page; 4047 4048 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4049 ("subpage %#x for page %#x is incorrect!", 4050 page_index->subpage, page_code)); 4051 4052 /* 4053 * Sectors per track are set above. Bytes per 4054 * sector need to be set here on a per-LUN basis. 
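 * As an illustrative example (no particular backend assumed), a LUN
 * with a 512-byte blocksize ends up with bytes_per_sector set to
 * {0x02, 0x00} in the copies below, since scsi_ulto2b() stores the
 * value big-endian.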
4055 */ 4056 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4057 &format_page_default, 4058 sizeof(format_page_default)); 4059 memcpy(&lun->mode_pages.format_page[ 4060 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4061 sizeof(format_page_changeable)); 4062 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4063 &format_page_default, 4064 sizeof(format_page_default)); 4065 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4066 &format_page_default, 4067 sizeof(format_page_default)); 4068 4069 format_page = &lun->mode_pages.format_page[ 4070 CTL_PAGE_CURRENT]; 4071 scsi_ulto2b(lun->be_lun->blocksize, 4072 format_page->bytes_per_sector); 4073 4074 format_page = &lun->mode_pages.format_page[ 4075 CTL_PAGE_DEFAULT]; 4076 scsi_ulto2b(lun->be_lun->blocksize, 4077 format_page->bytes_per_sector); 4078 4079 format_page = &lun->mode_pages.format_page[ 4080 CTL_PAGE_SAVED]; 4081 scsi_ulto2b(lun->be_lun->blocksize, 4082 format_page->bytes_per_sector); 4083 4084 page_index->page_data = 4085 (uint8_t *)lun->mode_pages.format_page; 4086 break; 4087 } 4088 case SMS_RIGID_DISK_PAGE: { 4089 struct scsi_rigid_disk_page *rigid_disk_page; 4090 uint32_t sectors_per_cylinder; 4091 uint64_t cylinders; 4092 #ifndef __XSCALE__ 4093 int shift; 4094 #endif /* !__XSCALE__ */ 4095 4096 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4097 ("subpage %#x for page %#x is incorrect!", 4098 page_index->subpage, page_code)); 4099 4100 /* 4101 * Rotation rate and sectors per track are set 4102 * above. We calculate the cylinders here based on 4103 * capacity. Due to the number of heads and 4104 * sectors per track we're using, smaller arrays 4105 * may turn out to have 0 cylinders. Linux and 4106 * FreeBSD don't pay attention to these mode pages 4107 * to figure out capacity, but Solaris does. It 4108 * seems to deal with 0 cylinders just fine, and 4109 * works out a fake geometry based on the capacity. 4110 */ 4111 memcpy(&lun->mode_pages.rigid_disk_page[ 4112 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4113 sizeof(rigid_disk_page_default)); 4114 memcpy(&lun->mode_pages.rigid_disk_page[ 4115 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4116 sizeof(rigid_disk_page_changeable)); 4117 4118 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4119 CTL_DEFAULT_HEADS; 4120 4121 /* 4122 * The divide method here will be more accurate, 4123 * probably, but results in floating point being 4124 * used in the kernel on i386 (__udivdi3()). On the 4125 * XScale, though, __udivdi3() is implemented in 4126 * software. 4127 * 4128 * The shift method for cylinder calculation is 4129 * accurate if sectors_per_cylinder is a power of 4130 * 2. Otherwise it might be slightly off -- you 4131 * might have a bit of a truncation problem. 4132 */ 4133 #ifdef __XSCALE__ 4134 cylinders = (lun->be_lun->maxlba + 1) / 4135 sectors_per_cylinder; 4136 #else 4137 for (shift = 31; shift > 0; shift--) { 4138 if (sectors_per_cylinder & (1 << shift)) 4139 break; 4140 } 4141 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4142 #endif 4143 4144 /* 4145 * We've basically got 3 bytes, or 24 bits for the 4146 * cylinder size in the mode page. If we're over, 4147 * just round down to 2^24. 
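 * Worked example (numbers assumed purely for illustration): if
 * sectors_per_cylinder came out to 32768 (say, 256 sectors per track
 * times 128 heads), the loop below finds shift = 15, so a LUN with
 * maxlba + 1 == 2147483648 (1 TiB of 512-byte blocks) reports
 * 2147483648 >> 15 == 65536 cylinders, comfortably under the 24-bit
 * limit handled next.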
4148 */ 4149 if (cylinders > 0xffffff) 4150 cylinders = 0xffffff; 4151 4152 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4153 CTL_PAGE_DEFAULT]; 4154 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4155 4156 if ((value = dnvlist_get_string(lun->be_lun->options, 4157 "rpm", NULL)) != NULL) { 4158 scsi_ulto2b(strtol(value, NULL, 0), 4159 rigid_disk_page->rotation_rate); 4160 } 4161 4162 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4163 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4164 sizeof(rigid_disk_page_default)); 4165 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4166 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4167 sizeof(rigid_disk_page_default)); 4168 4169 page_index->page_data = 4170 (uint8_t *)lun->mode_pages.rigid_disk_page; 4171 break; 4172 } 4173 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4174 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4175 ("subpage %#x for page %#x is incorrect!", 4176 page_index->subpage, page_code)); 4177 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4178 &verify_er_page_default, 4179 sizeof(verify_er_page_default)); 4180 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4181 &verify_er_page_changeable, 4182 sizeof(verify_er_page_changeable)); 4183 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4184 &verify_er_page_default, 4185 sizeof(verify_er_page_default)); 4186 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4187 &verify_er_page_default, 4188 sizeof(verify_er_page_default)); 4189 page_index->page_data = 4190 (uint8_t *)lun->mode_pages.verify_er_page; 4191 break; 4192 } 4193 case SMS_CACHING_PAGE: { 4194 struct scsi_caching_page *caching_page; 4195 4196 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4197 ("subpage %#x for page %#x is incorrect!", 4198 page_index->subpage, page_code)); 4199 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4200 &caching_page_default, 4201 sizeof(caching_page_default)); 4202 memcpy(&lun->mode_pages.caching_page[ 4203 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4204 sizeof(caching_page_changeable)); 4205 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4206 &caching_page_default, 4207 sizeof(caching_page_default)); 4208 caching_page = &lun->mode_pages.caching_page[ 4209 CTL_PAGE_SAVED]; 4210 value = dnvlist_get_string(lun->be_lun->options, 4211 "writecache", NULL); 4212 if (value != NULL && strcmp(value, "off") == 0) 4213 caching_page->flags1 &= ~SCP_WCE; 4214 value = dnvlist_get_string(lun->be_lun->options, 4215 "readcache", NULL); 4216 if (value != NULL && strcmp(value, "off") == 0) 4217 caching_page->flags1 |= SCP_RCD; 4218 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4219 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4220 sizeof(caching_page_default)); 4221 page_index->page_data = 4222 (uint8_t *)lun->mode_pages.caching_page; 4223 break; 4224 } 4225 case SMS_CONTROL_MODE_PAGE: { 4226 switch (page_index->subpage) { 4227 case SMS_SUBPAGE_PAGE_0: { 4228 struct scsi_control_page *control_page; 4229 4230 memcpy(&lun->mode_pages.control_page[ 4231 CTL_PAGE_DEFAULT], 4232 &control_page_default, 4233 sizeof(control_page_default)); 4234 memcpy(&lun->mode_pages.control_page[ 4235 CTL_PAGE_CHANGEABLE], 4236 &control_page_changeable, 4237 sizeof(control_page_changeable)); 4238 memcpy(&lun->mode_pages.control_page[ 4239 CTL_PAGE_SAVED], 4240 &control_page_default, 4241 sizeof(control_page_default)); 4242 control_page = &lun->mode_pages.control_page[ 4243 CTL_PAGE_SAVED]; 4244 value = 
dnvlist_get_string(lun->be_lun->options, 4245 "reordering", NULL); 4246 if (value != NULL && 4247 strcmp(value, "unrestricted") == 0) { 4248 control_page->queue_flags &= 4249 ~SCP_QUEUE_ALG_MASK; 4250 control_page->queue_flags |= 4251 SCP_QUEUE_ALG_UNRESTRICTED; 4252 } 4253 memcpy(&lun->mode_pages.control_page[ 4254 CTL_PAGE_CURRENT], 4255 &lun->mode_pages.control_page[ 4256 CTL_PAGE_SAVED], 4257 sizeof(control_page_default)); 4258 page_index->page_data = 4259 (uint8_t *)lun->mode_pages.control_page; 4260 break; 4261 } 4262 case 0x01: 4263 memcpy(&lun->mode_pages.control_ext_page[ 4264 CTL_PAGE_DEFAULT], 4265 &control_ext_page_default, 4266 sizeof(control_ext_page_default)); 4267 memcpy(&lun->mode_pages.control_ext_page[ 4268 CTL_PAGE_CHANGEABLE], 4269 &control_ext_page_changeable, 4270 sizeof(control_ext_page_changeable)); 4271 memcpy(&lun->mode_pages.control_ext_page[ 4272 CTL_PAGE_SAVED], 4273 &control_ext_page_default, 4274 sizeof(control_ext_page_default)); 4275 memcpy(&lun->mode_pages.control_ext_page[ 4276 CTL_PAGE_CURRENT], 4277 &lun->mode_pages.control_ext_page[ 4278 CTL_PAGE_SAVED], 4279 sizeof(control_ext_page_default)); 4280 page_index->page_data = 4281 (uint8_t *)lun->mode_pages.control_ext_page; 4282 break; 4283 default: 4284 panic("subpage %#x for page %#x is incorrect!", 4285 page_index->subpage, page_code); 4286 } 4287 break; 4288 } 4289 case SMS_INFO_EXCEPTIONS_PAGE: { 4290 switch (page_index->subpage) { 4291 case SMS_SUBPAGE_PAGE_0: 4292 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4293 &ie_page_default, 4294 sizeof(ie_page_default)); 4295 memcpy(&lun->mode_pages.ie_page[ 4296 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4297 sizeof(ie_page_changeable)); 4298 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4299 &ie_page_default, 4300 sizeof(ie_page_default)); 4301 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4302 &ie_page_default, 4303 sizeof(ie_page_default)); 4304 page_index->page_data = 4305 (uint8_t *)lun->mode_pages.ie_page; 4306 break; 4307 case 0x02: { 4308 struct ctl_logical_block_provisioning_page *page; 4309 4310 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4311 &lbp_page_default, 4312 sizeof(lbp_page_default)); 4313 memcpy(&lun->mode_pages.lbp_page[ 4314 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4315 sizeof(lbp_page_changeable)); 4316 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4317 &lbp_page_default, 4318 sizeof(lbp_page_default)); 4319 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4320 value = dnvlist_get_string(lun->be_lun->options, 4321 "avail-threshold", NULL); 4322 if (value != NULL && 4323 ctl_expand_number(value, &ival) == 0) { 4324 page->descr[0].flags |= SLBPPD_ENABLED | 4325 SLBPPD_ARMING_DEC; 4326 if (lun->be_lun->blocksize) 4327 ival /= lun->be_lun->blocksize; 4328 else 4329 ival /= 512; 4330 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4331 page->descr[0].count); 4332 } 4333 value = dnvlist_get_string(lun->be_lun->options, 4334 "used-threshold", NULL); 4335 if (value != NULL && 4336 ctl_expand_number(value, &ival) == 0) { 4337 page->descr[1].flags |= SLBPPD_ENABLED | 4338 SLBPPD_ARMING_INC; 4339 if (lun->be_lun->blocksize) 4340 ival /= lun->be_lun->blocksize; 4341 else 4342 ival /= 512; 4343 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4344 page->descr[1].count); 4345 } 4346 value = dnvlist_get_string(lun->be_lun->options, 4347 "pool-avail-threshold", NULL); 4348 if (value != NULL && 4349 ctl_expand_number(value, &ival) == 0) { 4350 page->descr[2].flags |= SLBPPD_ENABLED | 4351 SLBPPD_ARMING_DEC; 4352 if (lun->be_lun->blocksize) 
4353 ival /= lun->be_lun->blocksize; 4354 else 4355 ival /= 512; 4356 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4357 page->descr[2].count); 4358 } 4359 value = dnvlist_get_string(lun->be_lun->options, 4360 "pool-used-threshold", NULL); 4361 if (value != NULL && 4362 ctl_expand_number(value, &ival) == 0) { 4363 page->descr[3].flags |= SLBPPD_ENABLED | 4364 SLBPPD_ARMING_INC; 4365 if (lun->be_lun->blocksize) 4366 ival /= lun->be_lun->blocksize; 4367 else 4368 ival /= 512; 4369 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4370 page->descr[3].count); 4371 } 4372 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4373 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4374 sizeof(lbp_page_default)); 4375 page_index->page_data = 4376 (uint8_t *)lun->mode_pages.lbp_page; 4377 break; 4378 } 4379 default: 4380 panic("subpage %#x for page %#x is incorrect!", 4381 page_index->subpage, page_code); 4382 } 4383 break; 4384 } 4385 case SMS_CDDVD_CAPS_PAGE:{ 4386 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4387 ("subpage %#x for page %#x is incorrect!", 4388 page_index->subpage, page_code)); 4389 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4390 &cddvd_page_default, 4391 sizeof(cddvd_page_default)); 4392 memcpy(&lun->mode_pages.cddvd_page[ 4393 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4394 sizeof(cddvd_page_changeable)); 4395 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4396 &cddvd_page_default, 4397 sizeof(cddvd_page_default)); 4398 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4399 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4400 sizeof(cddvd_page_default)); 4401 page_index->page_data = 4402 (uint8_t *)lun->mode_pages.cddvd_page; 4403 break; 4404 } 4405 default: 4406 panic("invalid page code value %#x", page_code); 4407 } 4408 } 4409 4410 return (CTL_RETVAL_COMPLETE); 4411 } 4412 4413 static int 4414 ctl_init_log_page_index(struct ctl_lun *lun) 4415 { 4416 struct ctl_page_index *page_index; 4417 int i, j, k, prev; 4418 4419 memcpy(&lun->log_pages.index, log_page_index_template, 4420 sizeof(log_page_index_template)); 4421 4422 prev = -1; 4423 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4424 4425 page_index = &lun->log_pages.index[i]; 4426 if (lun->be_lun->lun_type == T_DIRECT && 4427 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4428 continue; 4429 if (lun->be_lun->lun_type == T_PROCESSOR && 4430 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4431 continue; 4432 if (lun->be_lun->lun_type == T_CDROM && 4433 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4434 continue; 4435 4436 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4437 lun->backend->lun_attr == NULL) 4438 continue; 4439 4440 if (page_index->page_code != prev) { 4441 lun->log_pages.pages_page[j] = page_index->page_code; 4442 prev = page_index->page_code; 4443 j++; 4444 } 4445 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4446 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4447 k++; 4448 } 4449 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4450 lun->log_pages.index[0].page_len = j; 4451 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4452 lun->log_pages.index[1].page_len = k * 2; 4453 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; 4454 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); 4455 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; 4456 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; 4457 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.stat_page; 4458 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); 4459 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; 4460 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); 4461 4462 return (CTL_RETVAL_COMPLETE); 4463 } 4464 4465 static int 4466 hex2bin(const char *str, uint8_t *buf, int buf_size) 4467 { 4468 int i; 4469 u_char c; 4470 4471 memset(buf, 0, buf_size); 4472 while (isspace(str[0])) 4473 str++; 4474 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4475 str += 2; 4476 buf_size *= 2; 4477 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4478 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4479 str++; 4480 c = str[i]; 4481 if (isdigit(c)) 4482 c -= '0'; 4483 else if (isalpha(c)) 4484 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4485 else 4486 break; 4487 if (c >= 16) 4488 break; 4489 if ((i & 1) == 0) 4490 buf[i / 2] |= (c << 4); 4491 else 4492 buf[i / 2] |= c; 4493 } 4494 return ((i + 1) / 2); 4495 } 4496 4497 /* 4498 * LUN allocation. 4499 * 4500 * Requirements: 4501 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4502 * wants us to allocate the LUN and he can block. 4503 * - ctl_softc is always set 4504 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4505 * 4506 * Returns 0 for success, non-zero (errno) for failure. 4507 */ 4508 static int 4509 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4510 struct ctl_be_lun *const be_lun) 4511 { 4512 struct ctl_lun *nlun, *lun; 4513 struct scsi_vpd_id_descriptor *desc; 4514 struct scsi_vpd_id_t10 *t10id; 4515 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4516 int lun_number, lun_malloced; 4517 int devidlen, idlen1, idlen2 = 0, len; 4518 4519 if (be_lun == NULL) 4520 return (EINVAL); 4521 4522 /* 4523 * We currently only support Direct Access or Processor LUN types. 4524 */ 4525 switch (be_lun->lun_type) { 4526 case T_DIRECT: 4527 case T_PROCESSOR: 4528 case T_CDROM: 4529 break; 4530 case T_SEQUENTIAL: 4531 case T_CHANGER: 4532 default: 4533 be_lun->lun_config_status(be_lun->be_lun, 4534 CTL_LUN_CONFIG_FAILURE); 4535 break; 4536 } 4537 if (ctl_lun == NULL) { 4538 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4539 lun_malloced = 1; 4540 } else { 4541 lun_malloced = 0; 4542 lun = ctl_lun; 4543 } 4544 4545 memset(lun, 0, sizeof(*lun)); 4546 if (lun_malloced) 4547 lun->flags = CTL_LUN_MALLOCED; 4548 4549 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4550 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4551 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4552 M_DEVBUF, M_WAITOK | M_ZERO); 4553 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4554 M_DEVBUF, M_WAITOK | M_ZERO); 4555 4556 /* Generate LUN ID. 
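 * The descriptors built here are what the Device Identification VPD
 * page later reports: a T10 vendor ID descriptor is always present,
 * and the optional "scsiname", "eui", "naa" and "uuid" LUN options
 * add SCSI name string, EUI-64, NAA and UUID designators respectively.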
*/ 4557 devidlen = max(CTL_DEVID_MIN_LEN, 4558 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4559 idlen1 = sizeof(*t10id) + devidlen; 4560 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4561 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4562 if (scsiname != NULL) { 4563 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4564 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4565 } 4566 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4567 if (eui != NULL) { 4568 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4569 } 4570 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4571 if (naa != NULL) { 4572 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4573 } 4574 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4575 if (uuid != NULL) { 4576 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4577 } 4578 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4579 M_CTL, M_WAITOK | M_ZERO); 4580 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4581 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4582 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4583 desc->length = idlen1; 4584 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4585 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4586 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4587 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4588 } else { 4589 strncpy(t10id->vendor, vendor, 4590 min(sizeof(t10id->vendor), strlen(vendor))); 4591 } 4592 strncpy((char *)t10id->vendor_spec_id, 4593 (char *)be_lun->device_id, devidlen); 4594 if (scsiname != NULL) { 4595 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4596 desc->length); 4597 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4598 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4599 SVPD_ID_TYPE_SCSI_NAME; 4600 desc->length = idlen2; 4601 strlcpy(desc->identifier, scsiname, idlen2); 4602 } 4603 if (eui != NULL) { 4604 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4605 desc->length); 4606 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4607 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4608 SVPD_ID_TYPE_EUI64; 4609 desc->length = hex2bin(eui, desc->identifier, 16); 4610 desc->length = desc->length > 12 ? 16 : 4611 (desc->length > 8 ? 12 : 8); 4612 len -= 16 - desc->length; 4613 } 4614 if (naa != NULL) { 4615 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4616 desc->length); 4617 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4618 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4619 SVPD_ID_TYPE_NAA; 4620 desc->length = hex2bin(naa, desc->identifier, 16); 4621 desc->length = desc->length > 8 ? 16 : 8; 4622 len -= 16 - desc->length; 4623 } 4624 if (uuid != NULL) { 4625 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4626 desc->length); 4627 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4628 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4629 SVPD_ID_TYPE_UUID; 4630 desc->identifier[0] = 0x10; 4631 hex2bin(uuid, &desc->identifier[2], 16); 4632 desc->length = 18; 4633 } 4634 lun->lun_devid->len = len; 4635 4636 mtx_lock(&ctl_softc->ctl_lock); 4637 /* 4638 * See if the caller requested a particular LUN number. If so, see 4639 * if it is available. Otherwise, allocate the first available LUN. 
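 * Either way the number is claimed in the ctl_lun_mask bitmap: a
 * requested ID just has its bit tested and set, while the unnumbered
 * case uses ctl_ffz() to find the first clear bit in [0, ctl_max_luns).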
4640 */ 4641 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4642 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4643 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4644 mtx_unlock(&ctl_softc->ctl_lock); 4645 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4646 printf("ctl: requested LUN ID %d is higher " 4647 "than ctl_max_luns - 1 (%d)\n", 4648 be_lun->req_lun_id, ctl_max_luns - 1); 4649 } else { 4650 /* 4651 * XXX KDM return an error, or just assign 4652 * another LUN ID in this case?? 4653 */ 4654 printf("ctl: requested LUN ID %d is already " 4655 "in use\n", be_lun->req_lun_id); 4656 } 4657 fail: 4658 free(lun->lun_devid, M_CTL); 4659 if (lun->flags & CTL_LUN_MALLOCED) 4660 free(lun, M_CTL); 4661 be_lun->lun_config_status(be_lun->be_lun, 4662 CTL_LUN_CONFIG_FAILURE); 4663 return (ENOSPC); 4664 } 4665 lun_number = be_lun->req_lun_id; 4666 } else { 4667 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4668 if (lun_number == -1) { 4669 mtx_unlock(&ctl_softc->ctl_lock); 4670 printf("ctl: can't allocate LUN, out of LUNs\n"); 4671 goto fail; 4672 } 4673 } 4674 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4675 mtx_unlock(&ctl_softc->ctl_lock); 4676 4677 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4678 lun->lun = lun_number; 4679 lun->be_lun = be_lun; 4680 /* 4681 * The processor LUN is always enabled. Disk LUNs come on line 4682 * disabled, and must be enabled by the backend. 4683 */ 4684 lun->flags |= CTL_LUN_DISABLED; 4685 lun->backend = be_lun->be; 4686 be_lun->ctl_lun = lun; 4687 be_lun->lun_id = lun_number; 4688 atomic_add_int(&be_lun->be->num_luns, 1); 4689 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4690 lun->flags |= CTL_LUN_EJECTED; 4691 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4692 lun->flags |= CTL_LUN_NO_MEDIA; 4693 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4694 lun->flags |= CTL_LUN_STOPPED; 4695 4696 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4697 lun->flags |= CTL_LUN_PRIMARY_SC; 4698 4699 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4700 if (value != NULL) { 4701 if (strcmp(value, "on") == 0) 4702 lun->flags |= CTL_LUN_REMOVABLE; 4703 } else if (be_lun->lun_type == T_CDROM) 4704 lun->flags |= CTL_LUN_REMOVABLE; 4705 4706 lun->ctl_softc = ctl_softc; 4707 #ifdef CTL_TIME_IO 4708 lun->last_busy = getsbinuptime(); 4709 #endif 4710 TAILQ_INIT(&lun->ooa_queue); 4711 STAILQ_INIT(&lun->error_list); 4712 lun->ie_reported = 1; 4713 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4714 ctl_tpc_lun_init(lun); 4715 if (lun->flags & CTL_LUN_REMOVABLE) { 4716 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4717 M_CTL, M_WAITOK); 4718 } 4719 4720 /* 4721 * Initialize the mode and log page index. 4722 */ 4723 ctl_init_page_index(lun); 4724 ctl_init_log_page_index(lun); 4725 4726 /* Setup statistics gathering */ 4727 lun->stats.item = lun_number; 4728 4729 /* 4730 * Now, before we insert this lun on the lun list, set the lun 4731 * inventory changed UA for all other luns. 4732 */ 4733 mtx_lock(&ctl_softc->ctl_lock); 4734 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4735 mtx_lock(&nlun->lun_lock); 4736 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4737 mtx_unlock(&nlun->lun_lock); 4738 } 4739 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4740 ctl_softc->ctl_luns[lun_number] = lun; 4741 ctl_softc->num_luns++; 4742 mtx_unlock(&ctl_softc->ctl_lock); 4743 4744 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4745 return (0); 4746 } 4747 4748 /* 4749 * Delete a LUN. 
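 * (Typically reached via ctl_invalidate_lun(): immediately if the OOA
 * queue is already empty, otherwise once the last outstanding I/O
 * completes.)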
4750 * Assumptions: 4751 * - LUN has already been marked invalid and any pending I/O has been taken 4752 * care of. 4753 */ 4754 static int 4755 ctl_free_lun(struct ctl_lun *lun) 4756 { 4757 struct ctl_softc *softc = lun->ctl_softc; 4758 struct ctl_lun *nlun; 4759 int i; 4760 4761 KASSERT(TAILQ_EMPTY(&lun->ooa_queue), 4762 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4763 4764 mtx_lock(&softc->ctl_lock); 4765 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4766 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4767 softc->ctl_luns[lun->lun] = NULL; 4768 softc->num_luns--; 4769 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4770 mtx_lock(&nlun->lun_lock); 4771 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4772 mtx_unlock(&nlun->lun_lock); 4773 } 4774 mtx_unlock(&softc->ctl_lock); 4775 4776 /* 4777 * Tell the backend to free resources, if this LUN has a backend. 4778 */ 4779 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4780 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4781 4782 lun->ie_reportcnt = UINT32_MAX; 4783 callout_drain(&lun->ie_callout); 4784 ctl_tpc_lun_shutdown(lun); 4785 mtx_destroy(&lun->lun_lock); 4786 free(lun->lun_devid, M_CTL); 4787 for (i = 0; i < ctl_max_ports; i++) 4788 free(lun->pending_ua[i], M_CTL); 4789 free(lun->pending_ua, M_DEVBUF); 4790 for (i = 0; i < ctl_max_ports; i++) 4791 free(lun->pr_keys[i], M_CTL); 4792 free(lun->pr_keys, M_DEVBUF); 4793 free(lun->write_buffer, M_CTL); 4794 free(lun->prevent, M_CTL); 4795 if (lun->flags & CTL_LUN_MALLOCED) 4796 free(lun, M_CTL); 4797 4798 return (0); 4799 } 4800 4801 static void 4802 ctl_create_lun(struct ctl_be_lun *be_lun) 4803 { 4804 4805 /* 4806 * ctl_alloc_lun() should handle all potential failure cases. 4807 */ 4808 ctl_alloc_lun(control_softc, NULL, be_lun); 4809 } 4810 4811 int 4812 ctl_add_lun(struct ctl_be_lun *be_lun) 4813 { 4814 struct ctl_softc *softc = control_softc; 4815 4816 mtx_lock(&softc->ctl_lock); 4817 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4818 mtx_unlock(&softc->ctl_lock); 4819 wakeup(&softc->pending_lun_queue); 4820 4821 return (0); 4822 } 4823 4824 int 4825 ctl_enable_lun(struct ctl_be_lun *be_lun) 4826 { 4827 struct ctl_softc *softc; 4828 struct ctl_port *port, *nport; 4829 struct ctl_lun *lun; 4830 int retval; 4831 4832 lun = (struct ctl_lun *)be_lun->ctl_lun; 4833 softc = lun->ctl_softc; 4834 4835 mtx_lock(&softc->ctl_lock); 4836 mtx_lock(&lun->lun_lock); 4837 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4838 /* 4839 * eh? Why did we get called if the LUN is already 4840 * enabled? 4841 */ 4842 mtx_unlock(&lun->lun_lock); 4843 mtx_unlock(&softc->ctl_lock); 4844 return (0); 4845 } 4846 lun->flags &= ~CTL_LUN_DISABLED; 4847 mtx_unlock(&lun->lun_lock); 4848 4849 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4850 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4851 port->lun_map != NULL || port->lun_enable == NULL) 4852 continue; 4853 4854 /* 4855 * Drop the lock while we call the FETD's enable routine. 4856 * This can lead to a callback into CTL (at least in the 4857 * case of the internal initiator frontend. 
4858 */ 4859 mtx_unlock(&softc->ctl_lock); 4860 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4861 mtx_lock(&softc->ctl_lock); 4862 if (retval != 0) { 4863 printf("%s: FETD %s port %d returned error " 4864 "%d for lun_enable on lun %jd\n", 4865 __func__, port->port_name, port->targ_port, 4866 retval, (intmax_t)lun->lun); 4867 } 4868 } 4869 4870 mtx_unlock(&softc->ctl_lock); 4871 ctl_isc_announce_lun(lun); 4872 4873 return (0); 4874 } 4875 4876 int 4877 ctl_disable_lun(struct ctl_be_lun *be_lun) 4878 { 4879 struct ctl_softc *softc; 4880 struct ctl_port *port; 4881 struct ctl_lun *lun; 4882 int retval; 4883 4884 lun = (struct ctl_lun *)be_lun->ctl_lun; 4885 softc = lun->ctl_softc; 4886 4887 mtx_lock(&softc->ctl_lock); 4888 mtx_lock(&lun->lun_lock); 4889 if (lun->flags & CTL_LUN_DISABLED) { 4890 mtx_unlock(&lun->lun_lock); 4891 mtx_unlock(&softc->ctl_lock); 4892 return (0); 4893 } 4894 lun->flags |= CTL_LUN_DISABLED; 4895 mtx_unlock(&lun->lun_lock); 4896 4897 STAILQ_FOREACH(port, &softc->port_list, links) { 4898 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4899 port->lun_map != NULL || port->lun_disable == NULL) 4900 continue; 4901 4902 /* 4903 * Drop the lock before we call the frontend's disable 4904 * routine, to avoid lock order reversals. 4905 * 4906 * XXX KDM what happens if the frontend list changes while 4907 * we're traversing it? It's unlikely, but should be handled. 4908 */ 4909 mtx_unlock(&softc->ctl_lock); 4910 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4911 mtx_lock(&softc->ctl_lock); 4912 if (retval != 0) { 4913 printf("%s: FETD %s port %d returned error " 4914 "%d for lun_disable on lun %jd\n", 4915 __func__, port->port_name, port->targ_port, 4916 retval, (intmax_t)lun->lun); 4917 } 4918 } 4919 4920 mtx_unlock(&softc->ctl_lock); 4921 ctl_isc_announce_lun(lun); 4922 4923 return (0); 4924 } 4925 4926 int 4927 ctl_start_lun(struct ctl_be_lun *be_lun) 4928 { 4929 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4930 4931 mtx_lock(&lun->lun_lock); 4932 lun->flags &= ~CTL_LUN_STOPPED; 4933 mtx_unlock(&lun->lun_lock); 4934 return (0); 4935 } 4936 4937 int 4938 ctl_stop_lun(struct ctl_be_lun *be_lun) 4939 { 4940 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4941 4942 mtx_lock(&lun->lun_lock); 4943 lun->flags |= CTL_LUN_STOPPED; 4944 mtx_unlock(&lun->lun_lock); 4945 return (0); 4946 } 4947 4948 int 4949 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4950 { 4951 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4952 4953 mtx_lock(&lun->lun_lock); 4954 lun->flags |= CTL_LUN_NO_MEDIA; 4955 mtx_unlock(&lun->lun_lock); 4956 return (0); 4957 } 4958 4959 int 4960 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4961 { 4962 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4963 union ctl_ha_msg msg; 4964 4965 mtx_lock(&lun->lun_lock); 4966 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4967 if (lun->flags & CTL_LUN_REMOVABLE) 4968 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4969 mtx_unlock(&lun->lun_lock); 4970 if ((lun->flags & CTL_LUN_REMOVABLE) && 4971 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4972 bzero(&msg.ua, sizeof(msg.ua)); 4973 msg.hdr.msg_type = CTL_MSG_UA; 4974 msg.hdr.nexus.initid = -1; 4975 msg.hdr.nexus.targ_port = -1; 4976 msg.hdr.nexus.targ_lun = lun->lun; 4977 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4978 msg.ua.ua_all = 1; 4979 msg.ua.ua_set = 1; 4980 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4981 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4982 M_WAITOK); 4983 } 4984 return (0); 4985 } 4986 
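/*
 * Illustrative sketch (not part of CTL): a removable-media backend would
 * typically drive the media state transitions above from its own
 * open/close or eject handling, roughly like this, where the
 * "medium_present" flag is the backend's own (hypothetical) state:
 *
 *	if (medium_present)
 *		ctl_lun_has_media(be_lun);	// clear NO_MEDIA/EJECTED, post UA
 *	else
 *		ctl_lun_no_media(be_lun);	// report "medium not present"
 */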
4987 int 4988 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4989 { 4990 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4991 4992 mtx_lock(&lun->lun_lock); 4993 lun->flags |= CTL_LUN_EJECTED; 4994 mtx_unlock(&lun->lun_lock); 4995 return (0); 4996 } 4997 4998 int 4999 ctl_lun_primary(struct ctl_be_lun *be_lun) 5000 { 5001 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5002 5003 mtx_lock(&lun->lun_lock); 5004 lun->flags |= CTL_LUN_PRIMARY_SC; 5005 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5006 mtx_unlock(&lun->lun_lock); 5007 ctl_isc_announce_lun(lun); 5008 return (0); 5009 } 5010 5011 int 5012 ctl_lun_secondary(struct ctl_be_lun *be_lun) 5013 { 5014 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5015 5016 mtx_lock(&lun->lun_lock); 5017 lun->flags &= ~CTL_LUN_PRIMARY_SC; 5018 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5019 mtx_unlock(&lun->lun_lock); 5020 ctl_isc_announce_lun(lun); 5021 return (0); 5022 } 5023 5024 int 5025 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5026 { 5027 struct ctl_lun *lun; 5028 5029 lun = (struct ctl_lun *)be_lun->ctl_lun; 5030 5031 mtx_lock(&lun->lun_lock); 5032 5033 /* 5034 * The LUN needs to be disabled before it can be marked invalid. 5035 */ 5036 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 5037 mtx_unlock(&lun->lun_lock); 5038 return (-1); 5039 } 5040 /* 5041 * Mark the LUN invalid. 5042 */ 5043 lun->flags |= CTL_LUN_INVALID; 5044 5045 /* 5046 * If there is nothing in the OOA queue, go ahead and free the LUN. 5047 * If we have something in the OOA queue, we'll free it when the 5048 * last I/O completes. 5049 */ 5050 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5051 mtx_unlock(&lun->lun_lock); 5052 ctl_free_lun(lun); 5053 } else 5054 mtx_unlock(&lun->lun_lock); 5055 5056 return (0); 5057 } 5058 5059 void 5060 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5061 { 5062 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5063 union ctl_ha_msg msg; 5064 5065 mtx_lock(&lun->lun_lock); 5066 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5067 mtx_unlock(&lun->lun_lock); 5068 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5069 /* Send msg to other side. */ 5070 bzero(&msg.ua, sizeof(msg.ua)); 5071 msg.hdr.msg_type = CTL_MSG_UA; 5072 msg.hdr.nexus.initid = -1; 5073 msg.hdr.nexus.targ_port = -1; 5074 msg.hdr.nexus.targ_lun = lun->lun; 5075 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5076 msg.ua.ua_all = 1; 5077 msg.ua.ua_set = 1; 5078 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5079 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5080 M_WAITOK); 5081 } 5082 } 5083 5084 /* 5085 * Backend "memory move is complete" callback for requests that never 5086 * make it down to say RAIDCore's configuration code. 
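 * In practice this is installed as be_move_done by the config commands
 * below, such as ctl_format(), ctl_read_buffer() and ctl_write_buffer(),
 * so control returns here once ctl_datamove() has finished moving data.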
5087 */ 5088 int 5089 ctl_config_move_done(union ctl_io *io) 5090 { 5091 int retval; 5092 5093 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5094 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5095 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5096 5097 if ((io->io_hdr.port_status != 0) && 5098 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5099 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5100 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5101 /*retry_count*/ io->io_hdr.port_status); 5102 } else if (io->scsiio.kern_data_resid != 0 && 5103 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5104 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5105 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5106 ctl_set_invalid_field_ciu(&io->scsiio); 5107 } 5108 5109 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5110 ctl_data_print(io); 5111 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5112 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5113 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5114 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5115 /* 5116 * XXX KDM just assuming a single pointer here, and not a 5117 * S/G list. If we start using S/G lists for config data, 5118 * we'll need to know how to clean them up here as well. 5119 */ 5120 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5121 free(io->scsiio.kern_data_ptr, M_CTL); 5122 ctl_done(io); 5123 retval = CTL_RETVAL_COMPLETE; 5124 } else { 5125 /* 5126 * XXX KDM now we need to continue data movement. Some 5127 * options: 5128 * - call ctl_scsiio() again? We don't do this for data 5129 * writes, because for those at least we know ahead of 5130 * time where the write will go and how long it is. For 5131 * config writes, though, that information is largely 5132 * contained within the write itself, thus we need to 5133 * parse out the data again. 5134 * 5135 * - Call some other function once the data is in? 5136 */ 5137 5138 /* 5139 * XXX KDM call ctl_scsiio() again for now, and check flag 5140 * bits to see whether we're allocated or not. 5141 */ 5142 retval = ctl_scsiio(&io->scsiio); 5143 } 5144 return (retval); 5145 } 5146 5147 /* 5148 * This gets called by a backend driver when it is done with a 5149 * data_submit method. 5150 */ 5151 void 5152 ctl_data_submit_done(union ctl_io *io) 5153 { 5154 /* 5155 * If the IO_CONT flag is set, we need to call the supplied 5156 * function to continue processing the I/O, instead of completing 5157 * the I/O just yet. 5158 * 5159 * If there is an error, though, we don't want to keep processing. 5160 * Instead, just send status back to the initiator. 5161 */ 5162 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5163 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5164 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5165 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5166 io->scsiio.io_cont(io); 5167 return; 5168 } 5169 ctl_done(io); 5170 } 5171 5172 /* 5173 * This gets called by a backend driver when it is done with a 5174 * configuration write. 5175 */ 5176 void 5177 ctl_config_write_done(union ctl_io *io) 5178 { 5179 uint8_t *buf; 5180 5181 /* 5182 * If the IO_CONT flag is set, we need to call the supplied 5183 * function to continue processing the I/O, instead of completing 5184 * the I/O just yet. 5185 * 5186 * If there is an error, though, we don't want to keep processing. 5187 * Instead, just send status back to the initiator. 
5188 */ 5189 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5190 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5191 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5192 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5193 io->scsiio.io_cont(io); 5194 return; 5195 } 5196 /* 5197 * Since a configuration write can be done for commands that actually 5198 * have data allocated, like write buffer, and commands that have 5199 * no data, like start/stop unit, we need to check here. 5200 */ 5201 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5202 buf = io->scsiio.kern_data_ptr; 5203 else 5204 buf = NULL; 5205 ctl_done(io); 5206 if (buf) 5207 free(buf, M_CTL); 5208 } 5209 5210 void 5211 ctl_config_read_done(union ctl_io *io) 5212 { 5213 uint8_t *buf; 5214 5215 /* 5216 * If there is some error -- we are done, skip data transfer. 5217 */ 5218 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5219 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5220 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5221 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5222 buf = io->scsiio.kern_data_ptr; 5223 else 5224 buf = NULL; 5225 ctl_done(io); 5226 if (buf) 5227 free(buf, M_CTL); 5228 return; 5229 } 5230 5231 /* 5232 * If the IO_CONT flag is set, we need to call the supplied 5233 * function to continue processing the I/O, instead of completing 5234 * the I/O just yet. 5235 */ 5236 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5237 io->scsiio.io_cont(io); 5238 return; 5239 } 5240 5241 ctl_datamove(io); 5242 } 5243 5244 /* 5245 * SCSI release command. 5246 */ 5247 int 5248 ctl_scsi_release(struct ctl_scsiio *ctsio) 5249 { 5250 struct ctl_lun *lun = CTL_LUN(ctsio); 5251 uint32_t residx; 5252 5253 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5254 5255 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5256 5257 /* 5258 * XXX KDM right now, we only support LUN reservation. We don't 5259 * support 3rd party reservations, or extent reservations, which 5260 * might actually need the parameter list. If we've gotten this 5261 * far, we've got a LUN reservation. Anything else got kicked out 5262 * above. So, according to SPC, ignore the length. 5263 */ 5264 5265 mtx_lock(&lun->lun_lock); 5266 5267 /* 5268 * According to SPC, it is not an error for an intiator to attempt 5269 * to release a reservation on a LUN that isn't reserved, or that 5270 * is reserved by another initiator. The reservation can only be 5271 * released, though, by the initiator who made it or by one of 5272 * several reset type events. 5273 */ 5274 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5275 lun->flags &= ~CTL_LUN_RESERVED; 5276 5277 mtx_unlock(&lun->lun_lock); 5278 5279 ctl_set_success(ctsio); 5280 ctl_done((union ctl_io *)ctsio); 5281 return (CTL_RETVAL_COMPLETE); 5282 } 5283 5284 int 5285 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5286 { 5287 struct ctl_lun *lun = CTL_LUN(ctsio); 5288 uint32_t residx; 5289 5290 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5291 5292 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5293 5294 /* 5295 * XXX KDM right now, we only support LUN reservation. We don't 5296 * support 3rd party reservations, or extent reservations, which 5297 * might actually need the parameter list. If we've gotten this 5298 * far, we've got a LUN reservation. Anything else got kicked out 5299 * above. So, according to SPC, ignore the length. 
5300 */ 5301 5302 mtx_lock(&lun->lun_lock); 5303 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5304 ctl_set_reservation_conflict(ctsio); 5305 goto bailout; 5306 } 5307 5308 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5309 if (lun->flags & CTL_LUN_PR_RESERVED) { 5310 ctl_set_success(ctsio); 5311 goto bailout; 5312 } 5313 5314 lun->flags |= CTL_LUN_RESERVED; 5315 lun->res_idx = residx; 5316 ctl_set_success(ctsio); 5317 5318 bailout: 5319 mtx_unlock(&lun->lun_lock); 5320 ctl_done((union ctl_io *)ctsio); 5321 return (CTL_RETVAL_COMPLETE); 5322 } 5323 5324 int 5325 ctl_start_stop(struct ctl_scsiio *ctsio) 5326 { 5327 struct ctl_lun *lun = CTL_LUN(ctsio); 5328 struct scsi_start_stop_unit *cdb; 5329 int retval; 5330 5331 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5332 5333 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5334 5335 if ((cdb->how & SSS_PC_MASK) == 0) { 5336 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5337 (cdb->how & SSS_START) == 0) { 5338 uint32_t residx; 5339 5340 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5341 if (ctl_get_prkey(lun, residx) == 0 || 5342 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5343 5344 ctl_set_reservation_conflict(ctsio); 5345 ctl_done((union ctl_io *)ctsio); 5346 return (CTL_RETVAL_COMPLETE); 5347 } 5348 } 5349 5350 if ((cdb->how & SSS_LOEJ) && 5351 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5352 ctl_set_invalid_field(ctsio, 5353 /*sks_valid*/ 1, 5354 /*command*/ 1, 5355 /*field*/ 4, 5356 /*bit_valid*/ 1, 5357 /*bit*/ 1); 5358 ctl_done((union ctl_io *)ctsio); 5359 return (CTL_RETVAL_COMPLETE); 5360 } 5361 5362 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5363 lun->prevent_count > 0) { 5364 /* "Medium removal prevented" */ 5365 ctl_set_sense(ctsio, /*current_error*/ 1, 5366 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5367 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5368 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5369 ctl_done((union ctl_io *)ctsio); 5370 return (CTL_RETVAL_COMPLETE); 5371 } 5372 } 5373 5374 retval = lun->backend->config_write((union ctl_io *)ctsio); 5375 return (retval); 5376 } 5377 5378 int 5379 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5380 { 5381 struct ctl_lun *lun = CTL_LUN(ctsio); 5382 struct scsi_prevent *cdb; 5383 int retval; 5384 uint32_t initidx; 5385 5386 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5387 5388 cdb = (struct scsi_prevent *)ctsio->cdb; 5389 5390 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5391 ctl_set_invalid_opcode(ctsio); 5392 ctl_done((union ctl_io *)ctsio); 5393 return (CTL_RETVAL_COMPLETE); 5394 } 5395 5396 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5397 mtx_lock(&lun->lun_lock); 5398 if ((cdb->how & PR_PREVENT) && 5399 ctl_is_set(lun->prevent, initidx) == 0) { 5400 ctl_set_mask(lun->prevent, initidx); 5401 lun->prevent_count++; 5402 } else if ((cdb->how & PR_PREVENT) == 0 && 5403 ctl_is_set(lun->prevent, initidx)) { 5404 ctl_clear_mask(lun->prevent, initidx); 5405 lun->prevent_count--; 5406 } 5407 mtx_unlock(&lun->lun_lock); 5408 retval = lun->backend->config_write((union ctl_io *)ctsio); 5409 return (retval); 5410 } 5411 5412 /* 5413 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5414 * we don't really do anything with the LBA and length fields if the user 5415 * passes them in. Instead we'll just flush out the cache for the entire 5416 * LUN. 
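 * The flush itself is simply handed to the backend's config_write
 * method; the decoded LBA and length are only used for the bounds
 * check below, so an out-of-range request still fails as expected.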
5417 */ 5418 int 5419 ctl_sync_cache(struct ctl_scsiio *ctsio) 5420 { 5421 struct ctl_lun *lun = CTL_LUN(ctsio); 5422 struct ctl_lba_len_flags *lbalen; 5423 uint64_t starting_lba; 5424 uint32_t block_count; 5425 int retval; 5426 uint8_t byte2; 5427 5428 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5429 5430 retval = 0; 5431 5432 switch (ctsio->cdb[0]) { 5433 case SYNCHRONIZE_CACHE: { 5434 struct scsi_sync_cache *cdb; 5435 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5436 5437 starting_lba = scsi_4btoul(cdb->begin_lba); 5438 block_count = scsi_2btoul(cdb->lb_count); 5439 byte2 = cdb->byte2; 5440 break; 5441 } 5442 case SYNCHRONIZE_CACHE_16: { 5443 struct scsi_sync_cache_16 *cdb; 5444 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5445 5446 starting_lba = scsi_8btou64(cdb->begin_lba); 5447 block_count = scsi_4btoul(cdb->lb_count); 5448 byte2 = cdb->byte2; 5449 break; 5450 } 5451 default: 5452 ctl_set_invalid_opcode(ctsio); 5453 ctl_done((union ctl_io *)ctsio); 5454 goto bailout; 5455 break; /* NOTREACHED */ 5456 } 5457 5458 /* 5459 * We check the LBA and length, but don't do anything with them. 5460 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5461 * get flushed. This check will just help satisfy anyone who wants 5462 * to see an error for an out of range LBA. 5463 */ 5464 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5465 ctl_set_lba_out_of_range(ctsio, 5466 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5467 ctl_done((union ctl_io *)ctsio); 5468 goto bailout; 5469 } 5470 5471 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5472 lbalen->lba = starting_lba; 5473 lbalen->len = block_count; 5474 lbalen->flags = byte2; 5475 retval = lun->backend->config_write((union ctl_io *)ctsio); 5476 5477 bailout: 5478 return (retval); 5479 } 5480 5481 int 5482 ctl_format(struct ctl_scsiio *ctsio) 5483 { 5484 struct scsi_format *cdb; 5485 int length, defect_list_len; 5486 5487 CTL_DEBUG_PRINT(("ctl_format\n")); 5488 5489 cdb = (struct scsi_format *)ctsio->cdb; 5490 5491 length = 0; 5492 if (cdb->byte2 & SF_FMTDATA) { 5493 if (cdb->byte2 & SF_LONGLIST) 5494 length = sizeof(struct scsi_format_header_long); 5495 else 5496 length = sizeof(struct scsi_format_header_short); 5497 } 5498 5499 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5500 && (length > 0)) { 5501 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5502 ctsio->kern_data_len = length; 5503 ctsio->kern_total_len = length; 5504 ctsio->kern_rel_offset = 0; 5505 ctsio->kern_sg_entries = 0; 5506 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5507 ctsio->be_move_done = ctl_config_move_done; 5508 ctl_datamove((union ctl_io *)ctsio); 5509 5510 return (CTL_RETVAL_COMPLETE); 5511 } 5512 5513 defect_list_len = 0; 5514 5515 if (cdb->byte2 & SF_FMTDATA) { 5516 if (cdb->byte2 & SF_LONGLIST) { 5517 struct scsi_format_header_long *header; 5518 5519 header = (struct scsi_format_header_long *) 5520 ctsio->kern_data_ptr; 5521 5522 defect_list_len = scsi_4btoul(header->defect_list_len); 5523 if (defect_list_len != 0) { 5524 ctl_set_invalid_field(ctsio, 5525 /*sks_valid*/ 1, 5526 /*command*/ 0, 5527 /*field*/ 2, 5528 /*bit_valid*/ 0, 5529 /*bit*/ 0); 5530 goto bailout; 5531 } 5532 } else { 5533 struct scsi_format_header_short *header; 5534 5535 header = (struct scsi_format_header_short *) 5536 ctsio->kern_data_ptr; 5537 5538 defect_list_len = scsi_2btoul(header->defect_list_len); 5539 if (defect_list_len != 0) { 5540 ctl_set_invalid_field(ctsio, 5541 /*sks_valid*/ 1, 5542 /*command*/ 0, 5543 
/*field*/ 2, 5544 /*bit_valid*/ 0, 5545 /*bit*/ 0); 5546 goto bailout; 5547 } 5548 } 5549 } 5550 5551 ctl_set_success(ctsio); 5552 bailout: 5553 5554 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5555 free(ctsio->kern_data_ptr, M_CTL); 5556 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5557 } 5558 5559 ctl_done((union ctl_io *)ctsio); 5560 return (CTL_RETVAL_COMPLETE); 5561 } 5562 5563 int 5564 ctl_read_buffer(struct ctl_scsiio *ctsio) 5565 { 5566 struct ctl_lun *lun = CTL_LUN(ctsio); 5567 uint64_t buffer_offset; 5568 uint32_t len; 5569 uint8_t byte2; 5570 static uint8_t descr[4]; 5571 static uint8_t echo_descr[4] = { 0 }; 5572 5573 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5574 5575 switch (ctsio->cdb[0]) { 5576 case READ_BUFFER: { 5577 struct scsi_read_buffer *cdb; 5578 5579 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5580 buffer_offset = scsi_3btoul(cdb->offset); 5581 len = scsi_3btoul(cdb->length); 5582 byte2 = cdb->byte2; 5583 break; 5584 } 5585 case READ_BUFFER_16: { 5586 struct scsi_read_buffer_16 *cdb; 5587 5588 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5589 buffer_offset = scsi_8btou64(cdb->offset); 5590 len = scsi_4btoul(cdb->length); 5591 byte2 = cdb->byte2; 5592 break; 5593 } 5594 default: /* This shouldn't happen. */ 5595 ctl_set_invalid_opcode(ctsio); 5596 ctl_done((union ctl_io *)ctsio); 5597 return (CTL_RETVAL_COMPLETE); 5598 } 5599 5600 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5601 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5602 ctl_set_invalid_field(ctsio, 5603 /*sks_valid*/ 1, 5604 /*command*/ 1, 5605 /*field*/ 6, 5606 /*bit_valid*/ 0, 5607 /*bit*/ 0); 5608 ctl_done((union ctl_io *)ctsio); 5609 return (CTL_RETVAL_COMPLETE); 5610 } 5611 5612 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5613 descr[0] = 0; 5614 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5615 ctsio->kern_data_ptr = descr; 5616 len = min(len, sizeof(descr)); 5617 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5618 ctsio->kern_data_ptr = echo_descr; 5619 len = min(len, sizeof(echo_descr)); 5620 } else { 5621 if (lun->write_buffer == NULL) { 5622 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5623 M_CTL, M_WAITOK); 5624 } 5625 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5626 } 5627 ctsio->kern_data_len = len; 5628 ctsio->kern_total_len = len; 5629 ctsio->kern_rel_offset = 0; 5630 ctsio->kern_sg_entries = 0; 5631 ctl_set_success(ctsio); 5632 ctsio->be_move_done = ctl_config_move_done; 5633 ctl_datamove((union ctl_io *)ctsio); 5634 return (CTL_RETVAL_COMPLETE); 5635 } 5636 5637 int 5638 ctl_write_buffer(struct ctl_scsiio *ctsio) 5639 { 5640 struct ctl_lun *lun = CTL_LUN(ctsio); 5641 struct scsi_write_buffer *cdb; 5642 int buffer_offset, len; 5643 5644 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5645 5646 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5647 5648 len = scsi_3btoul(cdb->length); 5649 buffer_offset = scsi_3btoul(cdb->offset); 5650 5651 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5652 ctl_set_invalid_field(ctsio, 5653 /*sks_valid*/ 1, 5654 /*command*/ 1, 5655 /*field*/ 6, 5656 /*bit_valid*/ 0, 5657 /*bit*/ 0); 5658 ctl_done((union ctl_io *)ctsio); 5659 return (CTL_RETVAL_COMPLETE); 5660 } 5661 5662 /* 5663 * If we've got a kernel request that hasn't been malloced yet, 5664 * malloc it and tell the caller the data buffer is here. 
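 * The command is thus handled in two passes: the first pass points
 * kern_data_ptr at lun->write_buffer and starts ctl_datamove(); once
 * the data has arrived we come back through ctl_config_move_done() and
 * ctl_scsiio(), and simply complete with good status.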
5665 */ 5666 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5667 if (lun->write_buffer == NULL) { 5668 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5669 M_CTL, M_WAITOK); 5670 } 5671 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5672 ctsio->kern_data_len = len; 5673 ctsio->kern_total_len = len; 5674 ctsio->kern_rel_offset = 0; 5675 ctsio->kern_sg_entries = 0; 5676 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5677 ctsio->be_move_done = ctl_config_move_done; 5678 ctl_datamove((union ctl_io *)ctsio); 5679 5680 return (CTL_RETVAL_COMPLETE); 5681 } 5682 5683 ctl_set_success(ctsio); 5684 ctl_done((union ctl_io *)ctsio); 5685 return (CTL_RETVAL_COMPLETE); 5686 } 5687 5688 int 5689 ctl_write_same(struct ctl_scsiio *ctsio) 5690 { 5691 struct ctl_lun *lun = CTL_LUN(ctsio); 5692 struct ctl_lba_len_flags *lbalen; 5693 uint64_t lba; 5694 uint32_t num_blocks; 5695 int len, retval; 5696 uint8_t byte2; 5697 5698 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5699 5700 switch (ctsio->cdb[0]) { 5701 case WRITE_SAME_10: { 5702 struct scsi_write_same_10 *cdb; 5703 5704 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5705 5706 lba = scsi_4btoul(cdb->addr); 5707 num_blocks = scsi_2btoul(cdb->length); 5708 byte2 = cdb->byte2; 5709 break; 5710 } 5711 case WRITE_SAME_16: { 5712 struct scsi_write_same_16 *cdb; 5713 5714 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5715 5716 lba = scsi_8btou64(cdb->addr); 5717 num_blocks = scsi_4btoul(cdb->length); 5718 byte2 = cdb->byte2; 5719 break; 5720 } 5721 default: 5722 /* 5723 * We got a command we don't support. This shouldn't 5724 * happen, commands should be filtered out above us. 5725 */ 5726 ctl_set_invalid_opcode(ctsio); 5727 ctl_done((union ctl_io *)ctsio); 5728 5729 return (CTL_RETVAL_COMPLETE); 5730 break; /* NOTREACHED */ 5731 } 5732 5733 /* ANCHOR flag can be used only together with UNMAP */ 5734 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5735 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5736 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5737 ctl_done((union ctl_io *)ctsio); 5738 return (CTL_RETVAL_COMPLETE); 5739 } 5740 5741 /* 5742 * The first check is to make sure we're in bounds, the second 5743 * check is to catch wrap-around problems. If the lba + num blocks 5744 * is less than the lba, then we've wrapped around and the block 5745 * range is invalid anyway. 5746 */ 5747 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5748 || ((lba + num_blocks) < lba)) { 5749 ctl_set_lba_out_of_range(ctsio, 5750 MAX(lba, lun->be_lun->maxlba + 1)); 5751 ctl_done((union ctl_io *)ctsio); 5752 return (CTL_RETVAL_COMPLETE); 5753 } 5754 5755 /* Zero number of blocks means "to the last logical block" */ 5756 if (num_blocks == 0) { 5757 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5758 ctl_set_invalid_field(ctsio, 5759 /*sks_valid*/ 0, 5760 /*command*/ 1, 5761 /*field*/ 0, 5762 /*bit_valid*/ 0, 5763 /*bit*/ 0); 5764 ctl_done((union ctl_io *)ctsio); 5765 return (CTL_RETVAL_COMPLETE); 5766 } 5767 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5768 } 5769 5770 len = lun->be_lun->blocksize; 5771 5772 /* 5773 * If we've got a kernel request that hasn't been malloced yet, 5774 * malloc it and tell the caller the data buffer is here. 
5775 */ 5776 if ((byte2 & SWS_NDOB) == 0 && 5777 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5778 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5779 ctsio->kern_data_len = len; 5780 ctsio->kern_total_len = len; 5781 ctsio->kern_rel_offset = 0; 5782 ctsio->kern_sg_entries = 0; 5783 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5784 ctsio->be_move_done = ctl_config_move_done; 5785 ctl_datamove((union ctl_io *)ctsio); 5786 5787 return (CTL_RETVAL_COMPLETE); 5788 } 5789 5790 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5791 lbalen->lba = lba; 5792 lbalen->len = num_blocks; 5793 lbalen->flags = byte2; 5794 retval = lun->backend->config_write((union ctl_io *)ctsio); 5795 5796 return (retval); 5797 } 5798 5799 int 5800 ctl_unmap(struct ctl_scsiio *ctsio) 5801 { 5802 struct ctl_lun *lun = CTL_LUN(ctsio); 5803 struct scsi_unmap *cdb; 5804 struct ctl_ptr_len_flags *ptrlen; 5805 struct scsi_unmap_header *hdr; 5806 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5807 uint64_t lba; 5808 uint32_t num_blocks; 5809 int len, retval; 5810 uint8_t byte2; 5811 5812 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5813 5814 cdb = (struct scsi_unmap *)ctsio->cdb; 5815 len = scsi_2btoul(cdb->length); 5816 byte2 = cdb->byte2; 5817 5818 /* 5819 * If we've got a kernel request that hasn't been malloced yet, 5820 * malloc it and tell the caller the data buffer is here. 5821 */ 5822 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5823 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5824 ctsio->kern_data_len = len; 5825 ctsio->kern_total_len = len; 5826 ctsio->kern_rel_offset = 0; 5827 ctsio->kern_sg_entries = 0; 5828 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5829 ctsio->be_move_done = ctl_config_move_done; 5830 ctl_datamove((union ctl_io *)ctsio); 5831 5832 return (CTL_RETVAL_COMPLETE); 5833 } 5834 5835 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5836 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5837 if (len < sizeof (*hdr) || 5838 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5839 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5840 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5841 ctl_set_invalid_field(ctsio, 5842 /*sks_valid*/ 0, 5843 /*command*/ 0, 5844 /*field*/ 0, 5845 /*bit_valid*/ 0, 5846 /*bit*/ 0); 5847 goto done; 5848 } 5849 len = scsi_2btoul(hdr->desc_length); 5850 buf = (struct scsi_unmap_desc *)(hdr + 1); 5851 end = buf + len / sizeof(*buf); 5852 5853 endnz = buf; 5854 for (range = buf; range < end; range++) { 5855 lba = scsi_8btou64(range->lba); 5856 num_blocks = scsi_4btoul(range->length); 5857 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5858 || ((lba + num_blocks) < lba)) { 5859 ctl_set_lba_out_of_range(ctsio, 5860 MAX(lba, lun->be_lun->maxlba + 1)); 5861 ctl_done((union ctl_io *)ctsio); 5862 return (CTL_RETVAL_COMPLETE); 5863 } 5864 if (num_blocks != 0) 5865 endnz = range + 1; 5866 } 5867 5868 /* 5869 * Block backend can not handle zero last range. 5870 * Filter it out and return if there is nothing left. 
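	 *
	 * For reference, the parameter list that was just validated has
	 * this shape (a sketch based on the scsi_unmap_header and
	 * scsi_unmap_desc structures used above):
	 *
	 *	header		8 bytes: 2-byte data length, 2-byte block
	 *			descriptor data length, 4 reserved
	 *	descriptors	16 bytes each: 8-byte lba, 4-byte length,
	 *			4 reserved; e.g. an initiator would fill
	 *			one with
	 *
	 *				scsi_u64to8b(lba, d.lba);
	 *				scsi_ulto4b(nblocks, d.length);
	 *
	 * endnz points just past the last descriptor with a non-zero
	 * length, so trailing zero-length ranges are trimmed off here
	 * and a list containing nothing but such ranges completes
	 * successfully without ever reaching the backend.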
5871 */ 5872 len = (uint8_t *)endnz - (uint8_t *)buf; 5873 if (len == 0) { 5874 ctl_set_success(ctsio); 5875 goto done; 5876 } 5877 5878 mtx_lock(&lun->lun_lock); 5879 ptrlen = (struct ctl_ptr_len_flags *) 5880 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5881 ptrlen->ptr = (void *)buf; 5882 ptrlen->len = len; 5883 ptrlen->flags = byte2; 5884 ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); 5885 mtx_unlock(&lun->lun_lock); 5886 5887 retval = lun->backend->config_write((union ctl_io *)ctsio); 5888 return (retval); 5889 5890 done: 5891 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5892 free(ctsio->kern_data_ptr, M_CTL); 5893 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5894 } 5895 ctl_done((union ctl_io *)ctsio); 5896 return (CTL_RETVAL_COMPLETE); 5897 } 5898 5899 int 5900 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5901 struct ctl_page_index *page_index, uint8_t *page_ptr) 5902 { 5903 struct ctl_lun *lun = CTL_LUN(ctsio); 5904 uint8_t *current_cp; 5905 int set_ua; 5906 uint32_t initidx; 5907 5908 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5909 set_ua = 0; 5910 5911 current_cp = (page_index->page_data + (page_index->page_len * 5912 CTL_PAGE_CURRENT)); 5913 5914 mtx_lock(&lun->lun_lock); 5915 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5916 memcpy(current_cp, page_ptr, page_index->page_len); 5917 set_ua = 1; 5918 } 5919 if (set_ua != 0) 5920 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5921 mtx_unlock(&lun->lun_lock); 5922 if (set_ua) { 5923 ctl_isc_announce_mode(lun, 5924 ctl_get_initindex(&ctsio->io_hdr.nexus), 5925 page_index->page_code, page_index->subpage); 5926 } 5927 return (CTL_RETVAL_COMPLETE); 5928 } 5929 5930 static void 5931 ctl_ie_timer(void *arg) 5932 { 5933 struct ctl_lun *lun = arg; 5934 uint64_t t; 5935 5936 if (lun->ie_asc == 0) 5937 return; 5938 5939 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5940 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5941 else 5942 lun->ie_reported = 0; 5943 5944 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5945 lun->ie_reportcnt++; 5946 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5947 if (t == 0 || t == UINT32_MAX) 5948 t = 3000; /* 5 min */ 5949 callout_schedule(&lun->ie_callout, t * hz / 10); 5950 } 5951 } 5952 5953 int 5954 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5955 struct ctl_page_index *page_index, uint8_t *page_ptr) 5956 { 5957 struct ctl_lun *lun = CTL_LUN(ctsio); 5958 struct scsi_info_exceptions_page *pg; 5959 uint64_t t; 5960 5961 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5962 5963 pg = (struct scsi_info_exceptions_page *)page_ptr; 5964 mtx_lock(&lun->lun_lock); 5965 if (pg->info_flags & SIEP_FLAGS_TEST) { 5966 lun->ie_asc = 0x5d; 5967 lun->ie_ascq = 0xff; 5968 if (pg->mrie == SIEP_MRIE_UA) { 5969 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5970 lun->ie_reported = 1; 5971 } else { 5972 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5973 lun->ie_reported = -1; 5974 } 5975 lun->ie_reportcnt = 1; 5976 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5977 lun->ie_reportcnt++; 5978 t = scsi_4btoul(pg->interval_timer); 5979 if (t == 0 || t == UINT32_MAX) 5980 t = 3000; /* 5 min */ 5981 callout_reset(&lun->ie_callout, t * hz / 10, 5982 ctl_ie_timer, lun); 5983 } 5984 } else { 5985 lun->ie_asc = 0; 5986 lun->ie_ascq = 0; 5987 lun->ie_reported = 1; 5988 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5989 lun->ie_reportcnt = UINT32_MAX; 5990 callout_stop(&lun->ie_callout); 5991 } 5992 mtx_unlock(&lun->lun_lock); 5993 return (CTL_RETVAL_COMPLETE); 5994 } 5995 5996 static int 5997 
ctl_do_mode_select(union ctl_io *io) 5998 { 5999 struct ctl_lun *lun = CTL_LUN(io); 6000 struct scsi_mode_page_header *page_header; 6001 struct ctl_page_index *page_index; 6002 struct ctl_scsiio *ctsio; 6003 int page_len, page_len_offset, page_len_size; 6004 union ctl_modepage_info *modepage_info; 6005 uint16_t *len_left, *len_used; 6006 int retval, i; 6007 6008 ctsio = &io->scsiio; 6009 page_index = NULL; 6010 page_len = 0; 6011 6012 modepage_info = (union ctl_modepage_info *) 6013 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6014 len_left = &modepage_info->header.len_left; 6015 len_used = &modepage_info->header.len_used; 6016 6017 do_next_page: 6018 6019 page_header = (struct scsi_mode_page_header *) 6020 (ctsio->kern_data_ptr + *len_used); 6021 6022 if (*len_left == 0) { 6023 free(ctsio->kern_data_ptr, M_CTL); 6024 ctl_set_success(ctsio); 6025 ctl_done((union ctl_io *)ctsio); 6026 return (CTL_RETVAL_COMPLETE); 6027 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6028 6029 free(ctsio->kern_data_ptr, M_CTL); 6030 ctl_set_param_len_error(ctsio); 6031 ctl_done((union ctl_io *)ctsio); 6032 return (CTL_RETVAL_COMPLETE); 6033 6034 } else if ((page_header->page_code & SMPH_SPF) 6035 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6036 6037 free(ctsio->kern_data_ptr, M_CTL); 6038 ctl_set_param_len_error(ctsio); 6039 ctl_done((union ctl_io *)ctsio); 6040 return (CTL_RETVAL_COMPLETE); 6041 } 6042 6043 6044 /* 6045 * XXX KDM should we do something with the block descriptor? 6046 */ 6047 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6048 page_index = &lun->mode_pages.index[i]; 6049 if (lun->be_lun->lun_type == T_DIRECT && 6050 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6051 continue; 6052 if (lun->be_lun->lun_type == T_PROCESSOR && 6053 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6054 continue; 6055 if (lun->be_lun->lun_type == T_CDROM && 6056 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6057 continue; 6058 6059 if ((page_index->page_code & SMPH_PC_MASK) != 6060 (page_header->page_code & SMPH_PC_MASK)) 6061 continue; 6062 6063 /* 6064 * If neither page has a subpage code, then we've got a 6065 * match. 6066 */ 6067 if (((page_index->page_code & SMPH_SPF) == 0) 6068 && ((page_header->page_code & SMPH_SPF) == 0)) { 6069 page_len = page_header->page_length; 6070 break; 6071 } 6072 6073 /* 6074 * If both pages have subpages, then the subpage numbers 6075 * have to match. 6076 */ 6077 if ((page_index->page_code & SMPH_SPF) 6078 && (page_header->page_code & SMPH_SPF)) { 6079 struct scsi_mode_page_header_sp *sph; 6080 6081 sph = (struct scsi_mode_page_header_sp *)page_header; 6082 if (page_index->subpage == sph->subpage) { 6083 page_len = scsi_2btoul(sph->page_length); 6084 break; 6085 } 6086 } 6087 } 6088 6089 /* 6090 * If we couldn't find the page, or if we don't have a mode select 6091 * handler for it, send back an error to the user. 
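	 *
	 * For reference, the two page header layouts told apart by
	 * SMPH_SPF in the loop above (a sketch of the structures used
	 * there):
	 *
	 *	scsi_mode_page_header		page_code, page_length
	 *					(2 bytes of header)
	 *	scsi_mode_page_header_sp	page_code, subpage,
	 *					2-byte page_length
	 *					(4 bytes of header)
	 *
	 * The page_length field only counts the bytes that follow it,
	 * which is why page_len_offset/page_len_size below end up as 1/1
	 * for plain pages and 2/2 for subpage-format pages, and why the
	 * expected value is page_index->page_len minus both of them.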
	 */
	if ((i >= CTL_NUM_MODE_PAGES)
	 || (page_index->select_handler == NULL)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (page_index->page_code & SMPH_SPF) {
		page_len_offset = 2;
		page_len_size = 2;
	} else {
		page_len_size = 1;
		page_len_offset = 1;
	}

	/*
	 * If the length the initiator gives us isn't the one we specify in
	 * the mode page header, or if they didn't specify enough data in
	 * the CDB to avoid truncating this page, kick out the request.
	 */
	if (page_len != page_index->page_len - page_len_offset - page_len_size) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + page_len_offset,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
	if (*len_left < page_index->page_len) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Run through the mode page, checking to make sure that the bits
	 * the user changed are actually legal to change.
	 */
	for (i = 0; i < page_index->page_len; i++) {
		uint8_t *user_byte, *change_mask, *current_byte;
		int bad_bit;
		int j;

		user_byte = (uint8_t *)page_header + i;
		change_mask = page_index->page_data +
			      (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
		current_byte = page_index->page_data +
			       (page_index->page_len * CTL_PAGE_CURRENT) + i;

		/*
		 * Check to see whether the user set any bits in this byte
		 * that are not allowed to be set.
		 */
		if ((*user_byte & ~(*change_mask)) ==
		    (*current_byte & ~(*change_mask)))
			continue;

		/*
		 * Go through bit by bit to determine which one is illegal.
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Update these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
6206 */ 6207 if (*len_left > 0) 6208 goto do_next_page; 6209 6210 ctl_set_success(ctsio); 6211 free(ctsio->kern_data_ptr, M_CTL); 6212 ctl_done((union ctl_io *)ctsio); 6213 6214 bailout_no_done: 6215 6216 return (CTL_RETVAL_COMPLETE); 6217 6218 } 6219 6220 int 6221 ctl_mode_select(struct ctl_scsiio *ctsio) 6222 { 6223 struct ctl_lun *lun = CTL_LUN(ctsio); 6224 union ctl_modepage_info *modepage_info; 6225 int bd_len, i, header_size, param_len, rtd; 6226 uint32_t initidx; 6227 6228 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6229 switch (ctsio->cdb[0]) { 6230 case MODE_SELECT_6: { 6231 struct scsi_mode_select_6 *cdb; 6232 6233 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6234 6235 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6236 param_len = cdb->length; 6237 header_size = sizeof(struct scsi_mode_header_6); 6238 break; 6239 } 6240 case MODE_SELECT_10: { 6241 struct scsi_mode_select_10 *cdb; 6242 6243 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6244 6245 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6246 param_len = scsi_2btoul(cdb->length); 6247 header_size = sizeof(struct scsi_mode_header_10); 6248 break; 6249 } 6250 default: 6251 ctl_set_invalid_opcode(ctsio); 6252 ctl_done((union ctl_io *)ctsio); 6253 return (CTL_RETVAL_COMPLETE); 6254 } 6255 6256 if (rtd) { 6257 if (param_len != 0) { 6258 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6259 /*command*/ 1, /*field*/ 0, 6260 /*bit_valid*/ 0, /*bit*/ 0); 6261 ctl_done((union ctl_io *)ctsio); 6262 return (CTL_RETVAL_COMPLETE); 6263 } 6264 6265 /* Revert to defaults. */ 6266 ctl_init_page_index(lun); 6267 mtx_lock(&lun->lun_lock); 6268 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6269 mtx_unlock(&lun->lun_lock); 6270 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6271 ctl_isc_announce_mode(lun, -1, 6272 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6273 lun->mode_pages.index[i].subpage); 6274 } 6275 ctl_set_success(ctsio); 6276 ctl_done((union ctl_io *)ctsio); 6277 return (CTL_RETVAL_COMPLETE); 6278 } 6279 6280 /* 6281 * From SPC-3: 6282 * "A parameter list length of zero indicates that the Data-Out Buffer 6283 * shall be empty. This condition shall not be considered as an error." 6284 */ 6285 if (param_len == 0) { 6286 ctl_set_success(ctsio); 6287 ctl_done((union ctl_io *)ctsio); 6288 return (CTL_RETVAL_COMPLETE); 6289 } 6290 6291 /* 6292 * Since we'll hit this the first time through, prior to 6293 * allocation, we don't need to free a data buffer here. 6294 */ 6295 if (param_len < header_size) { 6296 ctl_set_param_len_error(ctsio); 6297 ctl_done((union ctl_io *)ctsio); 6298 return (CTL_RETVAL_COMPLETE); 6299 } 6300 6301 /* 6302 * Allocate the data buffer and grab the user's data. In theory, 6303 * we shouldn't have to sanity check the parameter list length here 6304 * because the maximum size is 64K. We should be able to malloc 6305 * that much without too many problems. 
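	 *
	 * Once the datamove below completes, the parameter list we get
	 * back is laid out like this (sizes follow from the header
	 * structures used further down):
	 *
	 *	mode parameter header	4 bytes for MODE SELECT(6),
	 *				8 bytes for MODE SELECT(10)
	 *	block descriptors	bd_len bytes, frequently zero
	 *	mode pages		one or more pages, each led by its
	 *				own page header
	 *
	 * ctl_do_mode_select() then walks the pages one at a time, which
	 * is why len_used is seeded with header_size + bd_len and
	 * len_left with param_len - header_size - bd_len below.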
6306 */ 6307 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6308 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6309 ctsio->kern_data_len = param_len; 6310 ctsio->kern_total_len = param_len; 6311 ctsio->kern_rel_offset = 0; 6312 ctsio->kern_sg_entries = 0; 6313 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6314 ctsio->be_move_done = ctl_config_move_done; 6315 ctl_datamove((union ctl_io *)ctsio); 6316 6317 return (CTL_RETVAL_COMPLETE); 6318 } 6319 6320 switch (ctsio->cdb[0]) { 6321 case MODE_SELECT_6: { 6322 struct scsi_mode_header_6 *mh6; 6323 6324 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6325 bd_len = mh6->blk_desc_len; 6326 break; 6327 } 6328 case MODE_SELECT_10: { 6329 struct scsi_mode_header_10 *mh10; 6330 6331 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6332 bd_len = scsi_2btoul(mh10->blk_desc_len); 6333 break; 6334 } 6335 default: 6336 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6337 } 6338 6339 if (param_len < (header_size + bd_len)) { 6340 free(ctsio->kern_data_ptr, M_CTL); 6341 ctl_set_param_len_error(ctsio); 6342 ctl_done((union ctl_io *)ctsio); 6343 return (CTL_RETVAL_COMPLETE); 6344 } 6345 6346 /* 6347 * Set the IO_CONT flag, so that if this I/O gets passed to 6348 * ctl_config_write_done(), it'll get passed back to 6349 * ctl_do_mode_select() for further processing, or completion if 6350 * we're all done. 6351 */ 6352 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6353 ctsio->io_cont = ctl_do_mode_select; 6354 6355 modepage_info = (union ctl_modepage_info *) 6356 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6357 memset(modepage_info, 0, sizeof(*modepage_info)); 6358 modepage_info->header.len_left = param_len - header_size - bd_len; 6359 modepage_info->header.len_used = header_size + bd_len; 6360 6361 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6362 } 6363 6364 int 6365 ctl_mode_sense(struct ctl_scsiio *ctsio) 6366 { 6367 struct ctl_lun *lun = CTL_LUN(ctsio); 6368 int pc, page_code, dbd, subpage; 6369 int alloc_len, page_len, header_len, total_len; 6370 struct scsi_mode_block_descr *block_desc; 6371 struct ctl_page_index *page_index; 6372 6373 dbd = 0; 6374 block_desc = NULL; 6375 6376 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6377 6378 switch (ctsio->cdb[0]) { 6379 case MODE_SENSE_6: { 6380 struct scsi_mode_sense_6 *cdb; 6381 6382 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6383 6384 header_len = sizeof(struct scsi_mode_hdr_6); 6385 if (cdb->byte2 & SMS_DBD) 6386 dbd = 1; 6387 else 6388 header_len += sizeof(struct scsi_mode_block_descr); 6389 6390 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6391 page_code = cdb->page & SMS_PAGE_CODE; 6392 subpage = cdb->subpage; 6393 alloc_len = cdb->length; 6394 break; 6395 } 6396 case MODE_SENSE_10: { 6397 struct scsi_mode_sense_10 *cdb; 6398 6399 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6400 6401 header_len = sizeof(struct scsi_mode_hdr_10); 6402 6403 if (cdb->byte2 & SMS_DBD) 6404 dbd = 1; 6405 else 6406 header_len += sizeof(struct scsi_mode_block_descr); 6407 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6408 page_code = cdb->page & SMS_PAGE_CODE; 6409 subpage = cdb->subpage; 6410 alloc_len = scsi_2btoul(cdb->length); 6411 break; 6412 } 6413 default: 6414 ctl_set_invalid_opcode(ctsio); 6415 ctl_done((union ctl_io *)ctsio); 6416 return (CTL_RETVAL_COMPLETE); 6417 break; /* NOTREACHED */ 6418 } 6419 6420 /* 6421 * We have to make a first pass through to calculate the size of 6422 * the pages that match the user's query. 
Then we allocate enough 6423 * memory to hold it, and actually copy the data into the buffer. 6424 */ 6425 switch (page_code) { 6426 case SMS_ALL_PAGES_PAGE: { 6427 u_int i; 6428 6429 page_len = 0; 6430 6431 /* 6432 * At the moment, values other than 0 and 0xff here are 6433 * reserved according to SPC-3. 6434 */ 6435 if ((subpage != SMS_SUBPAGE_PAGE_0) 6436 && (subpage != SMS_SUBPAGE_ALL)) { 6437 ctl_set_invalid_field(ctsio, 6438 /*sks_valid*/ 1, 6439 /*command*/ 1, 6440 /*field*/ 3, 6441 /*bit_valid*/ 0, 6442 /*bit*/ 0); 6443 ctl_done((union ctl_io *)ctsio); 6444 return (CTL_RETVAL_COMPLETE); 6445 } 6446 6447 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6448 page_index = &lun->mode_pages.index[i]; 6449 6450 /* Make sure the page is supported for this dev type */ 6451 if (lun->be_lun->lun_type == T_DIRECT && 6452 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6453 continue; 6454 if (lun->be_lun->lun_type == T_PROCESSOR && 6455 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6456 continue; 6457 if (lun->be_lun->lun_type == T_CDROM && 6458 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6459 continue; 6460 6461 /* 6462 * We don't use this subpage if the user didn't 6463 * request all subpages. 6464 */ 6465 if ((page_index->subpage != 0) 6466 && (subpage == SMS_SUBPAGE_PAGE_0)) 6467 continue; 6468 6469 page_len += page_index->page_len; 6470 } 6471 break; 6472 } 6473 default: { 6474 u_int i; 6475 6476 page_len = 0; 6477 6478 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6479 page_index = &lun->mode_pages.index[i]; 6480 6481 /* Make sure the page is supported for this dev type */ 6482 if (lun->be_lun->lun_type == T_DIRECT && 6483 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6484 continue; 6485 if (lun->be_lun->lun_type == T_PROCESSOR && 6486 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6487 continue; 6488 if (lun->be_lun->lun_type == T_CDROM && 6489 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6490 continue; 6491 6492 /* Look for the right page code */ 6493 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6494 continue; 6495 6496 /* Look for the right subpage or the subpage wildcard*/ 6497 if ((page_index->subpage != subpage) 6498 && (subpage != SMS_SUBPAGE_ALL)) 6499 continue; 6500 6501 page_len += page_index->page_len; 6502 } 6503 6504 if (page_len == 0) { 6505 ctl_set_invalid_field(ctsio, 6506 /*sks_valid*/ 1, 6507 /*command*/ 1, 6508 /*field*/ 2, 6509 /*bit_valid*/ 1, 6510 /*bit*/ 5); 6511 ctl_done((union ctl_io *)ctsio); 6512 return (CTL_RETVAL_COMPLETE); 6513 } 6514 break; 6515 } 6516 } 6517 6518 total_len = header_len + page_len; 6519 6520 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6521 ctsio->kern_sg_entries = 0; 6522 ctsio->kern_rel_offset = 0; 6523 ctsio->kern_data_len = min(total_len, alloc_len); 6524 ctsio->kern_total_len = ctsio->kern_data_len; 6525 6526 switch (ctsio->cdb[0]) { 6527 case MODE_SENSE_6: { 6528 struct scsi_mode_hdr_6 *header; 6529 6530 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6531 6532 header->datalen = MIN(total_len - 1, 254); 6533 if (lun->be_lun->lun_type == T_DIRECT) { 6534 header->dev_specific = 0x10; /* DPOFUA */ 6535 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6536 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6537 header->dev_specific |= 0x80; /* WP */ 6538 } 6539 if (dbd) 6540 header->block_descr_len = 0; 6541 else 6542 header->block_descr_len = 6543 sizeof(struct scsi_mode_block_descr); 6544 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6545 break; 6546 } 
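	/*
	 * MODE SENSE(10) below differs only in the header: its MODE DATA
	 * LENGTH field is two bytes wide and, like the one-byte field
	 * above, does not count itself.  For example, a 24-byte reply is
	 * reported as 23 by the 6-byte header above but as 22 by the
	 * 10-byte header, and the caps are 254 versus 65533.
	 */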
6547 case MODE_SENSE_10: { 6548 struct scsi_mode_hdr_10 *header; 6549 int datalen; 6550 6551 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6552 6553 datalen = MIN(total_len - 2, 65533); 6554 scsi_ulto2b(datalen, header->datalen); 6555 if (lun->be_lun->lun_type == T_DIRECT) { 6556 header->dev_specific = 0x10; /* DPOFUA */ 6557 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6558 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6559 header->dev_specific |= 0x80; /* WP */ 6560 } 6561 if (dbd) 6562 scsi_ulto2b(0, header->block_descr_len); 6563 else 6564 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6565 header->block_descr_len); 6566 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6567 break; 6568 } 6569 default: 6570 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6571 } 6572 6573 /* 6574 * If we've got a disk, use its blocksize in the block 6575 * descriptor. Otherwise, just set it to 0. 6576 */ 6577 if (dbd == 0) { 6578 if (lun->be_lun->lun_type == T_DIRECT) 6579 scsi_ulto3b(lun->be_lun->blocksize, 6580 block_desc->block_len); 6581 else 6582 scsi_ulto3b(0, block_desc->block_len); 6583 } 6584 6585 switch (page_code) { 6586 case SMS_ALL_PAGES_PAGE: { 6587 int i, data_used; 6588 6589 data_used = header_len; 6590 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6591 struct ctl_page_index *page_index; 6592 6593 page_index = &lun->mode_pages.index[i]; 6594 if (lun->be_lun->lun_type == T_DIRECT && 6595 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6596 continue; 6597 if (lun->be_lun->lun_type == T_PROCESSOR && 6598 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6599 continue; 6600 if (lun->be_lun->lun_type == T_CDROM && 6601 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6602 continue; 6603 6604 /* 6605 * We don't use this subpage if the user didn't 6606 * request all subpages. We already checked (above) 6607 * to make sure the user only specified a subpage 6608 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6609 */ 6610 if ((page_index->subpage != 0) 6611 && (subpage == SMS_SUBPAGE_PAGE_0)) 6612 continue; 6613 6614 /* 6615 * Call the handler, if it exists, to update the 6616 * page to the latest values. 6617 */ 6618 if (page_index->sense_handler != NULL) 6619 page_index->sense_handler(ctsio, page_index,pc); 6620 6621 memcpy(ctsio->kern_data_ptr + data_used, 6622 page_index->page_data + 6623 (page_index->page_len * pc), 6624 page_index->page_len); 6625 data_used += page_index->page_len; 6626 } 6627 break; 6628 } 6629 default: { 6630 int i, data_used; 6631 6632 data_used = header_len; 6633 6634 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6635 struct ctl_page_index *page_index; 6636 6637 page_index = &lun->mode_pages.index[i]; 6638 6639 /* Look for the right page code */ 6640 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6641 continue; 6642 6643 /* Look for the right subpage or the subpage wildcard*/ 6644 if ((page_index->subpage != subpage) 6645 && (subpage != SMS_SUBPAGE_ALL)) 6646 continue; 6647 6648 /* Make sure the page is supported for this dev type */ 6649 if (lun->be_lun->lun_type == T_DIRECT && 6650 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6651 continue; 6652 if (lun->be_lun->lun_type == T_PROCESSOR && 6653 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6654 continue; 6655 if (lun->be_lun->lun_type == T_CDROM && 6656 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6657 continue; 6658 6659 /* 6660 * Call the handler, if it exists, to update the 6661 * page to the latest values. 
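			 *
			 * page_data keeps the copies of each page back to
			 * back, one page_len-sized slot per page control
			 * value in the same order the PC field of the CDB
			 * uses (current, changeable, default, saved), so
			 * the copy below simply indexes it with
			 * page_index->page_len * pc:
			 *
			 *	page_data + 0 * page_len	current values
			 *	page_data + 1 * page_len	changeable mask
			 *	page_data + 2 * page_len	default values
			 *	page_data + 3 * page_len	saved values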
6662 */ 6663 if (page_index->sense_handler != NULL) 6664 page_index->sense_handler(ctsio, page_index,pc); 6665 6666 memcpy(ctsio->kern_data_ptr + data_used, 6667 page_index->page_data + 6668 (page_index->page_len * pc), 6669 page_index->page_len); 6670 data_used += page_index->page_len; 6671 } 6672 break; 6673 } 6674 } 6675 6676 ctl_set_success(ctsio); 6677 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6678 ctsio->be_move_done = ctl_config_move_done; 6679 ctl_datamove((union ctl_io *)ctsio); 6680 return (CTL_RETVAL_COMPLETE); 6681 } 6682 6683 int 6684 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, 6685 struct ctl_page_index *page_index, 6686 int pc) 6687 { 6688 struct ctl_lun *lun = CTL_LUN(ctsio); 6689 struct scsi_log_temperature *data; 6690 const char *value; 6691 6692 data = (struct scsi_log_temperature *)page_index->page_data; 6693 6694 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); 6695 data->hdr.param_control = SLP_LBIN; 6696 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6697 sizeof(struct scsi_log_param_header); 6698 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6699 NULL)) != NULL) 6700 data->temperature = strtol(value, NULL, 0); 6701 else 6702 data->temperature = 0xff; 6703 data++; 6704 6705 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); 6706 data->hdr.param_control = SLP_LBIN; 6707 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6708 sizeof(struct scsi_log_param_header); 6709 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", 6710 NULL)) != NULL) 6711 data->temperature = strtol(value, NULL, 0); 6712 else 6713 data->temperature = 0xff; 6714 return (0); 6715 } 6716 6717 int 6718 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6719 struct ctl_page_index *page_index, 6720 int pc) 6721 { 6722 struct ctl_lun *lun = CTL_LUN(ctsio); 6723 struct scsi_log_param_header *phdr; 6724 uint8_t *data; 6725 uint64_t val; 6726 6727 data = page_index->page_data; 6728 6729 if (lun->backend->lun_attr != NULL && 6730 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6731 != UINT64_MAX) { 6732 phdr = (struct scsi_log_param_header *)data; 6733 scsi_ulto2b(0x0001, phdr->param_code); 6734 phdr->param_control = SLP_LBIN | SLP_LP; 6735 phdr->param_len = 8; 6736 data = (uint8_t *)(phdr + 1); 6737 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6738 data[4] = 0x02; /* per-pool */ 6739 data += phdr->param_len; 6740 } 6741 6742 if (lun->backend->lun_attr != NULL && 6743 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6744 != UINT64_MAX) { 6745 phdr = (struct scsi_log_param_header *)data; 6746 scsi_ulto2b(0x0002, phdr->param_code); 6747 phdr->param_control = SLP_LBIN | SLP_LP; 6748 phdr->param_len = 8; 6749 data = (uint8_t *)(phdr + 1); 6750 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6751 data[4] = 0x01; /* per-LUN */ 6752 data += phdr->param_len; 6753 } 6754 6755 if (lun->backend->lun_attr != NULL && 6756 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6757 != UINT64_MAX) { 6758 phdr = (struct scsi_log_param_header *)data; 6759 scsi_ulto2b(0x00f1, phdr->param_code); 6760 phdr->param_control = SLP_LBIN | SLP_LP; 6761 phdr->param_len = 8; 6762 data = (uint8_t *)(phdr + 1); 6763 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6764 data[4] = 0x02; /* per-pool */ 6765 data += phdr->param_len; 6766 } 6767 6768 if (lun->backend->lun_attr != NULL && 6769 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6770 != UINT64_MAX) { 6771 phdr = (struct 
scsi_log_param_header *)data;
		scsi_ulto2b(0x00f2, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x02;		/* per-pool */
		data += phdr->param_len;
	}

	page_index->page_len = data - page_index->page_data;
	return (0);
}

int
ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
			  struct ctl_page_index *page_index,
			  int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct stat_page *data;
	struct bintime *t;

	data = (struct stat_page *)page_index->page_data;

	scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
	data->sap.hdr.param_control = SLP_LBIN;
	data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
	    sizeof(struct scsi_log_param_header);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
	    data->sap.read_num);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
	    data->sap.write_num);
	if (lun->be_lun->blocksize > 0) {
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
		    lun->be_lun->blocksize, data->sap.recvieved_lba);
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
		    lun->be_lun->blocksize, data->sap.transmitted_lba);
	}
	t = &lun->stats.time[CTL_STATS_READ];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.read_int);
	t = &lun->stats.time[CTL_STATS_WRITE];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.write_int);
	scsi_u64to8b(0, data->sap.weighted_num);
	scsi_u64to8b(0, data->sap.weighted_int);
	scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
	data->it.hdr.param_control = SLP_LBIN;
	data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
	    sizeof(struct scsi_log_param_header);
#ifdef CTL_TIME_IO
	scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
#endif
	scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
	data->ti.hdr.param_control = SLP_LBIN;
	data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
	    sizeof(struct scsi_log_param_header);
	scsi_ulto4b(3, data->ti.exponent);
	scsi_ulto4b(1, data->ti.integer);
	return (0);
}

int
ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index,
			 int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_log_informational_exceptions *data;
	const char *value;

	data = (struct scsi_log_informational_exceptions *)page_index->page_data;

	scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
	data->hdr.param_control = SLP_LBIN;
	data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
	    sizeof(struct scsi_log_param_header);
	data->ie_asc = lun->ie_asc;
	data->ie_ascq = lun->ie_ascq;
	if ((value = dnvlist_get_string(lun->be_lun->options, "temperature",
	    NULL)) != NULL)
		data->temperature = strtol(value, NULL, 0);
	else
		data->temperature = 0xff;
	return (0);
}

int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	int i, pc, page_code, subpage;
	int alloc_len, total_len;
	struct ctl_page_index *page_index;
	struct scsi_log_sense *cdb;
	struct scsi_log_header *header;

	CTL_DEBUG_PRINT(("ctl_log_sense\n"));

	cdb = (struct scsi_log_sense *)ctsio->cdb;
	pc =
(cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6873 page_code = cdb->page & SLS_PAGE_CODE; 6874 subpage = cdb->subpage; 6875 alloc_len = scsi_2btoul(cdb->length); 6876 6877 page_index = NULL; 6878 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6879 page_index = &lun->log_pages.index[i]; 6880 6881 /* Look for the right page code */ 6882 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6883 continue; 6884 6885 /* Look for the right subpage or the subpage wildcard*/ 6886 if (page_index->subpage != subpage) 6887 continue; 6888 6889 break; 6890 } 6891 if (i >= CTL_NUM_LOG_PAGES) { 6892 ctl_set_invalid_field(ctsio, 6893 /*sks_valid*/ 1, 6894 /*command*/ 1, 6895 /*field*/ 2, 6896 /*bit_valid*/ 0, 6897 /*bit*/ 0); 6898 ctl_done((union ctl_io *)ctsio); 6899 return (CTL_RETVAL_COMPLETE); 6900 } 6901 6902 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6903 6904 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6905 ctsio->kern_sg_entries = 0; 6906 ctsio->kern_rel_offset = 0; 6907 ctsio->kern_data_len = min(total_len, alloc_len); 6908 ctsio->kern_total_len = ctsio->kern_data_len; 6909 6910 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6911 header->page = page_index->page_code; 6912 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6913 header->page |= SL_DS; 6914 if (page_index->subpage) { 6915 header->page |= SL_SPF; 6916 header->subpage = page_index->subpage; 6917 } 6918 scsi_ulto2b(page_index->page_len, header->datalen); 6919 6920 /* 6921 * Call the handler, if it exists, to update the 6922 * page to the latest values. 6923 */ 6924 if (page_index->sense_handler != NULL) 6925 page_index->sense_handler(ctsio, page_index, pc); 6926 6927 memcpy(header + 1, page_index->page_data, page_index->page_len); 6928 6929 ctl_set_success(ctsio); 6930 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6931 ctsio->be_move_done = ctl_config_move_done; 6932 ctl_datamove((union ctl_io *)ctsio); 6933 return (CTL_RETVAL_COMPLETE); 6934 } 6935 6936 int 6937 ctl_read_capacity(struct ctl_scsiio *ctsio) 6938 { 6939 struct ctl_lun *lun = CTL_LUN(ctsio); 6940 struct scsi_read_capacity *cdb; 6941 struct scsi_read_capacity_data *data; 6942 uint32_t lba; 6943 6944 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6945 6946 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6947 6948 lba = scsi_4btoul(cdb->addr); 6949 if (((cdb->pmi & SRC_PMI) == 0) 6950 && (lba != 0)) { 6951 ctl_set_invalid_field(/*ctsio*/ ctsio, 6952 /*sks_valid*/ 1, 6953 /*command*/ 1, 6954 /*field*/ 2, 6955 /*bit_valid*/ 0, 6956 /*bit*/ 0); 6957 ctl_done((union ctl_io *)ctsio); 6958 return (CTL_RETVAL_COMPLETE); 6959 } 6960 6961 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6962 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6963 ctsio->kern_data_len = sizeof(*data); 6964 ctsio->kern_total_len = sizeof(*data); 6965 ctsio->kern_rel_offset = 0; 6966 ctsio->kern_sg_entries = 0; 6967 6968 /* 6969 * If the maximum LBA is greater than 0xfffffffe, the user must 6970 * issue a SERVICE ACTION IN (16) command, with the read capacity 6971 * serivce action set. 6972 */ 6973 if (lun->be_lun->maxlba > 0xfffffffe) 6974 scsi_ulto4b(0xffffffff, data->addr); 6975 else 6976 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6977 6978 /* 6979 * XXX KDM this may not be 512 bytes... 
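	 *
	 * For reference, the 8-byte READ CAPACITY(10) payload assembled
	 * here (a sketch of scsi_read_capacity_data as used above):
	 *
	 *	addr	4 bytes: last LBA, or 0xffffffff when the real
	 *		value does not fit (handled above)
	 *	length	4 bytes: block length in bytes, taken from
	 *		lun->be_lun->blocksize just below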
6980 */ 6981 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6982 6983 ctl_set_success(ctsio); 6984 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6985 ctsio->be_move_done = ctl_config_move_done; 6986 ctl_datamove((union ctl_io *)ctsio); 6987 return (CTL_RETVAL_COMPLETE); 6988 } 6989 6990 int 6991 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6992 { 6993 struct ctl_lun *lun = CTL_LUN(ctsio); 6994 struct scsi_read_capacity_16 *cdb; 6995 struct scsi_read_capacity_data_long *data; 6996 uint64_t lba; 6997 uint32_t alloc_len; 6998 6999 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7000 7001 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7002 7003 alloc_len = scsi_4btoul(cdb->alloc_len); 7004 lba = scsi_8btou64(cdb->addr); 7005 7006 if ((cdb->reladr & SRC16_PMI) 7007 && (lba != 0)) { 7008 ctl_set_invalid_field(/*ctsio*/ ctsio, 7009 /*sks_valid*/ 1, 7010 /*command*/ 1, 7011 /*field*/ 2, 7012 /*bit_valid*/ 0, 7013 /*bit*/ 0); 7014 ctl_done((union ctl_io *)ctsio); 7015 return (CTL_RETVAL_COMPLETE); 7016 } 7017 7018 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7019 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7020 ctsio->kern_rel_offset = 0; 7021 ctsio->kern_sg_entries = 0; 7022 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7023 ctsio->kern_total_len = ctsio->kern_data_len; 7024 7025 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7026 /* XXX KDM this may not be 512 bytes... */ 7027 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7028 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7029 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7030 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7031 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7032 7033 ctl_set_success(ctsio); 7034 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7035 ctsio->be_move_done = ctl_config_move_done; 7036 ctl_datamove((union ctl_io *)ctsio); 7037 return (CTL_RETVAL_COMPLETE); 7038 } 7039 7040 int 7041 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7042 { 7043 struct ctl_lun *lun = CTL_LUN(ctsio); 7044 struct scsi_get_lba_status *cdb; 7045 struct scsi_get_lba_status_data *data; 7046 struct ctl_lba_len_flags *lbalen; 7047 uint64_t lba; 7048 uint32_t alloc_len, total_len; 7049 int retval; 7050 7051 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7052 7053 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7054 lba = scsi_8btou64(cdb->addr); 7055 alloc_len = scsi_4btoul(cdb->alloc_len); 7056 7057 if (lba > lun->be_lun->maxlba) { 7058 ctl_set_lba_out_of_range(ctsio, lba); 7059 ctl_done((union ctl_io *)ctsio); 7060 return (CTL_RETVAL_COMPLETE); 7061 } 7062 7063 total_len = sizeof(*data) + sizeof(data->descr[0]); 7064 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7065 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7066 ctsio->kern_rel_offset = 0; 7067 ctsio->kern_sg_entries = 0; 7068 ctsio->kern_data_len = min(total_len, alloc_len); 7069 ctsio->kern_total_len = ctsio->kern_data_len; 7070 7071 /* Fill dummy data in case backend can't tell anything. */ 7072 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7073 scsi_u64to8b(lba, data->descr[0].addr); 7074 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7075 data->descr[0].length); 7076 data->descr[0].status = 0; /* Mapped or unknown. 
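	 * The status byte follows SBC's provisioning status codes: 0 is
	 * mapped (or unknown), 1 is deallocated and 2 is anchored.  This
	 * single descriptor is only a fallback; the backend's
	 * config_read method invoked below may overwrite it with real
	 * per-extent information before the data is moved to the
	 * initiator.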
*/ 7077 7078 ctl_set_success(ctsio); 7079 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7080 ctsio->be_move_done = ctl_config_move_done; 7081 7082 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7083 lbalen->lba = lba; 7084 lbalen->len = total_len; 7085 lbalen->flags = 0; 7086 retval = lun->backend->config_read((union ctl_io *)ctsio); 7087 return (retval); 7088 } 7089 7090 int 7091 ctl_read_defect(struct ctl_scsiio *ctsio) 7092 { 7093 struct scsi_read_defect_data_10 *ccb10; 7094 struct scsi_read_defect_data_12 *ccb12; 7095 struct scsi_read_defect_data_hdr_10 *data10; 7096 struct scsi_read_defect_data_hdr_12 *data12; 7097 uint32_t alloc_len, data_len; 7098 uint8_t format; 7099 7100 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7101 7102 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7103 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7104 format = ccb10->format; 7105 alloc_len = scsi_2btoul(ccb10->alloc_length); 7106 data_len = sizeof(*data10); 7107 } else { 7108 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7109 format = ccb12->format; 7110 alloc_len = scsi_4btoul(ccb12->alloc_length); 7111 data_len = sizeof(*data12); 7112 } 7113 if (alloc_len == 0) { 7114 ctl_set_success(ctsio); 7115 ctl_done((union ctl_io *)ctsio); 7116 return (CTL_RETVAL_COMPLETE); 7117 } 7118 7119 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7120 ctsio->kern_rel_offset = 0; 7121 ctsio->kern_sg_entries = 0; 7122 ctsio->kern_data_len = min(data_len, alloc_len); 7123 ctsio->kern_total_len = ctsio->kern_data_len; 7124 7125 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7126 data10 = (struct scsi_read_defect_data_hdr_10 *) 7127 ctsio->kern_data_ptr; 7128 data10->format = format; 7129 scsi_ulto2b(0, data10->length); 7130 } else { 7131 data12 = (struct scsi_read_defect_data_hdr_12 *) 7132 ctsio->kern_data_ptr; 7133 data12->format = format; 7134 scsi_ulto2b(0, data12->generation); 7135 scsi_ulto4b(0, data12->length); 7136 } 7137 7138 ctl_set_success(ctsio); 7139 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7140 ctsio->be_move_done = ctl_config_move_done; 7141 ctl_datamove((union ctl_io *)ctsio); 7142 return (CTL_RETVAL_COMPLETE); 7143 } 7144 7145 int 7146 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7147 { 7148 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7149 struct ctl_lun *lun = CTL_LUN(ctsio); 7150 struct scsi_maintenance_in *cdb; 7151 int retval; 7152 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7153 int num_ha_groups, num_target_ports, shared_group; 7154 struct ctl_port *port; 7155 struct scsi_target_group_data *rtg_ptr; 7156 struct scsi_target_group_data_extended *rtg_ext_ptr; 7157 struct scsi_target_port_group_descriptor *tpg_desc; 7158 7159 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7160 7161 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7162 retval = CTL_RETVAL_COMPLETE; 7163 7164 switch (cdb->byte2 & STG_PDF_MASK) { 7165 case STG_PDF_LENGTH: 7166 ext = 0; 7167 break; 7168 case STG_PDF_EXTENDED: 7169 ext = 1; 7170 break; 7171 default: 7172 ctl_set_invalid_field(/*ctsio*/ ctsio, 7173 /*sks_valid*/ 1, 7174 /*command*/ 1, 7175 /*field*/ 2, 7176 /*bit_valid*/ 1, 7177 /*bit*/ 5); 7178 ctl_done((union ctl_io *)ctsio); 7179 return(retval); 7180 } 7181 7182 num_target_ports = 0; 7183 shared_group = (softc->is_single != 0); 7184 mtx_lock(&softc->ctl_lock); 7185 STAILQ_FOREACH(port, &softc->port_list, links) { 7186 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7187 continue; 7188 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 
7189 continue; 7190 num_target_ports++; 7191 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7192 shared_group = 1; 7193 } 7194 mtx_unlock(&softc->ctl_lock); 7195 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7196 7197 if (ext) 7198 total_len = sizeof(struct scsi_target_group_data_extended); 7199 else 7200 total_len = sizeof(struct scsi_target_group_data); 7201 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7202 (shared_group + num_ha_groups) + 7203 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7204 7205 alloc_len = scsi_4btoul(cdb->length); 7206 7207 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7208 ctsio->kern_sg_entries = 0; 7209 ctsio->kern_rel_offset = 0; 7210 ctsio->kern_data_len = min(total_len, alloc_len); 7211 ctsio->kern_total_len = ctsio->kern_data_len; 7212 7213 if (ext) { 7214 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7215 ctsio->kern_data_ptr; 7216 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7217 rtg_ext_ptr->format_type = 0x10; 7218 rtg_ext_ptr->implicit_transition_time = 0; 7219 tpg_desc = &rtg_ext_ptr->groups[0]; 7220 } else { 7221 rtg_ptr = (struct scsi_target_group_data *) 7222 ctsio->kern_data_ptr; 7223 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7224 tpg_desc = &rtg_ptr->groups[0]; 7225 } 7226 7227 mtx_lock(&softc->ctl_lock); 7228 pg = softc->port_min / softc->port_cnt; 7229 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7230 /* Some shelf is known to be primary. */ 7231 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7232 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7233 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7234 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7235 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7236 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7237 else 7238 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7239 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7240 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7241 } else { 7242 ts = os; 7243 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7244 } 7245 } else { 7246 /* No known primary shelf. */ 7247 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7248 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7249 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7250 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7251 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7252 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7253 } else { 7254 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7255 } 7256 } 7257 if (shared_group) { 7258 tpg_desc->pref_state = ts; 7259 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7260 TPG_U_SUP | TPG_T_SUP; 7261 scsi_ulto2b(1, tpg_desc->target_port_group); 7262 tpg_desc->status = TPG_IMPLICIT; 7263 pc = 0; 7264 STAILQ_FOREACH(port, &softc->port_list, links) { 7265 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7266 continue; 7267 if (!softc->is_single && 7268 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7269 continue; 7270 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7271 continue; 7272 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7273 relative_target_port_identifier); 7274 pc++; 7275 } 7276 tpg_desc->target_port_count = pc; 7277 tpg_desc = (struct scsi_target_port_group_descriptor *) 7278 &tpg_desc->descriptors[pc]; 7279 } 7280 for (g = 0; g < num_ha_groups; g++) { 7281 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7282 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7283 TPG_U_SUP | TPG_T_SUP; 7284 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7285 tpg_desc->status = TPG_IMPLICIT; 7286 pc = 0; 7287 STAILQ_FOREACH(port, &softc->port_list, links) { 7288 if (port->targ_port < g * softc->port_cnt || 7289 port->targ_port >= (g + 1) * softc->port_cnt) 7290 continue; 7291 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7292 continue; 7293 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7294 continue; 7295 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7296 continue; 7297 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7298 relative_target_port_identifier); 7299 pc++; 7300 } 7301 tpg_desc->target_port_count = pc; 7302 tpg_desc = (struct scsi_target_port_group_descriptor *) 7303 &tpg_desc->descriptors[pc]; 7304 } 7305 mtx_unlock(&softc->ctl_lock); 7306 7307 ctl_set_success(ctsio); 7308 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7309 ctsio->be_move_done = ctl_config_move_done; 7310 ctl_datamove((union ctl_io *)ctsio); 7311 return(retval); 7312 } 7313 7314 int 7315 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7316 { 7317 struct ctl_lun *lun = CTL_LUN(ctsio); 7318 struct scsi_report_supported_opcodes *cdb; 7319 const struct ctl_cmd_entry *entry, *sentry; 7320 struct scsi_report_supported_opcodes_all *all; 7321 struct scsi_report_supported_opcodes_descr *descr; 7322 struct scsi_report_supported_opcodes_one *one; 7323 int retval; 7324 int alloc_len, total_len; 7325 int opcode, service_action, i, j, num; 7326 7327 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7328 7329 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7330 retval = CTL_RETVAL_COMPLETE; 7331 7332 opcode = cdb->requested_opcode; 7333 service_action = scsi_2btoul(cdb->requested_service_action); 7334 switch (cdb->options & RSO_OPTIONS_MASK) { 7335 case RSO_OPTIONS_ALL: 7336 num = 0; 7337 for (i = 0; i < 256; i++) { 7338 entry = &ctl_cmd_table[i]; 7339 if (entry->flags & CTL_CMD_FLAG_SA5) { 7340 for (j = 0; j < 32; j++) { 7341 sentry = &((const struct ctl_cmd_entry *) 7342 entry->execute)[j]; 7343 if (ctl_cmd_applicable( 7344 lun->be_lun->lun_type, sentry)) 7345 num++; 7346 } 7347 } else { 7348 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7349 entry)) 7350 num++; 7351 } 7352 } 7353 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7354 num * sizeof(struct scsi_report_supported_opcodes_descr); 7355 break; 7356 case RSO_OPTIONS_OC: 7357 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7358 ctl_set_invalid_field(/*ctsio*/ ctsio, 7359 /*sks_valid*/ 1, 7360 /*command*/ 1, 7361 /*field*/ 2, 7362 /*bit_valid*/ 1, 7363 /*bit*/ 2); 7364 ctl_done((union ctl_io *)ctsio); 7365 return (CTL_RETVAL_COMPLETE); 7366 } 7367 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7368 break; 7369 case RSO_OPTIONS_OC_SA: 7370 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7371 service_action >= 32) { 7372 ctl_set_invalid_field(/*ctsio*/ ctsio, 7373 /*sks_valid*/ 1, 7374 /*command*/ 1, 7375 /*field*/ 2, 7376 /*bit_valid*/ 1, 7377 /*bit*/ 2); 7378 ctl_done((union ctl_io *)ctsio); 7379 return (CTL_RETVAL_COMPLETE); 7380 } 7381 /* FALLTHROUGH */ 7382 case RSO_OPTIONS_OC_ASA: 7383 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7384 break; 7385 default: 7386 ctl_set_invalid_field(/*ctsio*/ ctsio, 7387 /*sks_valid*/ 1, 7388 /*command*/ 1, 7389 /*field*/ 2, 7390 /*bit_valid*/ 1, 7391 /*bit*/ 2); 7392 ctl_done((union ctl_io *)ctsio); 7393 return 
(CTL_RETVAL_COMPLETE); 7394 } 7395 7396 alloc_len = scsi_4btoul(cdb->length); 7397 7398 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7399 ctsio->kern_sg_entries = 0; 7400 ctsio->kern_rel_offset = 0; 7401 ctsio->kern_data_len = min(total_len, alloc_len); 7402 ctsio->kern_total_len = ctsio->kern_data_len; 7403 7404 switch (cdb->options & RSO_OPTIONS_MASK) { 7405 case RSO_OPTIONS_ALL: 7406 all = (struct scsi_report_supported_opcodes_all *) 7407 ctsio->kern_data_ptr; 7408 num = 0; 7409 for (i = 0; i < 256; i++) { 7410 entry = &ctl_cmd_table[i]; 7411 if (entry->flags & CTL_CMD_FLAG_SA5) { 7412 for (j = 0; j < 32; j++) { 7413 sentry = &((const struct ctl_cmd_entry *) 7414 entry->execute)[j]; 7415 if (!ctl_cmd_applicable( 7416 lun->be_lun->lun_type, sentry)) 7417 continue; 7418 descr = &all->descr[num++]; 7419 descr->opcode = i; 7420 scsi_ulto2b(j, descr->service_action); 7421 descr->flags = RSO_SERVACTV; 7422 scsi_ulto2b(sentry->length, 7423 descr->cdb_length); 7424 } 7425 } else { 7426 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7427 entry)) 7428 continue; 7429 descr = &all->descr[num++]; 7430 descr->opcode = i; 7431 scsi_ulto2b(0, descr->service_action); 7432 descr->flags = 0; 7433 scsi_ulto2b(entry->length, descr->cdb_length); 7434 } 7435 } 7436 scsi_ulto4b( 7437 num * sizeof(struct scsi_report_supported_opcodes_descr), 7438 all->length); 7439 break; 7440 case RSO_OPTIONS_OC: 7441 one = (struct scsi_report_supported_opcodes_one *) 7442 ctsio->kern_data_ptr; 7443 entry = &ctl_cmd_table[opcode]; 7444 goto fill_one; 7445 case RSO_OPTIONS_OC_SA: 7446 one = (struct scsi_report_supported_opcodes_one *) 7447 ctsio->kern_data_ptr; 7448 entry = &ctl_cmd_table[opcode]; 7449 entry = &((const struct ctl_cmd_entry *) 7450 entry->execute)[service_action]; 7451 fill_one: 7452 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7453 one->support = 3; 7454 scsi_ulto2b(entry->length, one->cdb_length); 7455 one->cdb_usage[0] = opcode; 7456 memcpy(&one->cdb_usage[1], entry->usage, 7457 entry->length - 1); 7458 } else 7459 one->support = 1; 7460 break; 7461 case RSO_OPTIONS_OC_ASA: 7462 one = (struct scsi_report_supported_opcodes_one *) 7463 ctsio->kern_data_ptr; 7464 entry = &ctl_cmd_table[opcode]; 7465 if (entry->flags & CTL_CMD_FLAG_SA5) { 7466 entry = &((const struct ctl_cmd_entry *) 7467 entry->execute)[service_action]; 7468 } else if (service_action != 0) { 7469 one->support = 1; 7470 break; 7471 } 7472 goto fill_one; 7473 } 7474 7475 ctl_set_success(ctsio); 7476 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7477 ctsio->be_move_done = ctl_config_move_done; 7478 ctl_datamove((union ctl_io *)ctsio); 7479 return(retval); 7480 } 7481 7482 int 7483 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7484 { 7485 struct scsi_report_supported_tmf *cdb; 7486 struct scsi_report_supported_tmf_ext_data *data; 7487 int retval; 7488 int alloc_len, total_len; 7489 7490 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7491 7492 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7493 7494 retval = CTL_RETVAL_COMPLETE; 7495 7496 if (cdb->options & RST_REPD) 7497 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7498 else 7499 total_len = sizeof(struct scsi_report_supported_tmf_data); 7500 alloc_len = scsi_4btoul(cdb->length); 7501 7502 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7503 ctsio->kern_sg_entries = 0; 7504 ctsio->kern_rel_offset = 0; 7505 ctsio->kern_data_len = min(total_len, alloc_len); 7506 ctsio->kern_total_len = ctsio->kern_data_len; 7507 7508 
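	/*
	 * The buffer is always filled through the extended-format
	 * structure: the TMF support bits stored below sit at the same
	 * offsets in the short format, and kern_data_len above has
	 * already been clamped to whichever size was allocated, so the
	 * RST_REPD case only changes how much of this is returned.
	 */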
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7509 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7510 RST_TRS; 7511 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7512 data->length = total_len - 4; 7513 7514 ctl_set_success(ctsio); 7515 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7516 ctsio->be_move_done = ctl_config_move_done; 7517 ctl_datamove((union ctl_io *)ctsio); 7518 return (retval); 7519 } 7520 7521 int 7522 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7523 { 7524 struct scsi_report_timestamp *cdb; 7525 struct scsi_report_timestamp_data *data; 7526 struct timeval tv; 7527 int64_t timestamp; 7528 int retval; 7529 int alloc_len, total_len; 7530 7531 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7532 7533 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7534 7535 retval = CTL_RETVAL_COMPLETE; 7536 7537 total_len = sizeof(struct scsi_report_timestamp_data); 7538 alloc_len = scsi_4btoul(cdb->length); 7539 7540 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7541 ctsio->kern_sg_entries = 0; 7542 ctsio->kern_rel_offset = 0; 7543 ctsio->kern_data_len = min(total_len, alloc_len); 7544 ctsio->kern_total_len = ctsio->kern_data_len; 7545 7546 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7547 scsi_ulto2b(sizeof(*data) - 2, data->length); 7548 data->origin = RTS_ORIG_OUTSIDE; 7549 getmicrotime(&tv); 7550 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7551 scsi_ulto4b(timestamp >> 16, data->timestamp); 7552 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7553 7554 ctl_set_success(ctsio); 7555 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7556 ctsio->be_move_done = ctl_config_move_done; 7557 ctl_datamove((union ctl_io *)ctsio); 7558 return (retval); 7559 } 7560 7561 int 7562 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7563 { 7564 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7565 struct ctl_lun *lun = CTL_LUN(ctsio); 7566 struct scsi_per_res_in *cdb; 7567 int alloc_len, total_len = 0; 7568 /* struct scsi_per_res_in_rsrv in_data; */ 7569 uint64_t key; 7570 7571 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7572 7573 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7574 7575 alloc_len = scsi_2btoul(cdb->length); 7576 7577 retry: 7578 mtx_lock(&lun->lun_lock); 7579 switch (cdb->action) { 7580 case SPRI_RK: /* read keys */ 7581 total_len = sizeof(struct scsi_per_res_in_keys) + 7582 lun->pr_key_count * 7583 sizeof(struct scsi_per_res_key); 7584 break; 7585 case SPRI_RR: /* read reservation */ 7586 if (lun->flags & CTL_LUN_PR_RESERVED) 7587 total_len = sizeof(struct scsi_per_res_in_rsrv); 7588 else 7589 total_len = sizeof(struct scsi_per_res_in_header); 7590 break; 7591 case SPRI_RC: /* report capabilities */ 7592 total_len = sizeof(struct scsi_per_res_cap); 7593 break; 7594 case SPRI_RS: /* read full status */ 7595 total_len = sizeof(struct scsi_per_res_in_header) + 7596 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7597 lun->pr_key_count; 7598 break; 7599 default: 7600 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7601 } 7602 mtx_unlock(&lun->lun_lock); 7603 7604 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7605 ctsio->kern_rel_offset = 0; 7606 ctsio->kern_sg_entries = 0; 7607 ctsio->kern_data_len = min(total_len, alloc_len); 7608 ctsio->kern_total_len = ctsio->kern_data_len; 7609 7610 mtx_lock(&lun->lun_lock); 7611 switch (cdb->action) { 7612 case SPRI_RK: { // read keys 7613 struct scsi_per_res_in_keys *res_keys; 7614 int i, key_count; 7615 7616 
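		/*
		 * Layout of the READ KEYS reply assembled below (a sketch
		 * of scsi_per_res_in_keys as used in this case):
		 *
		 *	header.generation	4-byte PR generation count
		 *	header.length		4 bytes: 8 * number of
		 *				registered keys
		 *	keys[]			one 8-byte reservation key
		 *				per registered initiator
		 */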
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7617 7618 /* 7619 * We had to drop the lock to allocate our buffer, which 7620 * leaves time for someone to come in with another 7621 * persistent reservation. (That is unlikely, though, 7622 * since this should be the only persistent reservation 7623 * command active right now.) 7624 */ 7625 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7626 (lun->pr_key_count * 7627 sizeof(struct scsi_per_res_key)))){ 7628 mtx_unlock(&lun->lun_lock); 7629 free(ctsio->kern_data_ptr, M_CTL); 7630 printf("%s: reservation length changed, retrying\n", 7631 __func__); 7632 goto retry; 7633 } 7634 7635 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7636 7637 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7638 lun->pr_key_count, res_keys->header.length); 7639 7640 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7641 if ((key = ctl_get_prkey(lun, i)) == 0) 7642 continue; 7643 7644 /* 7645 * We used lun->pr_key_count to calculate the 7646 * size to allocate. If it turns out the number of 7647 * initiators with the registered flag set is 7648 * larger than that (i.e. they haven't been kept in 7649 * sync), we've got a problem. 7650 */ 7651 if (key_count >= lun->pr_key_count) { 7652 key_count++; 7653 continue; 7654 } 7655 scsi_u64to8b(key, res_keys->keys[key_count].key); 7656 key_count++; 7657 } 7658 break; 7659 } 7660 case SPRI_RR: { // read reservation 7661 struct scsi_per_res_in_rsrv *res; 7662 int tmp_len, header_only; 7663 7664 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7665 7666 scsi_ulto4b(lun->pr_generation, res->header.generation); 7667 7668 if (lun->flags & CTL_LUN_PR_RESERVED) 7669 { 7670 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7671 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7672 res->header.length); 7673 header_only = 0; 7674 } else { 7675 tmp_len = sizeof(struct scsi_per_res_in_header); 7676 scsi_ulto4b(0, res->header.length); 7677 header_only = 1; 7678 } 7679 7680 /* 7681 * We had to drop the lock to allocate our buffer, which 7682 * leaves time for someone to come in with another 7683 * persistent reservation. (That is unlikely, though, 7684 * since this should be the only persistent reservation 7685 * command active right now.) 7686 */ 7687 if (tmp_len != total_len) { 7688 mtx_unlock(&lun->lun_lock); 7689 free(ctsio->kern_data_ptr, M_CTL); 7690 printf("%s: reservation status changed, retrying\n", 7691 __func__); 7692 goto retry; 7693 } 7694 7695 /* 7696 * No reservation held, so we're done. 7697 */ 7698 if (header_only != 0) 7699 break; 7700 7701 /* 7702 * If the registration is an All Registrants type, the key 7703 * is 0, since it doesn't really matter. 
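	 * (For an all-registrants reservation lun->pr_res_idx is set to
	 * CTL_PR_ALL_REGISTRANTS rather than to any initiator index, so
	 * there is no single holder whose key could be reported here.)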
7704 */ 7705 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7706 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7707 res->data.reservation); 7708 } 7709 res->data.scopetype = lun->pr_res_type; 7710 break; 7711 } 7712 case SPRI_RC: //report capabilities 7713 { 7714 struct scsi_per_res_cap *res_cap; 7715 uint16_t type_mask; 7716 7717 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7718 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7719 res_cap->flags1 = SPRI_CRH; 7720 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7721 type_mask = SPRI_TM_WR_EX_AR | 7722 SPRI_TM_EX_AC_RO | 7723 SPRI_TM_WR_EX_RO | 7724 SPRI_TM_EX_AC | 7725 SPRI_TM_WR_EX | 7726 SPRI_TM_EX_AC_AR; 7727 scsi_ulto2b(type_mask, res_cap->type_mask); 7728 break; 7729 } 7730 case SPRI_RS: { // read full status 7731 struct scsi_per_res_in_full *res_status; 7732 struct scsi_per_res_in_full_desc *res_desc; 7733 struct ctl_port *port; 7734 int i, len; 7735 7736 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7737 7738 /* 7739 * We had to drop the lock to allocate our buffer, which 7740 * leaves time for someone to come in with another 7741 * persistent reservation. (That is unlikely, though, 7742 * since this should be the only persistent reservation 7743 * command active right now.) 7744 */ 7745 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7746 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7747 lun->pr_key_count)){ 7748 mtx_unlock(&lun->lun_lock); 7749 free(ctsio->kern_data_ptr, M_CTL); 7750 printf("%s: reservation length changed, retrying\n", 7751 __func__); 7752 goto retry; 7753 } 7754 7755 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7756 7757 res_desc = &res_status->desc[0]; 7758 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7759 if ((key = ctl_get_prkey(lun, i)) == 0) 7760 continue; 7761 7762 scsi_u64to8b(key, res_desc->res_key.key); 7763 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7764 (lun->pr_res_idx == i || 7765 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7766 res_desc->flags = SPRI_FULL_R_HOLDER; 7767 res_desc->scopetype = lun->pr_res_type; 7768 } 7769 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7770 res_desc->rel_trgt_port_id); 7771 len = 0; 7772 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7773 if (port != NULL) 7774 len = ctl_create_iid(port, 7775 i % CTL_MAX_INIT_PER_PORT, 7776 res_desc->transport_id); 7777 scsi_ulto4b(len, res_desc->additional_length); 7778 res_desc = (struct scsi_per_res_in_full_desc *) 7779 &res_desc->transport_id[len]; 7780 } 7781 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7782 res_status->header.length); 7783 break; 7784 } 7785 default: 7786 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7787 } 7788 mtx_unlock(&lun->lun_lock); 7789 7790 ctl_set_success(ctsio); 7791 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7792 ctsio->be_move_done = ctl_config_move_done; 7793 ctl_datamove((union ctl_io *)ctsio); 7794 return (CTL_RETVAL_COMPLETE); 7795 } 7796 7797 /* 7798 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7799 * it should return. 
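 *
 * On the non-zero returns this function has already set the SCSI error
 * status and called ctl_done(), so the caller only needs to return
 * without touching the I/O any further.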
7800 */ 7801 static int 7802 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7803 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7804 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7805 struct scsi_per_res_out_parms* param) 7806 { 7807 union ctl_ha_msg persis_io; 7808 int i; 7809 7810 mtx_lock(&lun->lun_lock); 7811 if (sa_res_key == 0) { 7812 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7813 /* validate scope and type */ 7814 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7815 SPR_LU_SCOPE) { 7816 mtx_unlock(&lun->lun_lock); 7817 ctl_set_invalid_field(/*ctsio*/ ctsio, 7818 /*sks_valid*/ 1, 7819 /*command*/ 1, 7820 /*field*/ 2, 7821 /*bit_valid*/ 1, 7822 /*bit*/ 4); 7823 ctl_done((union ctl_io *)ctsio); 7824 return (1); 7825 } 7826 7827 if (type>8 || type==2 || type==4 || type==0) { 7828 mtx_unlock(&lun->lun_lock); 7829 ctl_set_invalid_field(/*ctsio*/ ctsio, 7830 /*sks_valid*/ 1, 7831 /*command*/ 1, 7832 /*field*/ 2, 7833 /*bit_valid*/ 1, 7834 /*bit*/ 0); 7835 ctl_done((union ctl_io *)ctsio); 7836 return (1); 7837 } 7838 7839 /* 7840 * Unregister everybody else and build UA for 7841 * them 7842 */ 7843 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7844 if (i == residx || ctl_get_prkey(lun, i) == 0) 7845 continue; 7846 7847 ctl_clr_prkey(lun, i); 7848 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7849 } 7850 lun->pr_key_count = 1; 7851 lun->pr_res_type = type; 7852 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7853 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7854 lun->pr_res_idx = residx; 7855 lun->pr_generation++; 7856 mtx_unlock(&lun->lun_lock); 7857 7858 /* send msg to other side */ 7859 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7860 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7861 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7862 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7863 persis_io.pr.pr_info.res_type = type; 7864 memcpy(persis_io.pr.pr_info.sa_res_key, 7865 param->serv_act_res_key, 7866 sizeof(param->serv_act_res_key)); 7867 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7868 sizeof(persis_io.pr), M_WAITOK); 7869 } else { 7870 /* not all registrants */ 7871 mtx_unlock(&lun->lun_lock); 7872 free(ctsio->kern_data_ptr, M_CTL); 7873 ctl_set_invalid_field(ctsio, 7874 /*sks_valid*/ 1, 7875 /*command*/ 0, 7876 /*field*/ 8, 7877 /*bit_valid*/ 0, 7878 /*bit*/ 0); 7879 ctl_done((union ctl_io *)ctsio); 7880 return (1); 7881 } 7882 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7883 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7884 int found = 0; 7885 7886 if (res_key == sa_res_key) { 7887 /* special case */ 7888 /* 7889 * The spec implies this is not good but doesn't 7890 * say what to do. There are two choices either 7891 * generate a res conflict or check condition 7892 * with illegal field in parameter data. Since 7893 * that is what is done when the sa_res_key is 7894 * zero I'll take that approach since this has 7895 * to do with the sa_res_key. 
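			 * In practice that means CHECK CONDITION with
			 * "invalid field in parameter list" pointing at the
			 * service action reservation key, which is what
			 * ctl_set_invalid_field() builds just below.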
7896 */ 7897 mtx_unlock(&lun->lun_lock); 7898 free(ctsio->kern_data_ptr, M_CTL); 7899 ctl_set_invalid_field(ctsio, 7900 /*sks_valid*/ 1, 7901 /*command*/ 0, 7902 /*field*/ 8, 7903 /*bit_valid*/ 0, 7904 /*bit*/ 0); 7905 ctl_done((union ctl_io *)ctsio); 7906 return (1); 7907 } 7908 7909 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7910 if (ctl_get_prkey(lun, i) != sa_res_key) 7911 continue; 7912 7913 found = 1; 7914 ctl_clr_prkey(lun, i); 7915 lun->pr_key_count--; 7916 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7917 } 7918 if (!found) { 7919 mtx_unlock(&lun->lun_lock); 7920 free(ctsio->kern_data_ptr, M_CTL); 7921 ctl_set_reservation_conflict(ctsio); 7922 ctl_done((union ctl_io *)ctsio); 7923 return (CTL_RETVAL_COMPLETE); 7924 } 7925 lun->pr_generation++; 7926 mtx_unlock(&lun->lun_lock); 7927 7928 /* send msg to other side */ 7929 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7930 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7931 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7932 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7933 persis_io.pr.pr_info.res_type = type; 7934 memcpy(persis_io.pr.pr_info.sa_res_key, 7935 param->serv_act_res_key, 7936 sizeof(param->serv_act_res_key)); 7937 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7938 sizeof(persis_io.pr), M_WAITOK); 7939 } else { 7940 /* Reserved but not all registrants */ 7941 /* sa_res_key is res holder */ 7942 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7943 /* validate scope and type */ 7944 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7945 SPR_LU_SCOPE) { 7946 mtx_unlock(&lun->lun_lock); 7947 ctl_set_invalid_field(/*ctsio*/ ctsio, 7948 /*sks_valid*/ 1, 7949 /*command*/ 1, 7950 /*field*/ 2, 7951 /*bit_valid*/ 1, 7952 /*bit*/ 4); 7953 ctl_done((union ctl_io *)ctsio); 7954 return (1); 7955 } 7956 7957 if (type>8 || type==2 || type==4 || type==0) { 7958 mtx_unlock(&lun->lun_lock); 7959 ctl_set_invalid_field(/*ctsio*/ ctsio, 7960 /*sks_valid*/ 1, 7961 /*command*/ 1, 7962 /*field*/ 2, 7963 /*bit_valid*/ 1, 7964 /*bit*/ 0); 7965 ctl_done((union ctl_io *)ctsio); 7966 return (1); 7967 } 7968 7969 /* 7970 * Do the following: 7971 * if sa_res_key != res_key remove all 7972 * registrants w/sa_res_key and generate UA 7973 * for these registrants(Registrations 7974 * Preempted) if it wasn't an exclusive 7975 * reservation generate UA(Reservations 7976 * Preempted) for all other registered nexuses 7977 * if the type has changed. Establish the new 7978 * reservation and holder. If res_key and 7979 * sa_res_key are the same do the above 7980 * except don't unregister the res holder. 
7981 */ 7982 7983 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7984 if (i == residx || ctl_get_prkey(lun, i) == 0) 7985 continue; 7986 7987 if (sa_res_key == ctl_get_prkey(lun, i)) { 7988 ctl_clr_prkey(lun, i); 7989 lun->pr_key_count--; 7990 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7991 } else if (type != lun->pr_res_type && 7992 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 7993 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 7994 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7995 } 7996 } 7997 lun->pr_res_type = type; 7998 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7999 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8000 lun->pr_res_idx = residx; 8001 else 8002 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8003 lun->pr_generation++; 8004 mtx_unlock(&lun->lun_lock); 8005 8006 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8007 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8008 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8009 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8010 persis_io.pr.pr_info.res_type = type; 8011 memcpy(persis_io.pr.pr_info.sa_res_key, 8012 param->serv_act_res_key, 8013 sizeof(param->serv_act_res_key)); 8014 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8015 sizeof(persis_io.pr), M_WAITOK); 8016 } else { 8017 /* 8018 * sa_res_key is not the res holder just 8019 * remove registrants 8020 */ 8021 int found=0; 8022 8023 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8024 if (sa_res_key != ctl_get_prkey(lun, i)) 8025 continue; 8026 8027 found = 1; 8028 ctl_clr_prkey(lun, i); 8029 lun->pr_key_count--; 8030 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8031 } 8032 8033 if (!found) { 8034 mtx_unlock(&lun->lun_lock); 8035 free(ctsio->kern_data_ptr, M_CTL); 8036 ctl_set_reservation_conflict(ctsio); 8037 ctl_done((union ctl_io *)ctsio); 8038 return (1); 8039 } 8040 lun->pr_generation++; 8041 mtx_unlock(&lun->lun_lock); 8042 8043 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8044 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8045 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8046 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8047 persis_io.pr.pr_info.res_type = type; 8048 memcpy(persis_io.pr.pr_info.sa_res_key, 8049 param->serv_act_res_key, 8050 sizeof(param->serv_act_res_key)); 8051 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8052 sizeof(persis_io.pr), M_WAITOK); 8053 } 8054 } 8055 return (0); 8056 } 8057 8058 static void 8059 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8060 { 8061 uint64_t sa_res_key; 8062 int i; 8063 8064 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8065 8066 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8067 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8068 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8069 if (sa_res_key == 0) { 8070 /* 8071 * Unregister everybody else and build UA for 8072 * them 8073 */ 8074 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8075 if (i == msg->pr.pr_info.residx || 8076 ctl_get_prkey(lun, i) == 0) 8077 continue; 8078 8079 ctl_clr_prkey(lun, i); 8080 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8081 } 8082 8083 lun->pr_key_count = 1; 8084 lun->pr_res_type = msg->pr.pr_info.res_type; 8085 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8086 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8087 lun->pr_res_idx = msg->pr.pr_info.residx; 8088 } else { 8089 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8090 if (sa_res_key == ctl_get_prkey(lun, i)) 8091 continue; 8092 8093 ctl_clr_prkey(lun, i); 8094 lun->pr_key_count--; 8095 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8096 } 8097 } 8098 } else { 8099 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8100 if (i == 
msg->pr.pr_info.residx || 8101 ctl_get_prkey(lun, i) == 0) 8102 continue; 8103 8104 if (sa_res_key == ctl_get_prkey(lun, i)) { 8105 ctl_clr_prkey(lun, i); 8106 lun->pr_key_count--; 8107 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8108 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8109 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8110 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8111 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8112 } 8113 } 8114 lun->pr_res_type = msg->pr.pr_info.res_type; 8115 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8116 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8117 lun->pr_res_idx = msg->pr.pr_info.residx; 8118 else 8119 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8120 } 8121 lun->pr_generation++; 8122 8123 } 8124 8125 8126 int 8127 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8128 { 8129 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8130 struct ctl_lun *lun = CTL_LUN(ctsio); 8131 int retval; 8132 u_int32_t param_len; 8133 struct scsi_per_res_out *cdb; 8134 struct scsi_per_res_out_parms* param; 8135 uint32_t residx; 8136 uint64_t res_key, sa_res_key, key; 8137 uint8_t type; 8138 union ctl_ha_msg persis_io; 8139 int i; 8140 8141 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8142 8143 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8144 retval = CTL_RETVAL_COMPLETE; 8145 8146 /* 8147 * We only support whole-LUN scope. The scope & type are ignored for 8148 * register, register and ignore existing key and clear. 8149 * We sometimes ignore scope and type on preempts too!! 8150 * Verify reservation type here as well. 8151 */ 8152 type = cdb->scope_type & SPR_TYPE_MASK; 8153 if ((cdb->action == SPRO_RESERVE) 8154 || (cdb->action == SPRO_RELEASE)) { 8155 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8156 ctl_set_invalid_field(/*ctsio*/ ctsio, 8157 /*sks_valid*/ 1, 8158 /*command*/ 1, 8159 /*field*/ 2, 8160 /*bit_valid*/ 1, 8161 /*bit*/ 4); 8162 ctl_done((union ctl_io *)ctsio); 8163 return (CTL_RETVAL_COMPLETE); 8164 } 8165 8166 if (type>8 || type==2 || type==4 || type==0) { 8167 ctl_set_invalid_field(/*ctsio*/ ctsio, 8168 /*sks_valid*/ 1, 8169 /*command*/ 1, 8170 /*field*/ 2, 8171 /*bit_valid*/ 1, 8172 /*bit*/ 0); 8173 ctl_done((union ctl_io *)ctsio); 8174 return (CTL_RETVAL_COMPLETE); 8175 } 8176 } 8177 8178 param_len = scsi_4btoul(cdb->length); 8179 8180 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8181 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8182 ctsio->kern_data_len = param_len; 8183 ctsio->kern_total_len = param_len; 8184 ctsio->kern_rel_offset = 0; 8185 ctsio->kern_sg_entries = 0; 8186 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8187 ctsio->be_move_done = ctl_config_move_done; 8188 ctl_datamove((union ctl_io *)ctsio); 8189 8190 return (CTL_RETVAL_COMPLETE); 8191 } 8192 8193 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8194 8195 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8196 res_key = scsi_8btou64(param->res_key.key); 8197 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8198 8199 /* 8200 * Validate the reservation key here except for SPRO_REG_IGNO 8201 * This must be done for all other service actions 8202 */ 8203 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8204 mtx_lock(&lun->lun_lock); 8205 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8206 if (res_key != key) { 8207 /* 8208 * The current key passed in doesn't match 8209 * the one the initiator previously 8210 * registered. 
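				 * A mismatched reservation key is answered
				 * with RESERVATION CONFLICT status, which
				 * ctl_set_reservation_conflict() sets up
				 * below.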
8211 */ 8212 mtx_unlock(&lun->lun_lock); 8213 free(ctsio->kern_data_ptr, M_CTL); 8214 ctl_set_reservation_conflict(ctsio); 8215 ctl_done((union ctl_io *)ctsio); 8216 return (CTL_RETVAL_COMPLETE); 8217 } 8218 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8219 /* 8220 * We are not registered 8221 */ 8222 mtx_unlock(&lun->lun_lock); 8223 free(ctsio->kern_data_ptr, M_CTL); 8224 ctl_set_reservation_conflict(ctsio); 8225 ctl_done((union ctl_io *)ctsio); 8226 return (CTL_RETVAL_COMPLETE); 8227 } else if (res_key != 0) { 8228 /* 8229 * We are not registered and trying to register but 8230 * the register key isn't zero. 8231 */ 8232 mtx_unlock(&lun->lun_lock); 8233 free(ctsio->kern_data_ptr, M_CTL); 8234 ctl_set_reservation_conflict(ctsio); 8235 ctl_done((union ctl_io *)ctsio); 8236 return (CTL_RETVAL_COMPLETE); 8237 } 8238 mtx_unlock(&lun->lun_lock); 8239 } 8240 8241 switch (cdb->action & SPRO_ACTION_MASK) { 8242 case SPRO_REGISTER: 8243 case SPRO_REG_IGNO: { 8244 8245 /* 8246 * We don't support any of these options, as we report in 8247 * the read capabilities request (see 8248 * ctl_persistent_reserve_in(), above). 8249 */ 8250 if ((param->flags & SPR_SPEC_I_PT) 8251 || (param->flags & SPR_ALL_TG_PT) 8252 || (param->flags & SPR_APTPL)) { 8253 int bit_ptr; 8254 8255 if (param->flags & SPR_APTPL) 8256 bit_ptr = 0; 8257 else if (param->flags & SPR_ALL_TG_PT) 8258 bit_ptr = 2; 8259 else /* SPR_SPEC_I_PT */ 8260 bit_ptr = 3; 8261 8262 free(ctsio->kern_data_ptr, M_CTL); 8263 ctl_set_invalid_field(ctsio, 8264 /*sks_valid*/ 1, 8265 /*command*/ 0, 8266 /*field*/ 20, 8267 /*bit_valid*/ 1, 8268 /*bit*/ bit_ptr); 8269 ctl_done((union ctl_io *)ctsio); 8270 return (CTL_RETVAL_COMPLETE); 8271 } 8272 8273 mtx_lock(&lun->lun_lock); 8274 8275 /* 8276 * The initiator wants to clear the 8277 * key/unregister. 8278 */ 8279 if (sa_res_key == 0) { 8280 if ((res_key == 0 8281 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8282 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8283 && ctl_get_prkey(lun, residx) == 0)) { 8284 mtx_unlock(&lun->lun_lock); 8285 goto done; 8286 } 8287 8288 ctl_clr_prkey(lun, residx); 8289 lun->pr_key_count--; 8290 8291 if (residx == lun->pr_res_idx) { 8292 lun->flags &= ~CTL_LUN_PR_RESERVED; 8293 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8294 8295 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8296 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8297 lun->pr_key_count) { 8298 /* 8299 * If the reservation is a registrants 8300 * only type we need to generate a UA 8301 * for other registered inits. 
The 8302 * sense code should be RESERVATIONS 8303 * RELEASED 8304 */ 8305 8306 for (i = softc->init_min; i < softc->init_max; i++){ 8307 if (ctl_get_prkey(lun, i) == 0) 8308 continue; 8309 ctl_est_ua(lun, i, 8310 CTL_UA_RES_RELEASE); 8311 } 8312 } 8313 lun->pr_res_type = 0; 8314 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8315 if (lun->pr_key_count==0) { 8316 lun->flags &= ~CTL_LUN_PR_RESERVED; 8317 lun->pr_res_type = 0; 8318 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8319 } 8320 } 8321 lun->pr_generation++; 8322 mtx_unlock(&lun->lun_lock); 8323 8324 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8325 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8326 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8327 persis_io.pr.pr_info.residx = residx; 8328 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8329 sizeof(persis_io.pr), M_WAITOK); 8330 } else /* sa_res_key != 0 */ { 8331 8332 /* 8333 * If we aren't registered currently then increment 8334 * the key count and set the registered flag. 8335 */ 8336 ctl_alloc_prkey(lun, residx); 8337 if (ctl_get_prkey(lun, residx) == 0) 8338 lun->pr_key_count++; 8339 ctl_set_prkey(lun, residx, sa_res_key); 8340 lun->pr_generation++; 8341 mtx_unlock(&lun->lun_lock); 8342 8343 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8344 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8345 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8346 persis_io.pr.pr_info.residx = residx; 8347 memcpy(persis_io.pr.pr_info.sa_res_key, 8348 param->serv_act_res_key, 8349 sizeof(param->serv_act_res_key)); 8350 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8351 sizeof(persis_io.pr), M_WAITOK); 8352 } 8353 8354 break; 8355 } 8356 case SPRO_RESERVE: 8357 mtx_lock(&lun->lun_lock); 8358 if (lun->flags & CTL_LUN_PR_RESERVED) { 8359 /* 8360 * if this isn't the reservation holder and it's 8361 * not a "all registrants" type or if the type is 8362 * different then we have a conflict 8363 */ 8364 if ((lun->pr_res_idx != residx 8365 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8366 || lun->pr_res_type != type) { 8367 mtx_unlock(&lun->lun_lock); 8368 free(ctsio->kern_data_ptr, M_CTL); 8369 ctl_set_reservation_conflict(ctsio); 8370 ctl_done((union ctl_io *)ctsio); 8371 return (CTL_RETVAL_COMPLETE); 8372 } 8373 mtx_unlock(&lun->lun_lock); 8374 } else /* create a reservation */ { 8375 /* 8376 * If it's not an "all registrants" type record 8377 * reservation holder 8378 */ 8379 if (type != SPR_TYPE_WR_EX_AR 8380 && type != SPR_TYPE_EX_AC_AR) 8381 lun->pr_res_idx = residx; /* Res holder */ 8382 else 8383 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8384 8385 lun->flags |= CTL_LUN_PR_RESERVED; 8386 lun->pr_res_type = type; 8387 8388 mtx_unlock(&lun->lun_lock); 8389 8390 /* send msg to other side */ 8391 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8392 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8393 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8394 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8395 persis_io.pr.pr_info.res_type = type; 8396 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8397 sizeof(persis_io.pr), M_WAITOK); 8398 } 8399 break; 8400 8401 case SPRO_RELEASE: 8402 mtx_lock(&lun->lun_lock); 8403 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8404 /* No reservation exists return good status */ 8405 mtx_unlock(&lun->lun_lock); 8406 goto done; 8407 } 8408 /* 8409 * Is this nexus a reservation holder? 
8410 */ 8411 if (lun->pr_res_idx != residx 8412 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8413 /* 8414 * not a res holder return good status but 8415 * do nothing 8416 */ 8417 mtx_unlock(&lun->lun_lock); 8418 goto done; 8419 } 8420 8421 if (lun->pr_res_type != type) { 8422 mtx_unlock(&lun->lun_lock); 8423 free(ctsio->kern_data_ptr, M_CTL); 8424 ctl_set_illegal_pr_release(ctsio); 8425 ctl_done((union ctl_io *)ctsio); 8426 return (CTL_RETVAL_COMPLETE); 8427 } 8428 8429 /* okay to release */ 8430 lun->flags &= ~CTL_LUN_PR_RESERVED; 8431 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8432 lun->pr_res_type = 0; 8433 8434 /* 8435 * If this isn't an exclusive access reservation and NUAR 8436 * is not set, generate UA for all other registrants. 8437 */ 8438 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8439 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8440 for (i = softc->init_min; i < softc->init_max; i++) { 8441 if (i == residx || ctl_get_prkey(lun, i) == 0) 8442 continue; 8443 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8444 } 8445 } 8446 mtx_unlock(&lun->lun_lock); 8447 8448 /* Send msg to other side */ 8449 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8450 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8451 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8452 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8453 sizeof(persis_io.pr), M_WAITOK); 8454 break; 8455 8456 case SPRO_CLEAR: 8457 /* send msg to other side */ 8458 8459 mtx_lock(&lun->lun_lock); 8460 lun->flags &= ~CTL_LUN_PR_RESERVED; 8461 lun->pr_res_type = 0; 8462 lun->pr_key_count = 0; 8463 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8464 8465 ctl_clr_prkey(lun, residx); 8466 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8467 if (ctl_get_prkey(lun, i) != 0) { 8468 ctl_clr_prkey(lun, i); 8469 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8470 } 8471 lun->pr_generation++; 8472 mtx_unlock(&lun->lun_lock); 8473 8474 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8475 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8476 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8477 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8478 sizeof(persis_io.pr), M_WAITOK); 8479 break; 8480 8481 case SPRO_PREEMPT: 8482 case SPRO_PRE_ABO: { 8483 int nretval; 8484 8485 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8486 residx, ctsio, cdb, param); 8487 if (nretval != 0) 8488 return (CTL_RETVAL_COMPLETE); 8489 break; 8490 } 8491 default: 8492 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8493 } 8494 8495 done: 8496 free(ctsio->kern_data_ptr, M_CTL); 8497 ctl_set_success(ctsio); 8498 ctl_done((union ctl_io *)ctsio); 8499 8500 return (retval); 8501 } 8502 8503 /* 8504 * This routine is for handling a message from the other SC pertaining to 8505 * persistent reserve out. All the error checking will have been done 8506 * so only perorming the action need be done here to keep the two 8507 * in sync. 
8508 */ 8509 static void 8510 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8511 { 8512 struct ctl_softc *softc = CTL_SOFTC(io); 8513 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8514 struct ctl_lun *lun; 8515 int i; 8516 uint32_t residx, targ_lun; 8517 8518 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8519 mtx_lock(&softc->ctl_lock); 8520 if (targ_lun >= ctl_max_luns || 8521 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8522 mtx_unlock(&softc->ctl_lock); 8523 return; 8524 } 8525 mtx_lock(&lun->lun_lock); 8526 mtx_unlock(&softc->ctl_lock); 8527 if (lun->flags & CTL_LUN_DISABLED) { 8528 mtx_unlock(&lun->lun_lock); 8529 return; 8530 } 8531 residx = ctl_get_initindex(&msg->hdr.nexus); 8532 switch(msg->pr.pr_info.action) { 8533 case CTL_PR_REG_KEY: 8534 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8535 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8536 lun->pr_key_count++; 8537 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8538 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8539 lun->pr_generation++; 8540 break; 8541 8542 case CTL_PR_UNREG_KEY: 8543 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8544 lun->pr_key_count--; 8545 8546 /* XXX Need to see if the reservation has been released */ 8547 /* if so do we need to generate UA? */ 8548 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8549 lun->flags &= ~CTL_LUN_PR_RESERVED; 8550 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8551 8552 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8553 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8554 lun->pr_key_count) { 8555 /* 8556 * If the reservation is a registrants 8557 * only type we need to generate a UA 8558 * for other registered inits. The 8559 * sense code should be RESERVATIONS 8560 * RELEASED 8561 */ 8562 8563 for (i = softc->init_min; i < softc->init_max; i++) { 8564 if (ctl_get_prkey(lun, i) == 0) 8565 continue; 8566 8567 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8568 } 8569 } 8570 lun->pr_res_type = 0; 8571 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8572 if (lun->pr_key_count==0) { 8573 lun->flags &= ~CTL_LUN_PR_RESERVED; 8574 lun->pr_res_type = 0; 8575 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8576 } 8577 } 8578 lun->pr_generation++; 8579 break; 8580 8581 case CTL_PR_RESERVE: 8582 lun->flags |= CTL_LUN_PR_RESERVED; 8583 lun->pr_res_type = msg->pr.pr_info.res_type; 8584 lun->pr_res_idx = msg->pr.pr_info.residx; 8585 8586 break; 8587 8588 case CTL_PR_RELEASE: 8589 /* 8590 * If this isn't an exclusive access reservation and NUAR 8591 * is not set, generate UA for all other registrants. 
8592 */ 8593 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8594 lun->pr_res_type != SPR_TYPE_WR_EX && 8595 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8596 for (i = softc->init_min; i < softc->init_max; i++) { 8597 if (i == residx || ctl_get_prkey(lun, i) == 0) 8598 continue; 8599 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8600 } 8601 } 8602 8603 lun->flags &= ~CTL_LUN_PR_RESERVED; 8604 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8605 lun->pr_res_type = 0; 8606 break; 8607 8608 case CTL_PR_PREEMPT: 8609 ctl_pro_preempt_other(lun, msg); 8610 break; 8611 case CTL_PR_CLEAR: 8612 lun->flags &= ~CTL_LUN_PR_RESERVED; 8613 lun->pr_res_type = 0; 8614 lun->pr_key_count = 0; 8615 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8616 8617 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8618 if (ctl_get_prkey(lun, i) == 0) 8619 continue; 8620 ctl_clr_prkey(lun, i); 8621 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8622 } 8623 lun->pr_generation++; 8624 break; 8625 } 8626 8627 mtx_unlock(&lun->lun_lock); 8628 } 8629 8630 int 8631 ctl_read_write(struct ctl_scsiio *ctsio) 8632 { 8633 struct ctl_lun *lun = CTL_LUN(ctsio); 8634 struct ctl_lba_len_flags *lbalen; 8635 uint64_t lba; 8636 uint32_t num_blocks; 8637 int flags, retval; 8638 int isread; 8639 8640 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8641 8642 flags = 0; 8643 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8644 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8645 switch (ctsio->cdb[0]) { 8646 case READ_6: 8647 case WRITE_6: { 8648 struct scsi_rw_6 *cdb; 8649 8650 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8651 8652 lba = scsi_3btoul(cdb->addr); 8653 /* only 5 bits are valid in the most significant address byte */ 8654 lba &= 0x1fffff; 8655 num_blocks = cdb->length; 8656 /* 8657 * This is correct according to SBC-2. 
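		 * A READ(6)/WRITE(6) transfer length of zero means 256
		 * blocks, not zero blocks, so translate it here.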
8658 */ 8659 if (num_blocks == 0) 8660 num_blocks = 256; 8661 break; 8662 } 8663 case READ_10: 8664 case WRITE_10: { 8665 struct scsi_rw_10 *cdb; 8666 8667 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8668 if (cdb->byte2 & SRW10_FUA) 8669 flags |= CTL_LLF_FUA; 8670 if (cdb->byte2 & SRW10_DPO) 8671 flags |= CTL_LLF_DPO; 8672 lba = scsi_4btoul(cdb->addr); 8673 num_blocks = scsi_2btoul(cdb->length); 8674 break; 8675 } 8676 case WRITE_VERIFY_10: { 8677 struct scsi_write_verify_10 *cdb; 8678 8679 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8680 flags |= CTL_LLF_FUA; 8681 if (cdb->byte2 & SWV_DPO) 8682 flags |= CTL_LLF_DPO; 8683 lba = scsi_4btoul(cdb->addr); 8684 num_blocks = scsi_2btoul(cdb->length); 8685 break; 8686 } 8687 case READ_12: 8688 case WRITE_12: { 8689 struct scsi_rw_12 *cdb; 8690 8691 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8692 if (cdb->byte2 & SRW12_FUA) 8693 flags |= CTL_LLF_FUA; 8694 if (cdb->byte2 & SRW12_DPO) 8695 flags |= CTL_LLF_DPO; 8696 lba = scsi_4btoul(cdb->addr); 8697 num_blocks = scsi_4btoul(cdb->length); 8698 break; 8699 } 8700 case WRITE_VERIFY_12: { 8701 struct scsi_write_verify_12 *cdb; 8702 8703 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8704 flags |= CTL_LLF_FUA; 8705 if (cdb->byte2 & SWV_DPO) 8706 flags |= CTL_LLF_DPO; 8707 lba = scsi_4btoul(cdb->addr); 8708 num_blocks = scsi_4btoul(cdb->length); 8709 break; 8710 } 8711 case READ_16: 8712 case WRITE_16: { 8713 struct scsi_rw_16 *cdb; 8714 8715 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8716 if (cdb->byte2 & SRW12_FUA) 8717 flags |= CTL_LLF_FUA; 8718 if (cdb->byte2 & SRW12_DPO) 8719 flags |= CTL_LLF_DPO; 8720 lba = scsi_8btou64(cdb->addr); 8721 num_blocks = scsi_4btoul(cdb->length); 8722 break; 8723 } 8724 case WRITE_ATOMIC_16: { 8725 struct scsi_write_atomic_16 *cdb; 8726 8727 if (lun->be_lun->atomicblock == 0) { 8728 ctl_set_invalid_opcode(ctsio); 8729 ctl_done((union ctl_io *)ctsio); 8730 return (CTL_RETVAL_COMPLETE); 8731 } 8732 8733 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8734 if (cdb->byte2 & SRW12_FUA) 8735 flags |= CTL_LLF_FUA; 8736 if (cdb->byte2 & SRW12_DPO) 8737 flags |= CTL_LLF_DPO; 8738 lba = scsi_8btou64(cdb->addr); 8739 num_blocks = scsi_2btoul(cdb->length); 8740 if (num_blocks > lun->be_lun->atomicblock) { 8741 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8742 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8743 /*bit*/ 0); 8744 ctl_done((union ctl_io *)ctsio); 8745 return (CTL_RETVAL_COMPLETE); 8746 } 8747 break; 8748 } 8749 case WRITE_VERIFY_16: { 8750 struct scsi_write_verify_16 *cdb; 8751 8752 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8753 flags |= CTL_LLF_FUA; 8754 if (cdb->byte2 & SWV_DPO) 8755 flags |= CTL_LLF_DPO; 8756 lba = scsi_8btou64(cdb->addr); 8757 num_blocks = scsi_4btoul(cdb->length); 8758 break; 8759 } 8760 default: 8761 /* 8762 * We got a command we don't support. This shouldn't 8763 * happen, commands should be filtered out above us. 8764 */ 8765 ctl_set_invalid_opcode(ctsio); 8766 ctl_done((union ctl_io *)ctsio); 8767 8768 return (CTL_RETVAL_COMPLETE); 8769 break; /* NOTREACHED */ 8770 } 8771 8772 /* 8773 * The first check is to make sure we're in bounds, the second 8774 * check is to catch wrap-around problems. If the lba + num blocks 8775 * is less than the lba, then we've wrapped around and the block 8776 * range is invalid anyway. 
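	 * For example, with 64-bit arithmetic an lba of 0xfffffffffffffffe
	 * and a num_blocks of 4 wraps around to 2, which the second test
	 * catches even though the first one may not.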
8777 */ 8778 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8779 || ((lba + num_blocks) < lba)) { 8780 ctl_set_lba_out_of_range(ctsio, 8781 MAX(lba, lun->be_lun->maxlba + 1)); 8782 ctl_done((union ctl_io *)ctsio); 8783 return (CTL_RETVAL_COMPLETE); 8784 } 8785 8786 /* 8787 * According to SBC-3, a transfer length of 0 is not an error. 8788 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8789 * translates to 256 blocks for those commands. 8790 */ 8791 if (num_blocks == 0) { 8792 ctl_set_success(ctsio); 8793 ctl_done((union ctl_io *)ctsio); 8794 return (CTL_RETVAL_COMPLETE); 8795 } 8796 8797 /* Set FUA and/or DPO if caches are disabled. */ 8798 if (isread) { 8799 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8800 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8801 } else { 8802 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8803 flags |= CTL_LLF_FUA; 8804 } 8805 8806 lbalen = (struct ctl_lba_len_flags *) 8807 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8808 lbalen->lba = lba; 8809 lbalen->len = num_blocks; 8810 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8811 8812 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8813 ctsio->kern_rel_offset = 0; 8814 8815 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8816 8817 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8818 return (retval); 8819 } 8820 8821 static int 8822 ctl_cnw_cont(union ctl_io *io) 8823 { 8824 struct ctl_lun *lun = CTL_LUN(io); 8825 struct ctl_scsiio *ctsio; 8826 struct ctl_lba_len_flags *lbalen; 8827 int retval; 8828 8829 ctsio = &io->scsiio; 8830 ctsio->io_hdr.status = CTL_STATUS_NONE; 8831 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8832 lbalen = (struct ctl_lba_len_flags *) 8833 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8834 lbalen->flags &= ~CTL_LLF_COMPARE; 8835 lbalen->flags |= CTL_LLF_WRITE; 8836 8837 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8838 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8839 return (retval); 8840 } 8841 8842 int 8843 ctl_cnw(struct ctl_scsiio *ctsio) 8844 { 8845 struct ctl_lun *lun = CTL_LUN(ctsio); 8846 struct ctl_lba_len_flags *lbalen; 8847 uint64_t lba; 8848 uint32_t num_blocks; 8849 int flags, retval; 8850 8851 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8852 8853 flags = 0; 8854 switch (ctsio->cdb[0]) { 8855 case COMPARE_AND_WRITE: { 8856 struct scsi_compare_and_write *cdb; 8857 8858 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8859 if (cdb->byte2 & SRW10_FUA) 8860 flags |= CTL_LLF_FUA; 8861 if (cdb->byte2 & SRW10_DPO) 8862 flags |= CTL_LLF_DPO; 8863 lba = scsi_8btou64(cdb->addr); 8864 num_blocks = cdb->length; 8865 break; 8866 } 8867 default: 8868 /* 8869 * We got a command we don't support. This shouldn't 8870 * happen, commands should be filtered out above us. 8871 */ 8872 ctl_set_invalid_opcode(ctsio); 8873 ctl_done((union ctl_io *)ctsio); 8874 8875 return (CTL_RETVAL_COMPLETE); 8876 break; /* NOTREACHED */ 8877 } 8878 8879 /* 8880 * The first check is to make sure we're in bounds, the second 8881 * check is to catch wrap-around problems. If the lba + num blocks 8882 * is less than the lba, then we've wrapped around and the block 8883 * range is invalid anyway. 
8884 */ 8885 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8886 || ((lba + num_blocks) < lba)) { 8887 ctl_set_lba_out_of_range(ctsio, 8888 MAX(lba, lun->be_lun->maxlba + 1)); 8889 ctl_done((union ctl_io *)ctsio); 8890 return (CTL_RETVAL_COMPLETE); 8891 } 8892 8893 /* 8894 * According to SBC-3, a transfer length of 0 is not an error. 8895 */ 8896 if (num_blocks == 0) { 8897 ctl_set_success(ctsio); 8898 ctl_done((union ctl_io *)ctsio); 8899 return (CTL_RETVAL_COMPLETE); 8900 } 8901 8902 /* Set FUA if write cache is disabled. */ 8903 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8904 flags |= CTL_LLF_FUA; 8905 8906 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8907 ctsio->kern_rel_offset = 0; 8908 8909 /* 8910 * Set the IO_CONT flag, so that if this I/O gets passed to 8911 * ctl_data_submit_done(), it'll get passed back to 8912 * ctl_ctl_cnw_cont() for further processing. 8913 */ 8914 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8915 ctsio->io_cont = ctl_cnw_cont; 8916 8917 lbalen = (struct ctl_lba_len_flags *) 8918 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8919 lbalen->lba = lba; 8920 lbalen->len = num_blocks; 8921 lbalen->flags = CTL_LLF_COMPARE | flags; 8922 8923 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8924 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8925 return (retval); 8926 } 8927 8928 int 8929 ctl_verify(struct ctl_scsiio *ctsio) 8930 { 8931 struct ctl_lun *lun = CTL_LUN(ctsio); 8932 struct ctl_lba_len_flags *lbalen; 8933 uint64_t lba; 8934 uint32_t num_blocks; 8935 int bytchk, flags; 8936 int retval; 8937 8938 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8939 8940 bytchk = 0; 8941 flags = CTL_LLF_FUA; 8942 switch (ctsio->cdb[0]) { 8943 case VERIFY_10: { 8944 struct scsi_verify_10 *cdb; 8945 8946 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8947 if (cdb->byte2 & SVFY_BYTCHK) 8948 bytchk = 1; 8949 if (cdb->byte2 & SVFY_DPO) 8950 flags |= CTL_LLF_DPO; 8951 lba = scsi_4btoul(cdb->addr); 8952 num_blocks = scsi_2btoul(cdb->length); 8953 break; 8954 } 8955 case VERIFY_12: { 8956 struct scsi_verify_12 *cdb; 8957 8958 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8959 if (cdb->byte2 & SVFY_BYTCHK) 8960 bytchk = 1; 8961 if (cdb->byte2 & SVFY_DPO) 8962 flags |= CTL_LLF_DPO; 8963 lba = scsi_4btoul(cdb->addr); 8964 num_blocks = scsi_4btoul(cdb->length); 8965 break; 8966 } 8967 case VERIFY_16: { 8968 struct scsi_rw_16 *cdb; 8969 8970 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8971 if (cdb->byte2 & SVFY_BYTCHK) 8972 bytchk = 1; 8973 if (cdb->byte2 & SVFY_DPO) 8974 flags |= CTL_LLF_DPO; 8975 lba = scsi_8btou64(cdb->addr); 8976 num_blocks = scsi_4btoul(cdb->length); 8977 break; 8978 } 8979 default: 8980 /* 8981 * We got a command we don't support. This shouldn't 8982 * happen, commands should be filtered out above us. 8983 */ 8984 ctl_set_invalid_opcode(ctsio); 8985 ctl_done((union ctl_io *)ctsio); 8986 return (CTL_RETVAL_COMPLETE); 8987 } 8988 8989 /* 8990 * The first check is to make sure we're in bounds, the second 8991 * check is to catch wrap-around problems. If the lba + num blocks 8992 * is less than the lba, then we've wrapped around and the block 8993 * range is invalid anyway. 8994 */ 8995 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8996 || ((lba + num_blocks) < lba)) { 8997 ctl_set_lba_out_of_range(ctsio, 8998 MAX(lba, lun->be_lun->maxlba + 1)); 8999 ctl_done((union ctl_io *)ctsio); 9000 return (CTL_RETVAL_COMPLETE); 9001 } 9002 9003 /* 9004 * According to SBC-3, a transfer length of 0 is not an error. 
9005 */ 9006 if (num_blocks == 0) { 9007 ctl_set_success(ctsio); 9008 ctl_done((union ctl_io *)ctsio); 9009 return (CTL_RETVAL_COMPLETE); 9010 } 9011 9012 lbalen = (struct ctl_lba_len_flags *) 9013 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9014 lbalen->lba = lba; 9015 lbalen->len = num_blocks; 9016 if (bytchk) { 9017 lbalen->flags = CTL_LLF_COMPARE | flags; 9018 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9019 } else { 9020 lbalen->flags = CTL_LLF_VERIFY | flags; 9021 ctsio->kern_total_len = 0; 9022 } 9023 ctsio->kern_rel_offset = 0; 9024 9025 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9026 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9027 return (retval); 9028 } 9029 9030 int 9031 ctl_report_luns(struct ctl_scsiio *ctsio) 9032 { 9033 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9034 struct ctl_port *port = CTL_PORT(ctsio); 9035 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9036 struct scsi_report_luns *cdb; 9037 struct scsi_report_luns_data *lun_data; 9038 int num_filled, num_luns, num_port_luns, retval; 9039 uint32_t alloc_len, lun_datalen; 9040 uint32_t initidx, targ_lun_id, lun_id; 9041 9042 retval = CTL_RETVAL_COMPLETE; 9043 cdb = (struct scsi_report_luns *)ctsio->cdb; 9044 9045 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9046 9047 num_luns = 0; 9048 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9049 mtx_lock(&softc->ctl_lock); 9050 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9051 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9052 num_luns++; 9053 } 9054 mtx_unlock(&softc->ctl_lock); 9055 9056 switch (cdb->select_report) { 9057 case RPL_REPORT_DEFAULT: 9058 case RPL_REPORT_ALL: 9059 case RPL_REPORT_NONSUBSID: 9060 break; 9061 case RPL_REPORT_WELLKNOWN: 9062 case RPL_REPORT_ADMIN: 9063 case RPL_REPORT_CONGLOM: 9064 num_luns = 0; 9065 break; 9066 default: 9067 ctl_set_invalid_field(ctsio, 9068 /*sks_valid*/ 1, 9069 /*command*/ 1, 9070 /*field*/ 2, 9071 /*bit_valid*/ 0, 9072 /*bit*/ 0); 9073 ctl_done((union ctl_io *)ctsio); 9074 return (retval); 9075 break; /* NOTREACHED */ 9076 } 9077 9078 alloc_len = scsi_4btoul(cdb->length); 9079 /* 9080 * The initiator has to allocate at least 16 bytes for this request, 9081 * so he can at least get the header and the first LUN. Otherwise 9082 * we reject the request (per SPC-3 rev 14, section 6.21). 
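	 * (That is 8 bytes of list header plus one 8-byte LUN entry, the
	 * same sizeof() sum tested below.)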
9083 */ 9084 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9085 sizeof(struct scsi_report_luns_lundata))) { 9086 ctl_set_invalid_field(ctsio, 9087 /*sks_valid*/ 1, 9088 /*command*/ 1, 9089 /*field*/ 6, 9090 /*bit_valid*/ 0, 9091 /*bit*/ 0); 9092 ctl_done((union ctl_io *)ctsio); 9093 return (retval); 9094 } 9095 9096 lun_datalen = sizeof(*lun_data) + 9097 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9098 9099 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9100 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9101 ctsio->kern_sg_entries = 0; 9102 9103 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9104 9105 mtx_lock(&softc->ctl_lock); 9106 for (targ_lun_id = 0, num_filled = 0; 9107 targ_lun_id < num_port_luns && num_filled < num_luns; 9108 targ_lun_id++) { 9109 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9110 if (lun_id == UINT32_MAX) 9111 continue; 9112 lun = softc->ctl_luns[lun_id]; 9113 if (lun == NULL) 9114 continue; 9115 9116 be64enc(lun_data->luns[num_filled++].lundata, 9117 ctl_encode_lun(targ_lun_id)); 9118 9119 /* 9120 * According to SPC-3, rev 14 section 6.21: 9121 * 9122 * "The execution of a REPORT LUNS command to any valid and 9123 * installed logical unit shall clear the REPORTED LUNS DATA 9124 * HAS CHANGED unit attention condition for all logical 9125 * units of that target with respect to the requesting 9126 * initiator. A valid and installed logical unit is one 9127 * having a PERIPHERAL QUALIFIER of 000b in the standard 9128 * INQUIRY data (see 6.4.2)." 9129 * 9130 * If request_lun is NULL, the LUN this report luns command 9131 * was issued to is either disabled or doesn't exist. In that 9132 * case, we shouldn't clear any pending lun change unit 9133 * attention. 9134 */ 9135 if (request_lun != NULL) { 9136 mtx_lock(&lun->lun_lock); 9137 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9138 mtx_unlock(&lun->lun_lock); 9139 } 9140 } 9141 mtx_unlock(&softc->ctl_lock); 9142 9143 /* 9144 * It's quite possible that we've returned fewer LUNs than we allocated 9145 * space for. Trim it. 9146 */ 9147 lun_datalen = sizeof(*lun_data) + 9148 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9149 ctsio->kern_rel_offset = 0; 9150 ctsio->kern_sg_entries = 0; 9151 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9152 ctsio->kern_total_len = ctsio->kern_data_len; 9153 9154 /* 9155 * We set this to the actual data length, regardless of how much 9156 * space we actually have to return results. If the user looks at 9157 * this value, he'll know whether or not he allocated enough space 9158 * and reissue the command if necessary. We don't support well 9159 * known logical units, so if the user asks for that, return none. 9160 */ 9161 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9162 9163 /* 9164 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9165 * this request. 
9166 */ 9167 ctl_set_success(ctsio); 9168 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9169 ctsio->be_move_done = ctl_config_move_done; 9170 ctl_datamove((union ctl_io *)ctsio); 9171 return (retval); 9172 } 9173 9174 int 9175 ctl_request_sense(struct ctl_scsiio *ctsio) 9176 { 9177 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9178 struct ctl_lun *lun = CTL_LUN(ctsio); 9179 struct scsi_request_sense *cdb; 9180 struct scsi_sense_data *sense_ptr, *ps; 9181 uint32_t initidx; 9182 int have_error; 9183 u_int sense_len = SSD_FULL_SIZE; 9184 scsi_sense_data_type sense_format; 9185 ctl_ua_type ua_type; 9186 uint8_t asc = 0, ascq = 0; 9187 9188 cdb = (struct scsi_request_sense *)ctsio->cdb; 9189 9190 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9191 9192 /* 9193 * Determine which sense format the user wants. 9194 */ 9195 if (cdb->byte2 & SRS_DESC) 9196 sense_format = SSD_TYPE_DESC; 9197 else 9198 sense_format = SSD_TYPE_FIXED; 9199 9200 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9201 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9202 ctsio->kern_sg_entries = 0; 9203 ctsio->kern_rel_offset = 0; 9204 9205 /* 9206 * struct scsi_sense_data, which is currently set to 256 bytes, is 9207 * larger than the largest allowed value for the length field in the 9208 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9209 */ 9210 ctsio->kern_data_len = cdb->length; 9211 ctsio->kern_total_len = cdb->length; 9212 9213 /* 9214 * If we don't have a LUN, we don't have any pending sense. 9215 */ 9216 if (lun == NULL || 9217 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9218 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9219 /* "Logical unit not supported" */ 9220 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9221 /*current_error*/ 1, 9222 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9223 /*asc*/ 0x25, 9224 /*ascq*/ 0x00, 9225 SSD_ELEM_NONE); 9226 goto send; 9227 } 9228 9229 have_error = 0; 9230 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9231 /* 9232 * Check for pending sense, and then for pending unit attentions. 9233 * Pending sense gets returned first, then pending unit attentions. 9234 */ 9235 mtx_lock(&lun->lun_lock); 9236 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9237 if (ps != NULL) 9238 ps += initidx % CTL_MAX_INIT_PER_PORT; 9239 if (ps != NULL && ps->error_code != 0) { 9240 scsi_sense_data_type stored_format; 9241 9242 /* 9243 * Check to see which sense format was used for the stored 9244 * sense data. 9245 */ 9246 stored_format = scsi_sense_type(ps); 9247 9248 /* 9249 * If the user requested a different sense format than the 9250 * one we stored, then we need to convert it to the other 9251 * format. If we're going from descriptor to fixed format 9252 * sense data, we may lose things in translation, depending 9253 * on what options were used. 9254 * 9255 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9256 * for some reason we'll just copy it out as-is. 
9257 */ 9258 if ((stored_format == SSD_TYPE_FIXED) 9259 && (sense_format == SSD_TYPE_DESC)) 9260 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9261 ps, (struct scsi_sense_data_desc *)sense_ptr); 9262 else if ((stored_format == SSD_TYPE_DESC) 9263 && (sense_format == SSD_TYPE_FIXED)) 9264 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9265 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9266 else 9267 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9268 9269 ps->error_code = 0; 9270 have_error = 1; 9271 } else { 9272 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9273 sense_format); 9274 if (ua_type != CTL_UA_NONE) 9275 have_error = 1; 9276 } 9277 if (have_error == 0) { 9278 /* 9279 * Report informational exception if have one and allowed. 9280 */ 9281 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9282 asc = lun->ie_asc; 9283 ascq = lun->ie_ascq; 9284 } 9285 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9286 /*current_error*/ 1, 9287 /*sense_key*/ SSD_KEY_NO_SENSE, 9288 /*asc*/ asc, 9289 /*ascq*/ ascq, 9290 SSD_ELEM_NONE); 9291 } 9292 mtx_unlock(&lun->lun_lock); 9293 9294 send: 9295 /* 9296 * We report the SCSI status as OK, since the status of the command 9297 * itself is OK. We're reporting sense as parameter data. 9298 */ 9299 ctl_set_success(ctsio); 9300 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9301 ctsio->be_move_done = ctl_config_move_done; 9302 ctl_datamove((union ctl_io *)ctsio); 9303 return (CTL_RETVAL_COMPLETE); 9304 } 9305 9306 int 9307 ctl_tur(struct ctl_scsiio *ctsio) 9308 { 9309 9310 CTL_DEBUG_PRINT(("ctl_tur\n")); 9311 9312 ctl_set_success(ctsio); 9313 ctl_done((union ctl_io *)ctsio); 9314 9315 return (CTL_RETVAL_COMPLETE); 9316 } 9317 9318 /* 9319 * SCSI VPD page 0x00, the Supported VPD Pages page. 9320 */ 9321 static int 9322 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9323 { 9324 struct ctl_lun *lun = CTL_LUN(ctsio); 9325 struct scsi_vpd_supported_pages *pages; 9326 int sup_page_size; 9327 int p; 9328 9329 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9330 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9331 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9332 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9333 ctsio->kern_rel_offset = 0; 9334 ctsio->kern_sg_entries = 0; 9335 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9336 ctsio->kern_total_len = ctsio->kern_data_len; 9337 9338 /* 9339 * The control device is always connected. The disk device, on the 9340 * other hand, may not be online all the time. Need to change this 9341 * to figure out whether the disk device is actually online or not. 
9342 */ 9343 if (lun != NULL) 9344 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9345 lun->be_lun->lun_type; 9346 else 9347 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9348 9349 p = 0; 9350 /* Supported VPD pages */ 9351 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9352 /* Serial Number */ 9353 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9354 /* Device Identification */ 9355 pages->page_list[p++] = SVPD_DEVICE_ID; 9356 /* Extended INQUIRY Data */ 9357 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9358 /* Mode Page Policy */ 9359 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9360 /* SCSI Ports */ 9361 pages->page_list[p++] = SVPD_SCSI_PORTS; 9362 /* Third-party Copy */ 9363 pages->page_list[p++] = SVPD_SCSI_TPC; 9364 /* SCSI Feature Sets */ 9365 pages->page_list[p++] = SVPD_SCSI_SFS; 9366 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9367 /* Block limits */ 9368 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9369 /* Block Device Characteristics */ 9370 pages->page_list[p++] = SVPD_BDC; 9371 /* Logical Block Provisioning */ 9372 pages->page_list[p++] = SVPD_LBP; 9373 } 9374 pages->length = p; 9375 9376 ctl_set_success(ctsio); 9377 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9378 ctsio->be_move_done = ctl_config_move_done; 9379 ctl_datamove((union ctl_io *)ctsio); 9380 return (CTL_RETVAL_COMPLETE); 9381 } 9382 9383 /* 9384 * SCSI VPD page 0x80, the Unit Serial Number page. 9385 */ 9386 static int 9387 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9388 { 9389 struct ctl_lun *lun = CTL_LUN(ctsio); 9390 struct scsi_vpd_unit_serial_number *sn_ptr; 9391 int data_len; 9392 9393 data_len = 4 + CTL_SN_LEN; 9394 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9395 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9396 ctsio->kern_rel_offset = 0; 9397 ctsio->kern_sg_entries = 0; 9398 ctsio->kern_data_len = min(data_len, alloc_len); 9399 ctsio->kern_total_len = ctsio->kern_data_len; 9400 9401 /* 9402 * The control device is always connected. The disk device, on the 9403 * other hand, may not be online all the time. Need to change this 9404 * to figure out whether the disk device is actually online or not. 9405 */ 9406 if (lun != NULL) 9407 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9408 lun->be_lun->lun_type; 9409 else 9410 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9411 9412 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9413 sn_ptr->length = CTL_SN_LEN; 9414 /* 9415 * If we don't have a LUN, we just leave the serial number as 9416 * all spaces. 9417 */ 9418 if (lun != NULL) { 9419 strncpy((char *)sn_ptr->serial_num, 9420 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9421 } else 9422 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9423 9424 ctl_set_success(ctsio); 9425 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9426 ctsio->be_move_done = ctl_config_move_done; 9427 ctl_datamove((union ctl_io *)ctsio); 9428 return (CTL_RETVAL_COMPLETE); 9429 } 9430 9431 9432 /* 9433 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9434 */ 9435 static int 9436 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9437 { 9438 struct ctl_lun *lun = CTL_LUN(ctsio); 9439 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9440 int data_len; 9441 9442 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9443 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9444 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9445 ctsio->kern_sg_entries = 0; 9446 ctsio->kern_rel_offset = 0; 9447 ctsio->kern_data_len = min(data_len, alloc_len); 9448 ctsio->kern_total_len = ctsio->kern_data_len; 9449 9450 /* 9451 * The control device is always connected. The disk device, on the 9452 * other hand, may not be online all the time. 9453 */ 9454 if (lun != NULL) 9455 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9456 lun->be_lun->lun_type; 9457 else 9458 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9459 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9460 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9461 /* 9462 * We support head of queue, ordered and simple tags. 9463 */ 9464 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9465 /* 9466 * Volatile cache supported. 9467 */ 9468 eid_ptr->flags3 = SVPD_EID_V_SUP; 9469 9470 /* 9471 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9472 * attention for a particular IT nexus on all LUNs once we report 9473 * it to that nexus once. This bit is required as of SPC-4. 9474 */ 9475 eid_ptr->flags4 = SVPD_EID_LUICLR; 9476 9477 /* 9478 * We support revert to defaults (RTD) bit in MODE SELECT. 9479 */ 9480 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9481 9482 /* 9483 * XXX KDM in order to correctly answer this, we would need 9484 * information from the SIM to determine how much sense data it 9485 * can send. So this would really be a path inquiry field, most 9486 * likely. This can be set to a maximum of 252 according to SPC-4, 9487 * but the hardware may or may not be able to support that much. 9488 * 0 just means that the maximum sense data length is not reported. 9489 */ 9490 eid_ptr->max_sense_length = 0; 9491 9492 ctl_set_success(ctsio); 9493 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9494 ctsio->be_move_done = ctl_config_move_done; 9495 ctl_datamove((union ctl_io *)ctsio); 9496 return (CTL_RETVAL_COMPLETE); 9497 } 9498 9499 static int 9500 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9501 { 9502 struct ctl_lun *lun = CTL_LUN(ctsio); 9503 struct scsi_vpd_mode_page_policy *mpp_ptr; 9504 int data_len; 9505 9506 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9507 sizeof(struct scsi_vpd_mode_page_policy_descr); 9508 9509 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9510 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9511 ctsio->kern_rel_offset = 0; 9512 ctsio->kern_sg_entries = 0; 9513 ctsio->kern_data_len = min(data_len, alloc_len); 9514 ctsio->kern_total_len = ctsio->kern_data_len; 9515 9516 /* 9517 * The control device is always connected. The disk device, on the 9518 * other hand, may not be online all the time. 
9519 */ 9520 if (lun != NULL) 9521 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9522 lun->be_lun->lun_type; 9523 else 9524 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9525 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9526 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9527 mpp_ptr->descr[0].page_code = 0x3f; 9528 mpp_ptr->descr[0].subpage_code = 0xff; 9529 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9530 9531 ctl_set_success(ctsio); 9532 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9533 ctsio->be_move_done = ctl_config_move_done; 9534 ctl_datamove((union ctl_io *)ctsio); 9535 return (CTL_RETVAL_COMPLETE); 9536 } 9537 9538 /* 9539 * SCSI VPD page 0x83, the Device Identification page. 9540 */ 9541 static int 9542 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9543 { 9544 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9545 struct ctl_port *port = CTL_PORT(ctsio); 9546 struct ctl_lun *lun = CTL_LUN(ctsio); 9547 struct scsi_vpd_device_id *devid_ptr; 9548 struct scsi_vpd_id_descriptor *desc; 9549 int data_len, g; 9550 uint8_t proto; 9551 9552 data_len = sizeof(struct scsi_vpd_device_id) + 9553 sizeof(struct scsi_vpd_id_descriptor) + 9554 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9555 sizeof(struct scsi_vpd_id_descriptor) + 9556 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9557 if (lun && lun->lun_devid) 9558 data_len += lun->lun_devid->len; 9559 if (port && port->port_devid) 9560 data_len += port->port_devid->len; 9561 if (port && port->target_devid) 9562 data_len += port->target_devid->len; 9563 9564 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9565 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9566 ctsio->kern_sg_entries = 0; 9567 ctsio->kern_rel_offset = 0; 9568 ctsio->kern_sg_entries = 0; 9569 ctsio->kern_data_len = min(data_len, alloc_len); 9570 ctsio->kern_total_len = ctsio->kern_data_len; 9571 9572 /* 9573 * The control device is always connected. The disk device, on the 9574 * other hand, may not be online all the time. 9575 */ 9576 if (lun != NULL) 9577 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9578 lun->be_lun->lun_type; 9579 else 9580 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9581 devid_ptr->page_code = SVPD_DEVICE_ID; 9582 scsi_ulto2b(data_len - 4, devid_ptr->length); 9583 9584 if (port && port->port_type == CTL_PORT_FC) 9585 proto = SCSI_PROTO_FC << 4; 9586 else if (port && port->port_type == CTL_PORT_SAS) 9587 proto = SCSI_PROTO_SAS << 4; 9588 else if (port && port->port_type == CTL_PORT_ISCSI) 9589 proto = SCSI_PROTO_ISCSI << 4; 9590 else 9591 proto = SCSI_PROTO_SPI << 4; 9592 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9593 9594 /* 9595 * We're using a LUN association here. i.e., this device ID is a 9596 * per-LUN identifier. 9597 */ 9598 if (lun && lun->lun_devid) { 9599 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9600 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9601 lun->lun_devid->len); 9602 } 9603 9604 /* 9605 * This is for the WWPN which is a port association. 
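 * (e.g. the Fibre Channel WWPN or the iSCSI target port name, i.e.
 * whatever identifier the frontend stored in port->port_devid)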
9606 */ 9607 if (port && port->port_devid) { 9608 memcpy(desc, port->port_devid->data, port->port_devid->len); 9609 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9610 port->port_devid->len); 9611 } 9612 9613 /* 9614 * This is for the Relative Target Port(type 4h) identifier 9615 */ 9616 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9617 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9618 SVPD_ID_TYPE_RELTARG; 9619 desc->length = 4; 9620 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9621 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9622 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9623 9624 /* 9625 * This is for the Target Port Group(type 5h) identifier 9626 */ 9627 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9628 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9629 SVPD_ID_TYPE_TPORTGRP; 9630 desc->length = 4; 9631 if (softc->is_single || 9632 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9633 g = 1; 9634 else 9635 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9636 scsi_ulto2b(g, &desc->identifier[2]); 9637 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9638 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9639 9640 /* 9641 * This is for the Target identifier 9642 */ 9643 if (port && port->target_devid) { 9644 memcpy(desc, port->target_devid->data, port->target_devid->len); 9645 } 9646 9647 ctl_set_success(ctsio); 9648 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9649 ctsio->be_move_done = ctl_config_move_done; 9650 ctl_datamove((union ctl_io *)ctsio); 9651 return (CTL_RETVAL_COMPLETE); 9652 } 9653 9654 static int 9655 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9656 { 9657 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9658 struct ctl_lun *lun = CTL_LUN(ctsio); 9659 struct scsi_vpd_scsi_ports *sp; 9660 struct scsi_vpd_port_designation *pd; 9661 struct scsi_vpd_port_designation_cont *pdc; 9662 struct ctl_port *port; 9663 int data_len, num_target_ports, iid_len, id_len; 9664 9665 num_target_ports = 0; 9666 iid_len = 0; 9667 id_len = 0; 9668 mtx_lock(&softc->ctl_lock); 9669 STAILQ_FOREACH(port, &softc->port_list, links) { 9670 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9671 continue; 9672 if (lun != NULL && 9673 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9674 continue; 9675 num_target_ports++; 9676 if (port->init_devid) 9677 iid_len += port->init_devid->len; 9678 if (port->port_devid) 9679 id_len += port->port_devid->len; 9680 } 9681 mtx_unlock(&softc->ctl_lock); 9682 9683 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9684 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9685 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9686 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9687 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9688 ctsio->kern_sg_entries = 0; 9689 ctsio->kern_rel_offset = 0; 9690 ctsio->kern_sg_entries = 0; 9691 ctsio->kern_data_len = min(data_len, alloc_len); 9692 ctsio->kern_total_len = ctsio->kern_data_len; 9693 9694 /* 9695 * The control device is always connected. The disk device, on the 9696 * other hand, may not be online all the time. Need to change this 9697 * to figure out whether the disk device is actually online or not. 
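 *
 * Only ports that are online, and to which this LUN is actually mapped,
 * get a designation descriptor in the list built below.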
9698 */ 9699 if (lun != NULL) 9700 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9701 lun->be_lun->lun_type; 9702 else 9703 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9704 9705 sp->page_code = SVPD_SCSI_PORTS; 9706 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9707 sp->page_length); 9708 pd = &sp->design[0]; 9709 9710 mtx_lock(&softc->ctl_lock); 9711 STAILQ_FOREACH(port, &softc->port_list, links) { 9712 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9713 continue; 9714 if (lun != NULL && 9715 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9716 continue; 9717 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9718 if (port->init_devid) { 9719 iid_len = port->init_devid->len; 9720 memcpy(pd->initiator_transportid, 9721 port->init_devid->data, port->init_devid->len); 9722 } else 9723 iid_len = 0; 9724 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9725 pdc = (struct scsi_vpd_port_designation_cont *) 9726 (&pd->initiator_transportid[iid_len]); 9727 if (port->port_devid) { 9728 id_len = port->port_devid->len; 9729 memcpy(pdc->target_port_descriptors, 9730 port->port_devid->data, port->port_devid->len); 9731 } else 9732 id_len = 0; 9733 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9734 pd = (struct scsi_vpd_port_designation *) 9735 ((uint8_t *)pdc->target_port_descriptors + id_len); 9736 } 9737 mtx_unlock(&softc->ctl_lock); 9738 9739 ctl_set_success(ctsio); 9740 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9741 ctsio->be_move_done = ctl_config_move_done; 9742 ctl_datamove((union ctl_io *)ctsio); 9743 return (CTL_RETVAL_COMPLETE); 9744 } 9745 9746 static int 9747 ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) 9748 { 9749 struct ctl_lun *lun = CTL_LUN(ctsio); 9750 struct scsi_vpd_sfs *sfs_ptr; 9751 int sfs_page_size, n; 9752 9753 sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; 9754 ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); 9755 sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; 9756 ctsio->kern_sg_entries = 0; 9757 ctsio->kern_rel_offset = 0; 9758 ctsio->kern_sg_entries = 0; 9759 ctsio->kern_data_len = min(sfs_page_size, alloc_len); 9760 ctsio->kern_total_len = ctsio->kern_data_len; 9761 9762 /* 9763 * The control device is always connected. The disk device, on the 9764 * other hand, may not be online all the time. Need to change this 9765 * to figure out whether the disk device is actually online or not. 
9766 */ 9767 if (lun != NULL) 9768 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9769 lun->be_lun->lun_type; 9770 else 9771 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9772 9773 sfs_ptr->page_code = SVPD_SCSI_SFS; 9774 n = 0; 9775 /* Discovery 2016 */ 9776 scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); 9777 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9778 /* SBC Base 2016 */ 9779 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); 9780 /* SBC Base 2010 */ 9781 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); 9782 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9783 /* Basic Provisioning 2016 */ 9784 scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); 9785 } 9786 /* Drive Maintenance 2016 */ 9787 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); 9788 } 9789 scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); 9790 9791 ctl_set_success(ctsio); 9792 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9793 ctsio->be_move_done = ctl_config_move_done; 9794 ctl_datamove((union ctl_io *)ctsio); 9795 return (CTL_RETVAL_COMPLETE); 9796 } 9797 9798 static int 9799 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9800 { 9801 struct ctl_lun *lun = CTL_LUN(ctsio); 9802 struct scsi_vpd_block_limits *bl_ptr; 9803 const char *val; 9804 uint64_t ival; 9805 9806 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9807 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9808 ctsio->kern_sg_entries = 0; 9809 ctsio->kern_rel_offset = 0; 9810 ctsio->kern_sg_entries = 0; 9811 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9812 ctsio->kern_total_len = ctsio->kern_data_len; 9813 9814 /* 9815 * The control device is always connected. The disk device, on the 9816 * other hand, may not be online all the time. Need to change this 9817 * to figure out whether the disk device is actually online or not. 
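 *
 * Several of the limits reported below ("unmap_max_lba", "unmap_max_descr"
 * and "write_same_max_lba") can be tuned through backend LUN options; the
 * defaults are effectively unlimited.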
9818 */ 9819 if (lun != NULL) 9820 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9821 lun->be_lun->lun_type; 9822 else 9823 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9824 9825 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9826 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9827 bl_ptr->max_cmp_write_len = 0xff; 9828 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9829 if (lun != NULL) { 9830 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9831 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9832 ival = 0xffffffff; 9833 val = dnvlist_get_string(lun->be_lun->options, 9834 "unmap_max_lba", NULL); 9835 if (val != NULL) 9836 ctl_expand_number(val, &ival); 9837 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9838 ival = 0xffffffff; 9839 val = dnvlist_get_string(lun->be_lun->options, 9840 "unmap_max_descr", NULL); 9841 if (val != NULL) 9842 ctl_expand_number(val, &ival); 9843 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9844 if (lun->be_lun->ublockexp != 0) { 9845 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9846 bl_ptr->opt_unmap_grain); 9847 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9848 bl_ptr->unmap_grain_align); 9849 } 9850 } 9851 scsi_ulto4b(lun->be_lun->atomicblock, 9852 bl_ptr->max_atomic_transfer_length); 9853 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9854 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9855 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9856 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9857 ival = UINT64_MAX; 9858 val = dnvlist_get_string(lun->be_lun->options, 9859 "write_same_max_lba", NULL); 9860 if (val != NULL) 9861 ctl_expand_number(val, &ival); 9862 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9863 } 9864 9865 ctl_set_success(ctsio); 9866 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9867 ctsio->be_move_done = ctl_config_move_done; 9868 ctl_datamove((union ctl_io *)ctsio); 9869 return (CTL_RETVAL_COMPLETE); 9870 } 9871 9872 static int 9873 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9874 { 9875 struct ctl_lun *lun = CTL_LUN(ctsio); 9876 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9877 const char *value; 9878 u_int i; 9879 9880 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9881 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9882 ctsio->kern_sg_entries = 0; 9883 ctsio->kern_rel_offset = 0; 9884 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9885 ctsio->kern_total_len = ctsio->kern_data_len; 9886 9887 /* 9888 * The control device is always connected. The disk device, on the 9889 * other hand, may not be online all the time. Need to change this 9890 * to figure out whether the disk device is actually online or not. 
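 *
 * The medium rotation rate and form factor reported below may be
 * overridden with the "rpm" and "formfactor" LUN options.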
9891 */ 9892 if (lun != NULL) 9893 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9894 lun->be_lun->lun_type; 9895 else 9896 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9897 bdc_ptr->page_code = SVPD_BDC; 9898 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9899 if (lun != NULL && 9900 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) 9901 i = strtol(value, NULL, 0); 9902 else 9903 i = CTL_DEFAULT_ROTATION_RATE; 9904 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9905 if (lun != NULL && 9906 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) 9907 i = strtol(value, NULL, 0); 9908 else 9909 i = 0; 9910 bdc_ptr->wab_wac_ff = (i & 0x0f); 9911 bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; 9912 9913 ctl_set_success(ctsio); 9914 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9915 ctsio->be_move_done = ctl_config_move_done; 9916 ctl_datamove((union ctl_io *)ctsio); 9917 return (CTL_RETVAL_COMPLETE); 9918 } 9919 9920 static int 9921 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9922 { 9923 struct ctl_lun *lun = CTL_LUN(ctsio); 9924 struct scsi_vpd_logical_block_prov *lbp_ptr; 9925 const char *value; 9926 9927 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9928 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9929 ctsio->kern_sg_entries = 0; 9930 ctsio->kern_rel_offset = 0; 9931 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9932 ctsio->kern_total_len = ctsio->kern_data_len; 9933 9934 /* 9935 * The control device is always connected. The disk device, on the 9936 * other hand, may not be online all the time. Need to change this 9937 * to figure out whether the disk device is actually online or not. 9938 */ 9939 if (lun != NULL) 9940 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9941 lun->be_lun->lun_type; 9942 else 9943 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9944 9945 lbp_ptr->page_code = SVPD_LBP; 9946 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9947 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9948 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9949 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9950 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9951 value = dnvlist_get_string(lun->be_lun->options, 9952 "provisioning_type", NULL); 9953 if (value != NULL) { 9954 if (strcmp(value, "resource") == 0) 9955 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 9956 else if (strcmp(value, "thin") == 0) 9957 lbp_ptr->prov_type = SVPD_LBP_THIN; 9958 } else 9959 lbp_ptr->prov_type = SVPD_LBP_THIN; 9960 } 9961 9962 ctl_set_success(ctsio); 9963 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9964 ctsio->be_move_done = ctl_config_move_done; 9965 ctl_datamove((union ctl_io *)ctsio); 9966 return (CTL_RETVAL_COMPLETE); 9967 } 9968 9969 /* 9970 * INQUIRY with the EVPD bit set. 
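 * Dispatch on the page code in byte 2 of the CDB.  Pages we do not
 * support, or that only make sense for direct access LUNs, are rejected
 * with ILLEGAL REQUEST, invalid field in CDB.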
9971 */ 9972 static int 9973 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9974 { 9975 struct ctl_lun *lun = CTL_LUN(ctsio); 9976 struct scsi_inquiry *cdb; 9977 int alloc_len, retval; 9978 9979 cdb = (struct scsi_inquiry *)ctsio->cdb; 9980 alloc_len = scsi_2btoul(cdb->length); 9981 9982 switch (cdb->page_code) { 9983 case SVPD_SUPPORTED_PAGES: 9984 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9985 break; 9986 case SVPD_UNIT_SERIAL_NUMBER: 9987 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9988 break; 9989 case SVPD_DEVICE_ID: 9990 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9991 break; 9992 case SVPD_EXTENDED_INQUIRY_DATA: 9993 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9994 break; 9995 case SVPD_MODE_PAGE_POLICY: 9996 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9997 break; 9998 case SVPD_SCSI_PORTS: 9999 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10000 break; 10001 case SVPD_SCSI_TPC: 10002 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10003 break; 10004 case SVPD_SCSI_SFS: 10005 retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); 10006 break; 10007 case SVPD_BLOCK_LIMITS: 10008 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10009 goto err; 10010 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10011 break; 10012 case SVPD_BDC: 10013 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10014 goto err; 10015 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10016 break; 10017 case SVPD_LBP: 10018 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10019 goto err; 10020 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10021 break; 10022 default: 10023 err: 10024 ctl_set_invalid_field(ctsio, 10025 /*sks_valid*/ 1, 10026 /*command*/ 1, 10027 /*field*/ 2, 10028 /*bit_valid*/ 0, 10029 /*bit*/ 0); 10030 ctl_done((union ctl_io *)ctsio); 10031 retval = CTL_RETVAL_COMPLETE; 10032 break; 10033 } 10034 10035 return (retval); 10036 } 10037 10038 /* 10039 * Standard INQUIRY data. 10040 */ 10041 static int 10042 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10043 { 10044 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10045 struct ctl_port *port = CTL_PORT(ctsio); 10046 struct ctl_lun *lun = CTL_LUN(ctsio); 10047 struct scsi_inquiry_data *inq_ptr; 10048 struct scsi_inquiry *cdb; 10049 const char *val; 10050 uint32_t alloc_len, data_len; 10051 ctl_port_type port_type; 10052 10053 port_type = port->port_type; 10054 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10055 port_type = CTL_PORT_SCSI; 10056 10057 cdb = (struct scsi_inquiry *)ctsio->cdb; 10058 alloc_len = scsi_2btoul(cdb->length); 10059 10060 /* 10061 * We malloc the full inquiry data size here and fill it 10062 * in. If the user only asks for less, we'll give him 10063 * that much. 
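 * For example, an initiator that sets the allocation length to 36 bytes
 * receives only the first 36 bytes of the data built here, since
 * kern_data_len is clamped to min(data_len, alloc_len) below.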
10064 */ 10065 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10066 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10067 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10068 ctsio->kern_sg_entries = 0; 10069 ctsio->kern_rel_offset = 0; 10070 ctsio->kern_data_len = min(data_len, alloc_len); 10071 ctsio->kern_total_len = ctsio->kern_data_len; 10072 10073 if (lun != NULL) { 10074 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10075 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10076 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10077 lun->be_lun->lun_type; 10078 } else { 10079 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10080 lun->be_lun->lun_type; 10081 } 10082 if (lun->flags & CTL_LUN_REMOVABLE) 10083 inq_ptr->dev_qual2 |= SID_RMB; 10084 } else 10085 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10086 10087 /* RMB in byte 2 is 0 */ 10088 inq_ptr->version = SCSI_REV_SPC5; 10089 10090 /* 10091 * According to SAM-3, even if a device only supports a single 10092 * level of LUN addressing, it should still set the HISUP bit: 10093 * 10094 * 4.9.1 Logical unit numbers overview 10095 * 10096 * All logical unit number formats described in this standard are 10097 * hierarchical in structure even when only a single level in that 10098 * hierarchy is used. The HISUP bit shall be set to one in the 10099 * standard INQUIRY data (see SPC-2) when any logical unit number 10100 * format described in this standard is used. Non-hierarchical 10101 * formats are outside the scope of this standard. 10102 * 10103 * Therefore we set the HiSup bit here. 10104 * 10105 * The response format is 2, per SPC-3. 10106 */ 10107 inq_ptr->response_format = SID_HiSup | 2; 10108 10109 inq_ptr->additional_length = data_len - 10110 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10111 CTL_DEBUG_PRINT(("additional_length = %d\n", 10112 inq_ptr->additional_length)); 10113 10114 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10115 if (port_type == CTL_PORT_SCSI) 10116 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10117 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10118 inq_ptr->flags = SID_CmdQue; 10119 if (port_type == CTL_PORT_SCSI) 10120 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10121 10122 /* 10123 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10124 * We have 8 bytes for the vendor name, and 16 bytes for the device 10125 * name and 4 bytes for the revision. 
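 * When a "vendor", "product" or "revision" LUN option is set, the field is
 * first filled with spaces and the option string is then copied over it
 * without a terminating NUL, truncated to the field width if necessary.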
10126 */ 10127 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10128 "vendor", NULL)) == NULL) { 10129 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10130 } else { 10131 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10132 strncpy(inq_ptr->vendor, val, 10133 min(sizeof(inq_ptr->vendor), strlen(val))); 10134 } 10135 if (lun == NULL) { 10136 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10137 sizeof(inq_ptr->product)); 10138 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", 10139 NULL)) == NULL) { 10140 switch (lun->be_lun->lun_type) { 10141 case T_DIRECT: 10142 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10143 sizeof(inq_ptr->product)); 10144 break; 10145 case T_PROCESSOR: 10146 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10147 sizeof(inq_ptr->product)); 10148 break; 10149 case T_CDROM: 10150 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10151 sizeof(inq_ptr->product)); 10152 break; 10153 default: 10154 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10155 sizeof(inq_ptr->product)); 10156 break; 10157 } 10158 } else { 10159 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10160 strncpy(inq_ptr->product, val, 10161 min(sizeof(inq_ptr->product), strlen(val))); 10162 } 10163 10164 /* 10165 * XXX make this a macro somewhere so it automatically gets 10166 * incremented when we make changes. 10167 */ 10168 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10169 "revision", NULL)) == NULL) { 10170 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10171 } else { 10172 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10173 strncpy(inq_ptr->revision, val, 10174 min(sizeof(inq_ptr->revision), strlen(val))); 10175 } 10176 10177 /* 10178 * For parallel SCSI, we support double transition and single 10179 * transition clocking. We also support QAS (Quick Arbitration 10180 * and Selection) and Information Unit transfers on both the 10181 * control and array devices. 
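 * These SPI-specific capability bits are only reported when the request
 * arrived through a parallel SCSI port (port_type CTL_PORT_SCSI).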
10182 */ 10183 if (port_type == CTL_PORT_SCSI) 10184 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10185 SID_SPI_IUS; 10186 10187 /* SAM-6 (no version claimed) */ 10188 scsi_ulto2b(0x00C0, inq_ptr->version1); 10189 /* SPC-5 (no version claimed) */ 10190 scsi_ulto2b(0x05C0, inq_ptr->version2); 10191 if (port_type == CTL_PORT_FC) { 10192 /* FCP-2 ANSI INCITS.350:2003 */ 10193 scsi_ulto2b(0x0917, inq_ptr->version3); 10194 } else if (port_type == CTL_PORT_SCSI) { 10195 /* SPI-4 ANSI INCITS.362:200x */ 10196 scsi_ulto2b(0x0B56, inq_ptr->version3); 10197 } else if (port_type == CTL_PORT_ISCSI) { 10198 /* iSCSI (no version claimed) */ 10199 scsi_ulto2b(0x0960, inq_ptr->version3); 10200 } else if (port_type == CTL_PORT_SAS) { 10201 /* SAS (no version claimed) */ 10202 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10203 } else if (port_type == CTL_PORT_UMASS) { 10204 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10205 scsi_ulto2b(0x1730, inq_ptr->version3); 10206 } 10207 10208 if (lun == NULL) { 10209 /* SBC-4 (no version claimed) */ 10210 scsi_ulto2b(0x0600, inq_ptr->version4); 10211 } else { 10212 switch (lun->be_lun->lun_type) { 10213 case T_DIRECT: 10214 /* SBC-4 (no version claimed) */ 10215 scsi_ulto2b(0x0600, inq_ptr->version4); 10216 break; 10217 case T_PROCESSOR: 10218 break; 10219 case T_CDROM: 10220 /* MMC-6 (no version claimed) */ 10221 scsi_ulto2b(0x04E0, inq_ptr->version4); 10222 break; 10223 default: 10224 break; 10225 } 10226 } 10227 10228 ctl_set_success(ctsio); 10229 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10230 ctsio->be_move_done = ctl_config_move_done; 10231 ctl_datamove((union ctl_io *)ctsio); 10232 return (CTL_RETVAL_COMPLETE); 10233 } 10234 10235 int 10236 ctl_inquiry(struct ctl_scsiio *ctsio) 10237 { 10238 struct scsi_inquiry *cdb; 10239 int retval; 10240 10241 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10242 10243 cdb = (struct scsi_inquiry *)ctsio->cdb; 10244 if (cdb->byte2 & SI_EVPD) 10245 retval = ctl_inquiry_evpd(ctsio); 10246 else if (cdb->page_code == 0) 10247 retval = ctl_inquiry_std(ctsio); 10248 else { 10249 ctl_set_invalid_field(ctsio, 10250 /*sks_valid*/ 1, 10251 /*command*/ 1, 10252 /*field*/ 2, 10253 /*bit_valid*/ 0, 10254 /*bit*/ 0); 10255 ctl_done((union ctl_io *)ctsio); 10256 return (CTL_RETVAL_COMPLETE); 10257 } 10258 10259 return (retval); 10260 } 10261 10262 int 10263 ctl_get_config(struct ctl_scsiio *ctsio) 10264 { 10265 struct ctl_lun *lun = CTL_LUN(ctsio); 10266 struct scsi_get_config_header *hdr; 10267 struct scsi_get_config_feature *feature; 10268 struct scsi_get_config *cdb; 10269 uint32_t alloc_len, data_len; 10270 int rt, starting; 10271 10272 cdb = (struct scsi_get_config *)ctsio->cdb; 10273 rt = (cdb->rt & SGC_RT_MASK); 10274 starting = scsi_2btoul(cdb->starting_feature); 10275 alloc_len = scsi_2btoul(cdb->length); 10276 10277 data_len = sizeof(struct scsi_get_config_header) + 10278 sizeof(struct scsi_get_config_feature) + 8 + 10279 sizeof(struct scsi_get_config_feature) + 8 + 10280 sizeof(struct scsi_get_config_feature) + 4 + 10281 sizeof(struct scsi_get_config_feature) + 4 + 10282 sizeof(struct scsi_get_config_feature) + 8 + 10283 sizeof(struct scsi_get_config_feature) + 10284 sizeof(struct scsi_get_config_feature) + 4 + 10285 sizeof(struct scsi_get_config_feature) + 4 + 10286 sizeof(struct scsi_get_config_feature) + 4 + 10287 sizeof(struct scsi_get_config_feature) + 4 + 10288 sizeof(struct scsi_get_config_feature) + 4 + 10289 sizeof(struct scsi_get_config_feature) + 4; 10290 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10291 ctsio->kern_sg_entries = 0; 10292 ctsio->kern_rel_offset = 0; 10293 10294 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10295 if (lun->flags & CTL_LUN_NO_MEDIA) 10296 scsi_ulto2b(0x0000, hdr->current_profile); 10297 else 10298 scsi_ulto2b(0x0010, hdr->current_profile); 10299 feature = (struct scsi_get_config_feature *)(hdr + 1); 10300 10301 if (starting > 0x003b) 10302 goto done; 10303 if (starting > 0x003a) 10304 goto f3b; 10305 if (starting > 0x002b) 10306 goto f3a; 10307 if (starting > 0x002a) 10308 goto f2b; 10309 if (starting > 0x001f) 10310 goto f2a; 10311 if (starting > 0x001e) 10312 goto f1f; 10313 if (starting > 0x001d) 10314 goto f1e; 10315 if (starting > 0x0010) 10316 goto f1d; 10317 if (starting > 0x0003) 10318 goto f10; 10319 if (starting > 0x0002) 10320 goto f3; 10321 if (starting > 0x0001) 10322 goto f2; 10323 if (starting > 0x0000) 10324 goto f1; 10325 10326 /* Profile List */ 10327 scsi_ulto2b(0x0000, feature->feature_code); 10328 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10329 feature->add_length = 8; 10330 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10331 feature->feature_data[2] = 0x00; 10332 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10333 feature->feature_data[6] = 0x01; 10334 feature = (struct scsi_get_config_feature *) 10335 &feature->feature_data[feature->add_length]; 10336 10337 f1: /* Core */ 10338 scsi_ulto2b(0x0001, feature->feature_code); 10339 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10340 feature->add_length = 8; 10341 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10342 feature->feature_data[4] = 0x03; 10343 feature = (struct scsi_get_config_feature *) 10344 &feature->feature_data[feature->add_length]; 10345 10346 f2: /* Morphing */ 10347 scsi_ulto2b(0x0002, feature->feature_code); 10348 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10349 feature->add_length = 4; 10350 feature->feature_data[0] = 0x02; 10351 feature = (struct scsi_get_config_feature *) 10352 &feature->feature_data[feature->add_length]; 10353 10354 f3: /* Removable Medium */ 10355 scsi_ulto2b(0x0003, feature->feature_code); 10356 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10357 feature->add_length = 4; 10358 feature->feature_data[0] = 0x39; 10359 feature = (struct scsi_get_config_feature *) 10360 &feature->feature_data[feature->add_length]; 10361 10362 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10363 goto done; 10364 10365 f10: /* Random Read */ 10366 scsi_ulto2b(0x0010, feature->feature_code); 10367 feature->flags = 0x00; 10368 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10369 feature->flags |= SGC_F_CURRENT; 10370 feature->add_length = 8; 10371 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10372 scsi_ulto2b(1, &feature->feature_data[4]); 10373 feature->feature_data[6] = 0x00; 10374 feature = (struct scsi_get_config_feature *) 10375 &feature->feature_data[feature->add_length]; 10376 10377 f1d: /* Multi-Read */ 10378 scsi_ulto2b(0x001D, feature->feature_code); 10379 feature->flags = 0x00; 10380 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10381 feature->flags |= SGC_F_CURRENT; 10382 feature->add_length = 0; 10383 feature = (struct scsi_get_config_feature *) 10384 &feature->feature_data[feature->add_length]; 10385 10386 f1e: /* CD Read */ 10387 scsi_ulto2b(0x001E, feature->feature_code); 10388 feature->flags = 0x00; 10389 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10390 feature->flags |= SGC_F_CURRENT; 10391 feature->add_length = 
4; 10392 feature->feature_data[0] = 0x00; 10393 feature = (struct scsi_get_config_feature *) 10394 &feature->feature_data[feature->add_length]; 10395 10396 f1f: /* DVD Read */ 10397 scsi_ulto2b(0x001F, feature->feature_code); 10398 feature->flags = 0x08; 10399 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10400 feature->flags |= SGC_F_CURRENT; 10401 feature->add_length = 4; 10402 feature->feature_data[0] = 0x01; 10403 feature->feature_data[2] = 0x03; 10404 feature = (struct scsi_get_config_feature *) 10405 &feature->feature_data[feature->add_length]; 10406 10407 f2a: /* DVD+RW */ 10408 scsi_ulto2b(0x002A, feature->feature_code); 10409 feature->flags = 0x04; 10410 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10411 feature->flags |= SGC_F_CURRENT; 10412 feature->add_length = 4; 10413 feature->feature_data[0] = 0x00; 10414 feature->feature_data[1] = 0x00; 10415 feature = (struct scsi_get_config_feature *) 10416 &feature->feature_data[feature->add_length]; 10417 10418 f2b: /* DVD+R */ 10419 scsi_ulto2b(0x002B, feature->feature_code); 10420 feature->flags = 0x00; 10421 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10422 feature->flags |= SGC_F_CURRENT; 10423 feature->add_length = 4; 10424 feature->feature_data[0] = 0x00; 10425 feature = (struct scsi_get_config_feature *) 10426 &feature->feature_data[feature->add_length]; 10427 10428 f3a: /* DVD+RW Dual Layer */ 10429 scsi_ulto2b(0x003A, feature->feature_code); 10430 feature->flags = 0x00; 10431 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10432 feature->flags |= SGC_F_CURRENT; 10433 feature->add_length = 4; 10434 feature->feature_data[0] = 0x00; 10435 feature->feature_data[1] = 0x00; 10436 feature = (struct scsi_get_config_feature *) 10437 &feature->feature_data[feature->add_length]; 10438 10439 f3b: /* DVD+R Dual Layer */ 10440 scsi_ulto2b(0x003B, feature->feature_code); 10441 feature->flags = 0x00; 10442 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10443 feature->flags |= SGC_F_CURRENT; 10444 feature->add_length = 4; 10445 feature->feature_data[0] = 0x00; 10446 feature = (struct scsi_get_config_feature *) 10447 &feature->feature_data[feature->add_length]; 10448 10449 done: 10450 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10451 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10452 feature = (struct scsi_get_config_feature *)(hdr + 1); 10453 if (scsi_2btoul(feature->feature_code) == starting) 10454 feature = (struct scsi_get_config_feature *) 10455 &feature->feature_data[feature->add_length]; 10456 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10457 } 10458 scsi_ulto4b(data_len - 4, hdr->data_length); 10459 ctsio->kern_data_len = min(data_len, alloc_len); 10460 ctsio->kern_total_len = ctsio->kern_data_len; 10461 10462 ctl_set_success(ctsio); 10463 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10464 ctsio->be_move_done = ctl_config_move_done; 10465 ctl_datamove((union ctl_io *)ctsio); 10466 return (CTL_RETVAL_COMPLETE); 10467 } 10468 10469 int 10470 ctl_get_event_status(struct ctl_scsiio *ctsio) 10471 { 10472 struct scsi_get_event_status_header *hdr; 10473 struct scsi_get_event_status *cdb; 10474 uint32_t alloc_len, data_len; 10475 10476 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10477 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10478 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10479 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10480 ctl_done((union ctl_io *)ctsio); 10481 return (CTL_RETVAL_COMPLETE); 10482 } 10483 alloc_len = scsi_2btoul(cdb->length); 10484 10485 data_len = sizeof(struct scsi_get_event_status_header); 10486 
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10487 ctsio->kern_sg_entries = 0; 10488 ctsio->kern_rel_offset = 0; 10489 ctsio->kern_data_len = min(data_len, alloc_len); 10490 ctsio->kern_total_len = ctsio->kern_data_len; 10491 10492 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10493 scsi_ulto2b(0, hdr->descr_length); 10494 hdr->nea_class = SGESN_NEA; 10495 hdr->supported_class = 0; 10496 10497 ctl_set_success(ctsio); 10498 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10499 ctsio->be_move_done = ctl_config_move_done; 10500 ctl_datamove((union ctl_io *)ctsio); 10501 return (CTL_RETVAL_COMPLETE); 10502 } 10503 10504 int 10505 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10506 { 10507 struct scsi_mechanism_status_header *hdr; 10508 struct scsi_mechanism_status *cdb; 10509 uint32_t alloc_len, data_len; 10510 10511 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10512 alloc_len = scsi_2btoul(cdb->length); 10513 10514 data_len = sizeof(struct scsi_mechanism_status_header); 10515 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10516 ctsio->kern_sg_entries = 0; 10517 ctsio->kern_rel_offset = 0; 10518 ctsio->kern_data_len = min(data_len, alloc_len); 10519 ctsio->kern_total_len = ctsio->kern_data_len; 10520 10521 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10522 hdr->state1 = 0x00; 10523 hdr->state2 = 0xe0; 10524 scsi_ulto3b(0, hdr->lba); 10525 hdr->slots_num = 0; 10526 scsi_ulto2b(0, hdr->slots_length); 10527 10528 ctl_set_success(ctsio); 10529 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10530 ctsio->be_move_done = ctl_config_move_done; 10531 ctl_datamove((union ctl_io *)ctsio); 10532 return (CTL_RETVAL_COMPLETE); 10533 } 10534 10535 static void 10536 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10537 { 10538 10539 lba += 150; 10540 buf[0] = 0; 10541 buf[1] = bin2bcd((lba / 75) / 60); 10542 buf[2] = bin2bcd((lba / 75) % 60); 10543 buf[3] = bin2bcd(lba % 75); 10544 } 10545 10546 int 10547 ctl_read_toc(struct ctl_scsiio *ctsio) 10548 { 10549 struct ctl_lun *lun = CTL_LUN(ctsio); 10550 struct scsi_read_toc_hdr *hdr; 10551 struct scsi_read_toc_type01_descr *descr; 10552 struct scsi_read_toc *cdb; 10553 uint32_t alloc_len, data_len; 10554 int format, msf; 10555 10556 cdb = (struct scsi_read_toc *)ctsio->cdb; 10557 msf = (cdb->byte2 & CD_MSF) != 0; 10558 format = cdb->format; 10559 alloc_len = scsi_2btoul(cdb->data_len); 10560 10561 data_len = sizeof(struct scsi_read_toc_hdr); 10562 if (format == 0) 10563 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10564 else 10565 data_len += sizeof(struct scsi_read_toc_type01_descr); 10566 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10567 ctsio->kern_sg_entries = 0; 10568 ctsio->kern_rel_offset = 0; 10569 ctsio->kern_data_len = min(data_len, alloc_len); 10570 ctsio->kern_total_len = ctsio->kern_data_len; 10571 10572 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10573 if (format == 0) { 10574 scsi_ulto2b(0x12, hdr->data_length); 10575 hdr->first = 1; 10576 hdr->last = 1; 10577 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10578 descr->addr_ctl = 0x14; 10579 descr->track_number = 1; 10580 if (msf) 10581 ctl_ultomsf(0, descr->track_start); 10582 else 10583 scsi_ulto4b(0, descr->track_start); 10584 descr++; 10585 descr->addr_ctl = 0x14; 10586 descr->track_number = 0xaa; 10587 if (msf) 10588 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10589 else 10590 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10591 } else { 10592 
scsi_ulto2b(0x0a, hdr->data_length); 10593 hdr->first = 1; 10594 hdr->last = 1; 10595 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10596 descr->addr_ctl = 0x14; 10597 descr->track_number = 1; 10598 if (msf) 10599 ctl_ultomsf(0, descr->track_start); 10600 else 10601 scsi_ulto4b(0, descr->track_start); 10602 } 10603 10604 ctl_set_success(ctsio); 10605 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10606 ctsio->be_move_done = ctl_config_move_done; 10607 ctl_datamove((union ctl_io *)ctsio); 10608 return (CTL_RETVAL_COMPLETE); 10609 } 10610 10611 /* 10612 * For known CDB types, parse the LBA and length. 10613 */ 10614 static int 10615 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10616 { 10617 if (io->io_hdr.io_type != CTL_IO_SCSI) 10618 return (1); 10619 10620 switch (io->scsiio.cdb[0]) { 10621 case COMPARE_AND_WRITE: { 10622 struct scsi_compare_and_write *cdb; 10623 10624 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10625 10626 *lba = scsi_8btou64(cdb->addr); 10627 *len = cdb->length; 10628 break; 10629 } 10630 case READ_6: 10631 case WRITE_6: { 10632 struct scsi_rw_6 *cdb; 10633 10634 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10635 10636 *lba = scsi_3btoul(cdb->addr); 10637 /* only 5 bits are valid in the most significant address byte */ 10638 *lba &= 0x1fffff; 10639 *len = cdb->length; 10640 break; 10641 } 10642 case READ_10: 10643 case WRITE_10: { 10644 struct scsi_rw_10 *cdb; 10645 10646 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10647 10648 *lba = scsi_4btoul(cdb->addr); 10649 *len = scsi_2btoul(cdb->length); 10650 break; 10651 } 10652 case WRITE_VERIFY_10: { 10653 struct scsi_write_verify_10 *cdb; 10654 10655 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10656 10657 *lba = scsi_4btoul(cdb->addr); 10658 *len = scsi_2btoul(cdb->length); 10659 break; 10660 } 10661 case READ_12: 10662 case WRITE_12: { 10663 struct scsi_rw_12 *cdb; 10664 10665 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10666 10667 *lba = scsi_4btoul(cdb->addr); 10668 *len = scsi_4btoul(cdb->length); 10669 break; 10670 } 10671 case WRITE_VERIFY_12: { 10672 struct scsi_write_verify_12 *cdb; 10673 10674 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10675 10676 *lba = scsi_4btoul(cdb->addr); 10677 *len = scsi_4btoul(cdb->length); 10678 break; 10679 } 10680 case READ_16: 10681 case WRITE_16: { 10682 struct scsi_rw_16 *cdb; 10683 10684 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10685 10686 *lba = scsi_8btou64(cdb->addr); 10687 *len = scsi_4btoul(cdb->length); 10688 break; 10689 } 10690 case WRITE_ATOMIC_16: { 10691 struct scsi_write_atomic_16 *cdb; 10692 10693 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10694 10695 *lba = scsi_8btou64(cdb->addr); 10696 *len = scsi_2btoul(cdb->length); 10697 break; 10698 } 10699 case WRITE_VERIFY_16: { 10700 struct scsi_write_verify_16 *cdb; 10701 10702 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10703 10704 *lba = scsi_8btou64(cdb->addr); 10705 *len = scsi_4btoul(cdb->length); 10706 break; 10707 } 10708 case WRITE_SAME_10: { 10709 struct scsi_write_same_10 *cdb; 10710 10711 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10712 10713 *lba = scsi_4btoul(cdb->addr); 10714 *len = scsi_2btoul(cdb->length); 10715 break; 10716 } 10717 case WRITE_SAME_16: { 10718 struct scsi_write_same_16 *cdb; 10719 10720 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10721 10722 *lba = scsi_8btou64(cdb->addr); 10723 *len = scsi_4btoul(cdb->length); 10724 break; 10725 } 10726 case VERIFY_10: { 10727 struct scsi_verify_10 *cdb; 10728 10729 cdb = 
(struct scsi_verify_10 *)io->scsiio.cdb; 10730 10731 *lba = scsi_4btoul(cdb->addr); 10732 *len = scsi_2btoul(cdb->length); 10733 break; 10734 } 10735 case VERIFY_12: { 10736 struct scsi_verify_12 *cdb; 10737 10738 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10739 10740 *lba = scsi_4btoul(cdb->addr); 10741 *len = scsi_4btoul(cdb->length); 10742 break; 10743 } 10744 case VERIFY_16: { 10745 struct scsi_verify_16 *cdb; 10746 10747 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10748 10749 *lba = scsi_8btou64(cdb->addr); 10750 *len = scsi_4btoul(cdb->length); 10751 break; 10752 } 10753 case UNMAP: { 10754 *lba = 0; 10755 *len = UINT64_MAX; 10756 break; 10757 } 10758 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10759 struct scsi_get_lba_status *cdb; 10760 10761 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10762 *lba = scsi_8btou64(cdb->addr); 10763 *len = UINT32_MAX; 10764 break; 10765 } 10766 default: 10767 return (1); 10768 break; /* NOTREACHED */ 10769 } 10770 10771 return (0); 10772 } 10773 10774 static ctl_action 10775 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10776 bool seq) 10777 { 10778 uint64_t endlba1, endlba2; 10779 10780 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10781 endlba2 = lba2 + len2 - 1; 10782 10783 if ((endlba1 < lba2) || (endlba2 < lba1)) 10784 return (CTL_ACTION_PASS); 10785 else 10786 return (CTL_ACTION_BLOCK); 10787 } 10788 10789 static int 10790 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10791 { 10792 struct ctl_ptr_len_flags *ptrlen; 10793 struct scsi_unmap_desc *buf, *end, *range; 10794 uint64_t lba; 10795 uint32_t len; 10796 10797 /* If not UNMAP -- go other way. */ 10798 if (io->io_hdr.io_type != CTL_IO_SCSI || 10799 io->scsiio.cdb[0] != UNMAP) 10800 return (CTL_ACTION_ERROR); 10801 10802 /* If UNMAP without data -- block and wait for data. */ 10803 ptrlen = (struct ctl_ptr_len_flags *) 10804 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10805 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10806 ptrlen->ptr == NULL) 10807 return (CTL_ACTION_BLOCK); 10808 10809 /* UNMAP with data -- check for collision. 
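 * We only need to block if one of the descriptors below actually overlaps
 * the [lba2, lba2 + len2) extent of the other I/O; otherwise pass.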
*/ 10810 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10811 end = buf + ptrlen->len / sizeof(*buf); 10812 for (range = buf; range < end; range++) { 10813 lba = scsi_8btou64(range->lba); 10814 len = scsi_4btoul(range->length); 10815 if ((lba < lba2 + len2) && (lba + len > lba2)) 10816 return (CTL_ACTION_BLOCK); 10817 } 10818 return (CTL_ACTION_PASS); 10819 } 10820 10821 static ctl_action 10822 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10823 { 10824 uint64_t lba1, lba2; 10825 uint64_t len1, len2; 10826 int retval; 10827 10828 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10829 return (CTL_ACTION_ERROR); 10830 10831 retval = ctl_extent_check_unmap(io1, lba2, len2); 10832 if (retval != CTL_ACTION_ERROR) 10833 return (retval); 10834 10835 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10836 return (CTL_ACTION_ERROR); 10837 10838 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10839 seq = FALSE; 10840 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10841 } 10842 10843 static ctl_action 10844 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10845 { 10846 uint64_t lba1, lba2; 10847 uint64_t len1, len2; 10848 10849 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10850 return (CTL_ACTION_PASS); 10851 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10852 return (CTL_ACTION_ERROR); 10853 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10854 return (CTL_ACTION_ERROR); 10855 10856 if (lba1 + len1 == lba2) 10857 return (CTL_ACTION_BLOCK); 10858 return (CTL_ACTION_PASS); 10859 } 10860 10861 static ctl_action 10862 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10863 union ctl_io *ooa_io) 10864 { 10865 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10866 const ctl_serialize_action *serialize_row; 10867 10868 /* 10869 * Aborted commands are not going to be executed and may even 10870 * not report completion, so we don't care about their order. 10871 * Let them complete ASAP to clean the OOA queue. 10872 */ 10873 if (pending_io->io_hdr.flags & CTL_FLAG_ABORT) 10874 return (CTL_ACTION_SKIP); 10875 10876 /* 10877 * The initiator attempted multiple untagged commands at the same 10878 * time. Can't do that. 10879 */ 10880 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10881 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10882 && ((pending_io->io_hdr.nexus.targ_port == 10883 ooa_io->io_hdr.nexus.targ_port) 10884 && (pending_io->io_hdr.nexus.initid == 10885 ooa_io->io_hdr.nexus.initid)) 10886 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10887 CTL_FLAG_STATUS_SENT)) == 0)) 10888 return (CTL_ACTION_OVERLAP); 10889 10890 /* 10891 * The initiator attempted to send multiple tagged commands with 10892 * the same ID. (It's fine if different initiators have the same 10893 * tag ID.) 10894 * 10895 * Even if all of those conditions are true, we don't kill the I/O 10896 * if the command ahead of us has been aborted. We won't end up 10897 * sending it to the FETD, and it's perfectly legal to resend a 10898 * command with the same tag number as long as the previous 10899 * instance of this tag number has been aborted somehow. 
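 *
 * That is also why the check below requires that the earlier command has
 * neither the ABORT nor the STATUS_SENT flag set.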
10900 */ 10901 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10902 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10903 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10904 && ((pending_io->io_hdr.nexus.targ_port == 10905 ooa_io->io_hdr.nexus.targ_port) 10906 && (pending_io->io_hdr.nexus.initid == 10907 ooa_io->io_hdr.nexus.initid)) 10908 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10909 CTL_FLAG_STATUS_SENT)) == 0)) 10910 return (CTL_ACTION_OVERLAP_TAG); 10911 10912 /* 10913 * If we get a head of queue tag, SAM-3 says that we should 10914 * immediately execute it. 10915 * 10916 * What happens if this command would normally block for some other 10917 * reason? e.g. a request sense with a head of queue tag 10918 * immediately after a write. Normally that would block, but this 10919 * will result in its getting executed immediately... 10920 * 10921 * We currently return "pass" instead of "skip", so we'll end up 10922 * going through the rest of the queue to check for overlapped tags. 10923 * 10924 * XXX KDM check for other types of blockage first?? 10925 */ 10926 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10927 return (CTL_ACTION_PASS); 10928 10929 /* 10930 * Ordered tags have to block until all items ahead of them 10931 * have completed. If we get called with an ordered tag, we always 10932 * block, if something else is ahead of us in the queue. 10933 */ 10934 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10935 return (CTL_ACTION_BLOCK); 10936 10937 /* 10938 * Simple tags get blocked until all head of queue and ordered tags 10939 * ahead of them have completed. I'm lumping untagged commands in 10940 * with simple tags here. XXX KDM is that the right thing to do? 10941 */ 10942 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10943 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10944 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10945 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10946 return (CTL_ACTION_BLOCK); 10947 10948 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10949 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 10950 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 10951 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 10952 pending_io->scsiio.cdb[1], pending_io)); 10953 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10954 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 10955 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 10956 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 10957 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 10958 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 10959 ooa_io->scsiio.cdb[1], ooa_io)); 10960 10961 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10962 10963 switch (serialize_row[pending_entry->seridx]) { 10964 case CTL_SER_BLOCK: 10965 return (CTL_ACTION_BLOCK); 10966 case CTL_SER_EXTENT: 10967 return (ctl_extent_check(ooa_io, pending_io, 10968 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10969 case CTL_SER_EXTENTOPT: 10970 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10971 SCP_QUEUE_ALG_UNRESTRICTED) 10972 return (ctl_extent_check(ooa_io, pending_io, 10973 (lun->be_lun && 10974 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10975 return (CTL_ACTION_PASS); 10976 case CTL_SER_EXTENTSEQ: 10977 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10978 return (ctl_extent_check_seq(ooa_io, pending_io)); 10979 return (CTL_ACTION_PASS); 10980 case CTL_SER_PASS: 10981 return 
(CTL_ACTION_PASS); 10982 case CTL_SER_BLOCKOPT: 10983 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10984 SCP_QUEUE_ALG_UNRESTRICTED) 10985 return (CTL_ACTION_BLOCK); 10986 return (CTL_ACTION_PASS); 10987 case CTL_SER_SKIP: 10988 return (CTL_ACTION_SKIP); 10989 default: 10990 panic("%s: Invalid serialization value %d for %d => %d", 10991 __func__, serialize_row[pending_entry->seridx], 10992 pending_entry->seridx, ooa_entry->seridx); 10993 } 10994 10995 return (CTL_ACTION_ERROR); 10996 } 10997 10998 /* 10999 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11000 * Assumptions: 11001 * - pending_io is generally either incoming, or on the blocked queue 11002 * - starting I/O is the I/O we want to start the check with. 11003 */ 11004 static ctl_action 11005 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11006 union ctl_io **starting_io) 11007 { 11008 union ctl_io *ooa_io; 11009 ctl_action action; 11010 11011 mtx_assert(&lun->lun_lock, MA_OWNED); 11012 11013 /* 11014 * Run back along the OOA queue, starting with the current 11015 * blocked I/O and going through every I/O before it on the 11016 * queue. If starting_io is NULL, we'll just end up returning 11017 * CTL_ACTION_PASS. 11018 */ 11019 for (ooa_io = *starting_io; ooa_io != NULL; 11020 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 11021 ooa_links)){ 11022 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11023 if (action != CTL_ACTION_PASS) { 11024 *starting_io = ooa_io; 11025 return (action); 11026 } 11027 } 11028 11029 *starting_io = NULL; 11030 return (CTL_ACTION_PASS); 11031 } 11032 11033 /* 11034 * Try to unblock the specified I/O. 11035 * 11036 * skip parameter allows explicitly skip present blocker of the I/O, 11037 * starting from the previous one on OOA queue. It can be used when 11038 * we know for sure that the blocker I/O does no longer count. 11039 */ 11040 static void 11041 ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) 11042 { 11043 struct ctl_softc *softc = lun->ctl_softc; 11044 union ctl_io *bio, *obio; 11045 const struct ctl_cmd_entry *entry; 11046 union ctl_ha_msg msg_info; 11047 ctl_action action; 11048 11049 mtx_assert(&lun->lun_lock, MA_OWNED); 11050 11051 if (io->io_hdr.blocker == NULL) 11052 return; 11053 11054 obio = bio = io->io_hdr.blocker; 11055 if (skip) 11056 bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq, 11057 ooa_links); 11058 action = ctl_check_ooa(lun, io, &bio); 11059 if (action == CTL_ACTION_BLOCK) { 11060 /* Still blocked, but may be by different I/O now. */ 11061 if (bio != obio) { 11062 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, 11063 &io->io_hdr, blocked_links); 11064 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, 11065 &io->io_hdr, blocked_links); 11066 io->io_hdr.blocker = bio; 11067 } 11068 return; 11069 } 11070 11071 /* No longer blocked, one way or another. */ 11072 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); 11073 io->io_hdr.blocker = NULL; 11074 11075 switch (action) { 11076 case CTL_ACTION_OVERLAP: 11077 ctl_set_overlapped_cmd(&io->scsiio); 11078 goto error; 11079 case CTL_ACTION_OVERLAP_TAG: 11080 ctl_set_overlapped_tag(&io->scsiio, 11081 io->scsiio.tag_num & 0xff); 11082 goto error; 11083 case CTL_ACTION_PASS: 11084 case CTL_ACTION_SKIP: 11085 11086 /* Serializing commands from the other SC retire there. 
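 * In non-XFER HA modes the peer's serializing command is not executed
 * here; we just clear IO_ACTIVE and send an R2R message back to the
 * originating SC.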
*/ 11087 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11088 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11089 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11090 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11091 msg_info.hdr.serializing_sc = io; 11092 msg_info.hdr.msg_type = CTL_MSG_R2R; 11093 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11094 sizeof(msg_info.hdr), M_NOWAIT); 11095 break; 11096 } 11097 11098 /* 11099 * Check this I/O for LUN state changes that may have happened 11100 * while this command was blocked. The LUN state may have been 11101 * changed by a command ahead of us in the queue. 11102 */ 11103 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11104 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 11105 ctl_done(io); 11106 break; 11107 } 11108 11109 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11110 ctl_enqueue_rtr(io); 11111 break; 11112 case CTL_ACTION_ERROR: 11113 default: 11114 ctl_set_internal_failure(&io->scsiio, 11115 /*sks_valid*/ 0, 11116 /*retry_count*/ 0); 11117 11118 error: 11119 /* Serializing commands from the other SC are done here. */ 11120 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11121 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11122 ctl_try_unblock_others(lun, io, TRUE); 11123 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 11124 11125 ctl_copy_sense_data_back(io, &msg_info); 11126 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11127 msg_info.hdr.serializing_sc = NULL; 11128 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 11129 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11130 sizeof(msg_info.scsi), M_WAITOK); 11131 ctl_free_io(io); 11132 break; 11133 } 11134 11135 ctl_done(io); 11136 break; 11137 } 11138 } 11139 11140 /* 11141 * Try to unblock I/Os blocked by the specified I/O. 11142 * 11143 * skip parameter allows explicitly skip the specified I/O as blocker, 11144 * starting from the previous one on the OOA queue. It can be used when 11145 * we know for sure that the specified I/O does no longer count (done). 11146 * It has to be still on OOA queue though so that we know where to start. 11147 */ 11148 static void 11149 ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) 11150 { 11151 union ctl_io *io, *next_io; 11152 11153 mtx_assert(&lun->lun_lock, MA_OWNED); 11154 11155 for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); 11156 io != NULL; io = next_io) { 11157 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); 11158 11159 KASSERT(io->io_hdr.blocker != NULL, 11160 ("I/O %p on blocked list without blocker", io)); 11161 ctl_try_unblock_io(lun, io, skip); 11162 } 11163 KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), 11164 ("blocked_queue is not empty after skipping %p", bio)); 11165 } 11166 11167 /* 11168 * This routine (with one exception) checks LUN flags that can be set by 11169 * commands ahead of us in the OOA queue. These flags have to be checked 11170 * when a command initially comes in, and when we pull a command off the 11171 * blocked queue and are preparing to execute it. The reason we have to 11172 * check these flags for commands on the blocked queue is that the LUN 11173 * state may have been changed by a command ahead of us while we're on the 11174 * blocked queue. 11175 * 11176 * Ordering is somewhat important with these checks, so please pay 11177 * careful attention to the placement of any new checks. 
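 *
 * As implemented below, HA availability is checked first, then write
 * protection, then (persistent) reservations, and finally the media state
 * (ejected, no media, stopped).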
11178 */ 11179 static int 11180 ctl_scsiio_lun_check(struct ctl_lun *lun, 11181 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11182 { 11183 struct ctl_softc *softc = lun->ctl_softc; 11184 int retval; 11185 uint32_t residx; 11186 11187 retval = 0; 11188 11189 mtx_assert(&lun->lun_lock, MA_OWNED); 11190 11191 /* 11192 * If this shelf is a secondary shelf controller, we may have to 11193 * reject some commands disallowed by HA mode and link state. 11194 */ 11195 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11196 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11197 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11198 ctl_set_lun_unavail(ctsio); 11199 retval = 1; 11200 goto bailout; 11201 } 11202 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11203 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11204 ctl_set_lun_transit(ctsio); 11205 retval = 1; 11206 goto bailout; 11207 } 11208 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11209 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11210 ctl_set_lun_standby(ctsio); 11211 retval = 1; 11212 goto bailout; 11213 } 11214 11215 /* The rest of checks are only done on executing side */ 11216 if (softc->ha_mode == CTL_HA_MODE_XFER) 11217 goto bailout; 11218 } 11219 11220 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11221 if (lun->be_lun && 11222 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11223 ctl_set_hw_write_protected(ctsio); 11224 retval = 1; 11225 goto bailout; 11226 } 11227 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11228 ctl_set_sense(ctsio, /*current_error*/ 1, 11229 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11230 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11231 retval = 1; 11232 goto bailout; 11233 } 11234 } 11235 11236 /* 11237 * Check for a reservation conflict. If this command isn't allowed 11238 * even on reserved LUNs, and if this initiator isn't the one who 11239 * reserved us, reject the command with a reservation conflict. 11240 */ 11241 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11242 if ((lun->flags & CTL_LUN_RESERVED) 11243 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11244 if (lun->res_idx != residx) { 11245 ctl_set_reservation_conflict(ctsio); 11246 retval = 1; 11247 goto bailout; 11248 } 11249 } 11250 11251 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11252 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11253 /* No reservation or command is allowed. */; 11254 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11255 (lun->pr_res_type == SPR_TYPE_WR_EX || 11256 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11257 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11258 /* The command is allowed for Write Exclusive resv. */; 11259 } else { 11260 /* 11261 * if we aren't registered or it's a res holder type 11262 * reservation and this isn't the res holder then set a 11263 * conflict. 
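 * (Reservation types below 4, i.e. Write Exclusive and Exclusive Access,
 * have a single reservation holder; the registrants-only and
 * all-registrants variants do not.)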
11264 */ 11265 if (ctl_get_prkey(lun, residx) == 0 || 11266 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11267 ctl_set_reservation_conflict(ctsio); 11268 retval = 1; 11269 goto bailout; 11270 } 11271 } 11272 11273 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11274 if (lun->flags & CTL_LUN_EJECTED) 11275 ctl_set_lun_ejected(ctsio); 11276 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11277 if (lun->flags & CTL_LUN_REMOVABLE) 11278 ctl_set_lun_no_media(ctsio); 11279 else 11280 ctl_set_lun_int_reqd(ctsio); 11281 } else if (lun->flags & CTL_LUN_STOPPED) 11282 ctl_set_lun_stopped(ctsio); 11283 else 11284 goto bailout; 11285 retval = 1; 11286 goto bailout; 11287 } 11288 11289 bailout: 11290 return (retval); 11291 } 11292 11293 static void 11294 ctl_failover_io(union ctl_io *io, int have_lock) 11295 { 11296 ctl_set_busy(&io->scsiio); 11297 ctl_done(io); 11298 } 11299 11300 static void 11301 ctl_failover_lun(union ctl_io *rio) 11302 { 11303 struct ctl_softc *softc = CTL_SOFTC(rio); 11304 struct ctl_lun *lun; 11305 struct ctl_io_hdr *io, *next_io; 11306 uint32_t targ_lun; 11307 11308 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11309 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); 11310 11311 /* Find and lock the LUN. */ 11312 mtx_lock(&softc->ctl_lock); 11313 if (targ_lun > ctl_max_luns || 11314 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11315 mtx_unlock(&softc->ctl_lock); 11316 return; 11317 } 11318 mtx_lock(&lun->lun_lock); 11319 mtx_unlock(&softc->ctl_lock); 11320 if (lun->flags & CTL_LUN_DISABLED) { 11321 mtx_unlock(&lun->lun_lock); 11322 return; 11323 } 11324 11325 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11326 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11327 /* We are master */ 11328 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11329 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11330 io->flags |= CTL_FLAG_ABORT; 11331 io->flags |= CTL_FLAG_FAILOVER; 11332 ctl_try_unblock_io(lun, 11333 (union ctl_io *)io, FALSE); 11334 } else { /* This can be only due to DATAMOVE */ 11335 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11336 io->flags &= ~CTL_FLAG_DMA_INPROG; 11337 io->flags |= CTL_FLAG_IO_ACTIVE; 11338 io->port_status = 31340; 11339 ctl_enqueue_isc((union ctl_io *)io); 11340 } 11341 } else 11342 /* We are slave */ 11343 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11344 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11345 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11346 io->flags |= CTL_FLAG_FAILOVER; 11347 } else { 11348 ctl_set_busy(&((union ctl_io *)io)-> 11349 scsiio); 11350 ctl_done((union ctl_io *)io); 11351 } 11352 } 11353 } 11354 } else { /* SERIALIZE modes */ 11355 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11356 /* We are master */ 11357 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11358 if (io->blocker != NULL) { 11359 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, 11360 io, blocked_links); 11361 io->blocker = NULL; 11362 } 11363 ctl_try_unblock_others(lun, (union ctl_io *)io, 11364 TRUE); 11365 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11366 ctl_free_io((union ctl_io *)io); 11367 } else 11368 /* We are slave */ 11369 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11370 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11371 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11372 ctl_set_busy(&((union ctl_io *)io)-> 11373 scsiio); 11374 ctl_done((union ctl_io *)io); 11375 } 11376 } 11377 } 11378 } 11379 mtx_unlock(&lun->lun_lock); 11380 } 11381 11382 static int 11383 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11384 { 11385 struct ctl_lun *lun; 
11386 const struct ctl_cmd_entry *entry; 11387 union ctl_io *bio; 11388 uint32_t initidx, targ_lun; 11389 int retval = 0; 11390 11391 lun = NULL; 11392 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11393 if (targ_lun < ctl_max_luns) 11394 lun = softc->ctl_luns[targ_lun]; 11395 if (lun) { 11396 /* 11397 * If the LUN is invalid, pretend that it doesn't exist. 11398 * It will go away as soon as all pending I/O has been 11399 * completed. 11400 */ 11401 mtx_lock(&lun->lun_lock); 11402 if (lun->flags & CTL_LUN_DISABLED) { 11403 mtx_unlock(&lun->lun_lock); 11404 lun = NULL; 11405 } 11406 } 11407 CTL_LUN(ctsio) = lun; 11408 if (lun) { 11409 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11410 11411 /* 11412 * Every I/O goes into the OOA queue for a particular LUN, 11413 * and stays there until completion. 11414 */ 11415 #ifdef CTL_TIME_IO 11416 if (TAILQ_EMPTY(&lun->ooa_queue)) 11417 lun->idle_time += getsbinuptime() - lun->last_busy; 11418 #endif 11419 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11420 } 11421 11422 /* Get command entry and return error if it is unsuppotyed. */ 11423 entry = ctl_validate_command(ctsio); 11424 if (entry == NULL) { 11425 if (lun) 11426 mtx_unlock(&lun->lun_lock); 11427 return (retval); 11428 } 11429 11430 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11431 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11432 11433 /* 11434 * Check to see whether we can send this command to LUNs that don't 11435 * exist. This should pretty much only be the case for inquiry 11436 * and request sense. Further checks, below, really require having 11437 * a LUN, so we can't really check the command anymore. Just put 11438 * it on the rtr queue. 11439 */ 11440 if (lun == NULL) { 11441 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11442 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11443 ctl_enqueue_rtr((union ctl_io *)ctsio); 11444 return (retval); 11445 } 11446 11447 ctl_set_unsupported_lun(ctsio); 11448 ctl_done((union ctl_io *)ctsio); 11449 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11450 return (retval); 11451 } else { 11452 /* 11453 * Make sure we support this particular command on this LUN. 11454 * e.g., we don't support writes to the control LUN. 11455 */ 11456 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11457 mtx_unlock(&lun->lun_lock); 11458 ctl_set_invalid_opcode(ctsio); 11459 ctl_done((union ctl_io *)ctsio); 11460 return (retval); 11461 } 11462 } 11463 11464 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11465 11466 /* 11467 * If we've got a request sense, it'll clear the contingent 11468 * allegiance condition. Otherwise, if we have a CA condition for 11469 * this initiator, clear it, because it sent down a command other 11470 * than request sense. 11471 */ 11472 if (ctsio->cdb[0] != REQUEST_SENSE) { 11473 struct scsi_sense_data *ps; 11474 11475 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 11476 if (ps != NULL) 11477 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; 11478 } 11479 11480 /* 11481 * If the command has this flag set, it handles its own unit 11482 * attention reporting, we shouldn't do anything. Otherwise we 11483 * check for any pending unit attentions, and send them back to the 11484 * initiator. We only do this when a command initially comes in, 11485 * not when we pull it off the blocked queue. 
11486 * 11487 * According to SAM-3, section 5.3.2, the order that things get 11488 * presented back to the host is basically unit attentions caused 11489 * by some sort of reset event, busy status, reservation conflicts 11490 * or task set full, and finally any other status. 11491 * 11492 * One issue here is that some of the unit attentions we report 11493 * don't fall into the "reset" category (e.g. "reported luns data 11494 * has changed"). So reporting it here, before the reservation 11495 * check, may be technically wrong. I guess the only thing to do 11496 * would be to check for and report the reset events here, and then 11497 * check for the other unit attention types after we check for a 11498 * reservation conflict. 11499 * 11500 * XXX KDM need to fix this 11501 */ 11502 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11503 ctl_ua_type ua_type; 11504 u_int sense_len = 0; 11505 11506 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11507 &sense_len, SSD_TYPE_NONE); 11508 if (ua_type != CTL_UA_NONE) { 11509 mtx_unlock(&lun->lun_lock); 11510 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11511 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11512 ctsio->sense_len = sense_len; 11513 ctl_done((union ctl_io *)ctsio); 11514 return (retval); 11515 } 11516 } 11517 11518 11519 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11520 mtx_unlock(&lun->lun_lock); 11521 ctl_done((union ctl_io *)ctsio); 11522 return (retval); 11523 } 11524 11525 /* 11526 * XXX CHD this is where we want to send IO to other side if 11527 * this LUN is secondary on this SC. We will need to make a copy 11528 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11529 * the copy we send as FROM_OTHER. 11530 * We also need to stuff the address of the original IO so we can 11531 * find it easily. Something similar will need be done on the other 11532 * side so when we are done we can find the copy. 
11533 */ 11534 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11535 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11536 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11537 union ctl_ha_msg msg_info; 11538 int isc_retval; 11539 11540 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11541 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11542 mtx_unlock(&lun->lun_lock); 11543 11544 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11545 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11546 msg_info.hdr.serializing_sc = NULL; 11547 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11548 msg_info.scsi.tag_num = ctsio->tag_num; 11549 msg_info.scsi.tag_type = ctsio->tag_type; 11550 msg_info.scsi.cdb_len = ctsio->cdb_len; 11551 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11552 11553 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11554 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11555 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11556 ctl_set_busy(ctsio); 11557 ctl_done((union ctl_io *)ctsio); 11558 return (retval); 11559 } 11560 return (retval); 11561 } 11562 11563 bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); 11564 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 11565 case CTL_ACTION_BLOCK: 11566 ctsio->io_hdr.blocker = bio; 11567 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 11568 blocked_links); 11569 mtx_unlock(&lun->lun_lock); 11570 return (retval); 11571 case CTL_ACTION_PASS: 11572 case CTL_ACTION_SKIP: 11573 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11574 mtx_unlock(&lun->lun_lock); 11575 ctl_enqueue_rtr((union ctl_io *)ctsio); 11576 break; 11577 case CTL_ACTION_OVERLAP: 11578 mtx_unlock(&lun->lun_lock); 11579 ctl_set_overlapped_cmd(ctsio); 11580 ctl_done((union ctl_io *)ctsio); 11581 break; 11582 case CTL_ACTION_OVERLAP_TAG: 11583 mtx_unlock(&lun->lun_lock); 11584 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11585 ctl_done((union ctl_io *)ctsio); 11586 break; 11587 case CTL_ACTION_ERROR: 11588 default: 11589 mtx_unlock(&lun->lun_lock); 11590 ctl_set_internal_failure(ctsio, 11591 /*sks_valid*/ 0, 11592 /*retry_count*/ 0); 11593 ctl_done((union ctl_io *)ctsio); 11594 break; 11595 } 11596 return (retval); 11597 } 11598 11599 const struct ctl_cmd_entry * 11600 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11601 { 11602 const struct ctl_cmd_entry *entry; 11603 int service_action; 11604 11605 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11606 if (sa) 11607 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11608 if (entry->flags & CTL_CMD_FLAG_SA5) { 11609 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11610 entry = &((const struct ctl_cmd_entry *) 11611 entry->execute)[service_action]; 11612 } 11613 return (entry); 11614 } 11615 11616 const struct ctl_cmd_entry * 11617 ctl_validate_command(struct ctl_scsiio *ctsio) 11618 { 11619 const struct ctl_cmd_entry *entry; 11620 int i, sa; 11621 uint8_t diff; 11622 11623 entry = ctl_get_cmd_entry(ctsio, &sa); 11624 if (entry->execute == NULL) { 11625 if (sa) 11626 ctl_set_invalid_field(ctsio, 11627 /*sks_valid*/ 1, 11628 /*command*/ 1, 11629 /*field*/ 1, 11630 /*bit_valid*/ 1, 11631 /*bit*/ 4); 11632 else 11633 ctl_set_invalid_opcode(ctsio); 11634 ctl_done((union ctl_io *)ctsio); 11635 return (NULL); 11636 } 11637 KASSERT(entry->length > 0, 11638 ("Not defined length for command 0x%02x/0x%02x", 11639 ctsio->cdb[0], ctsio->cdb[1])); 11640 for (i = 1; i < entry->length; i++) { 11641 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11642 if (diff == 0) 11643 continue; 11644 
ctl_set_invalid_field(ctsio, 11645 /*sks_valid*/ 1, 11646 /*command*/ 1, 11647 /*field*/ i, 11648 /*bit_valid*/ 1, 11649 /*bit*/ fls(diff) - 1); 11650 ctl_done((union ctl_io *)ctsio); 11651 return (NULL); 11652 } 11653 return (entry); 11654 } 11655 11656 static int 11657 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11658 { 11659 11660 switch (lun_type) { 11661 case T_DIRECT: 11662 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11663 return (0); 11664 break; 11665 case T_PROCESSOR: 11666 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11667 return (0); 11668 break; 11669 case T_CDROM: 11670 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11671 return (0); 11672 break; 11673 default: 11674 return (0); 11675 } 11676 return (1); 11677 } 11678 11679 static int 11680 ctl_scsiio(struct ctl_scsiio *ctsio) 11681 { 11682 int retval; 11683 const struct ctl_cmd_entry *entry; 11684 11685 retval = CTL_RETVAL_COMPLETE; 11686 11687 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11688 11689 entry = ctl_get_cmd_entry(ctsio, NULL); 11690 11691 /* 11692 * If this I/O has been aborted, just send it straight to 11693 * ctl_done() without executing it. 11694 */ 11695 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11696 ctl_done((union ctl_io *)ctsio); 11697 goto bailout; 11698 } 11699 11700 /* 11701 * All the checks should have been handled by ctl_scsiio_precheck(). 11702 * We should be clear now to just execute the I/O. 11703 */ 11704 retval = entry->execute(ctsio); 11705 11706 bailout: 11707 return (retval); 11708 } 11709 11710 static int 11711 ctl_target_reset(union ctl_io *io) 11712 { 11713 struct ctl_softc *softc = CTL_SOFTC(io); 11714 struct ctl_port *port = CTL_PORT(io); 11715 struct ctl_lun *lun; 11716 uint32_t initidx; 11717 ctl_ua_type ua_type; 11718 11719 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11720 union ctl_ha_msg msg_info; 11721 11722 msg_info.hdr.nexus = io->io_hdr.nexus; 11723 msg_info.task.task_action = io->taskio.task_action; 11724 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11725 msg_info.hdr.original_sc = NULL; 11726 msg_info.hdr.serializing_sc = NULL; 11727 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11728 sizeof(msg_info.task), M_WAITOK); 11729 } 11730 11731 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11732 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11733 ua_type = CTL_UA_TARG_RESET; 11734 else 11735 ua_type = CTL_UA_BUS_RESET; 11736 mtx_lock(&softc->ctl_lock); 11737 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11738 if (port != NULL && 11739 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11740 continue; 11741 ctl_do_lun_reset(lun, initidx, ua_type); 11742 } 11743 mtx_unlock(&softc->ctl_lock); 11744 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11745 return (0); 11746 } 11747 11748 /* 11749 * The LUN should always be set. The I/O is optional, and is used to 11750 * distinguish between I/Os sent by this initiator, and by other 11751 * initiators. We set unit attention for initiators other than this one. 11752 * SAM-3 is vague on this point. It does say that a unit attention should 11753 * be established for other initiators when a LUN is reset (see section 11754 * 5.7.3), but it doesn't specifically say that the unit attention should 11755 * be established for this particular initiator when a LUN is reset. 
Here 11756 * is the relevant text, from SAM-3 rev 8: 11757 * 11758 * 5.7.2 When a SCSI initiator port aborts its own tasks 11759 * 11760 * When a SCSI initiator port causes its own task(s) to be aborted, no 11761 * notification that the task(s) have been aborted shall be returned to 11762 * the SCSI initiator port other than the completion response for the 11763 * command or task management function action that caused the task(s) to 11764 * be aborted and notification(s) associated with related effects of the 11765 * action (e.g., a reset unit attention condition). 11766 * 11767 * XXX KDM for now, we're setting unit attention for all initiators. 11768 */ 11769 static void 11770 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11771 { 11772 union ctl_io *xio; 11773 int i; 11774 11775 mtx_lock(&lun->lun_lock); 11776 /* Abort tasks. */ 11777 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11778 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11779 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11780 ctl_try_unblock_io(lun, xio, FALSE); 11781 } 11782 /* Clear CA. */ 11783 for (i = 0; i < ctl_max_ports; i++) { 11784 free(lun->pending_sense[i], M_CTL); 11785 lun->pending_sense[i] = NULL; 11786 } 11787 /* Clear reservation. */ 11788 lun->flags &= ~CTL_LUN_RESERVED; 11789 /* Clear prevent media removal. */ 11790 if (lun->prevent) { 11791 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11792 ctl_clear_mask(lun->prevent, i); 11793 lun->prevent_count = 0; 11794 } 11795 /* Clear TPC status */ 11796 ctl_tpc_lun_clear(lun, -1); 11797 /* Establish UA. */ 11798 #if 0 11799 ctl_est_ua_all(lun, initidx, ua_type); 11800 #else 11801 ctl_est_ua_all(lun, -1, ua_type); 11802 #endif 11803 mtx_unlock(&lun->lun_lock); 11804 } 11805 11806 static int 11807 ctl_lun_reset(union ctl_io *io) 11808 { 11809 struct ctl_softc *softc = CTL_SOFTC(io); 11810 struct ctl_lun *lun; 11811 uint32_t targ_lun, initidx; 11812 11813 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11814 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11815 mtx_lock(&softc->ctl_lock); 11816 if (targ_lun >= ctl_max_luns || 11817 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11818 mtx_unlock(&softc->ctl_lock); 11819 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11820 return (1); 11821 } 11822 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11823 mtx_unlock(&softc->ctl_lock); 11824 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11825 11826 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11827 union ctl_ha_msg msg_info; 11828 11829 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11830 msg_info.hdr.nexus = io->io_hdr.nexus; 11831 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11832 msg_info.hdr.original_sc = NULL; 11833 msg_info.hdr.serializing_sc = NULL; 11834 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11835 sizeof(msg_info.task), M_WAITOK); 11836 } 11837 return (0); 11838 } 11839 11840 static void 11841 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11842 int other_sc) 11843 { 11844 union ctl_io *xio; 11845 11846 mtx_assert(&lun->lun_lock, MA_OWNED); 11847 11848 /* 11849 * Run through the OOA queue and attempt to find the given I/O. 11850 * The target port, initiator ID, tag type and tag number have to 11851 * match the values that we got from the initiator. If we have an 11852 * untagged command to abort, simply abort the first untagged command 11853 * we come to. We only allow one untagged command at a time of course. 
11854 */ 11855 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11856 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11857 11858 if ((targ_port == UINT32_MAX || 11859 targ_port == xio->io_hdr.nexus.targ_port) && 11860 (init_id == UINT32_MAX || 11861 init_id == xio->io_hdr.nexus.initid)) { 11862 if (targ_port != xio->io_hdr.nexus.targ_port || 11863 init_id != xio->io_hdr.nexus.initid) 11864 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11865 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11866 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11867 union ctl_ha_msg msg_info; 11868 11869 msg_info.hdr.nexus = xio->io_hdr.nexus; 11870 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11871 msg_info.task.tag_num = xio->scsiio.tag_num; 11872 msg_info.task.tag_type = xio->scsiio.tag_type; 11873 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11874 msg_info.hdr.original_sc = NULL; 11875 msg_info.hdr.serializing_sc = NULL; 11876 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11877 sizeof(msg_info.task), M_NOWAIT); 11878 } 11879 ctl_try_unblock_io(lun, xio, FALSE); 11880 } 11881 } 11882 } 11883 11884 static int 11885 ctl_abort_task_set(union ctl_io *io) 11886 { 11887 struct ctl_softc *softc = CTL_SOFTC(io); 11888 struct ctl_lun *lun; 11889 uint32_t targ_lun; 11890 11891 /* 11892 * Look up the LUN. 11893 */ 11894 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11895 mtx_lock(&softc->ctl_lock); 11896 if (targ_lun >= ctl_max_luns || 11897 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11898 mtx_unlock(&softc->ctl_lock); 11899 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11900 return (1); 11901 } 11902 11903 mtx_lock(&lun->lun_lock); 11904 mtx_unlock(&softc->ctl_lock); 11905 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11906 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11907 io->io_hdr.nexus.initid, 11908 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11909 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11910 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11911 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11912 } 11913 mtx_unlock(&lun->lun_lock); 11914 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11915 return (0); 11916 } 11917 11918 static void 11919 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11920 ctl_ua_type ua_type) 11921 { 11922 struct ctl_lun *lun; 11923 struct scsi_sense_data *ps; 11924 uint32_t p, i; 11925 11926 p = initidx / CTL_MAX_INIT_PER_PORT; 11927 i = initidx % CTL_MAX_INIT_PER_PORT; 11928 mtx_lock(&softc->ctl_lock); 11929 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11930 mtx_lock(&lun->lun_lock); 11931 /* Abort tasks. */ 11932 ctl_abort_tasks_lun(lun, p, i, 1); 11933 /* Clear CA. */ 11934 ps = lun->pending_sense[p]; 11935 if (ps != NULL) 11936 ps[i].error_code = 0; 11937 /* Clear reservation. */ 11938 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11939 lun->flags &= ~CTL_LUN_RESERVED; 11940 /* Clear prevent media removal. */ 11941 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11942 ctl_clear_mask(lun->prevent, initidx); 11943 lun->prevent_count--; 11944 } 11945 /* Clear TPC status */ 11946 ctl_tpc_lun_clear(lun, initidx); 11947 /* Establish UA. 
*/ 11948 ctl_est_ua(lun, initidx, ua_type); 11949 mtx_unlock(&lun->lun_lock); 11950 } 11951 mtx_unlock(&softc->ctl_lock); 11952 } 11953 11954 static int 11955 ctl_i_t_nexus_reset(union ctl_io *io) 11956 { 11957 struct ctl_softc *softc = CTL_SOFTC(io); 11958 uint32_t initidx; 11959 11960 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11961 union ctl_ha_msg msg_info; 11962 11963 msg_info.hdr.nexus = io->io_hdr.nexus; 11964 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11965 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11966 msg_info.hdr.original_sc = NULL; 11967 msg_info.hdr.serializing_sc = NULL; 11968 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11969 sizeof(msg_info.task), M_WAITOK); 11970 } 11971 11972 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11973 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 11974 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11975 return (0); 11976 } 11977 11978 static int 11979 ctl_abort_task(union ctl_io *io) 11980 { 11981 struct ctl_softc *softc = CTL_SOFTC(io); 11982 union ctl_io *xio; 11983 struct ctl_lun *lun; 11984 uint32_t targ_lun; 11985 11986 /* 11987 * Look up the LUN. 11988 */ 11989 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11990 mtx_lock(&softc->ctl_lock); 11991 if (targ_lun >= ctl_max_luns || 11992 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11993 mtx_unlock(&softc->ctl_lock); 11994 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11995 return (1); 11996 } 11997 11998 mtx_lock(&lun->lun_lock); 11999 mtx_unlock(&softc->ctl_lock); 12000 /* 12001 * Run through the OOA queue and attempt to find the given I/O. 12002 * The target port, initiator ID, tag type and tag number have to 12003 * match the values that we got from the initiator. If we have an 12004 * untagged command to abort, simply abort the first untagged command 12005 * we come to. We only allow one untagged command at a time of course. 12006 */ 12007 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12008 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12009 12010 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12011 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12012 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12013 continue; 12014 12015 /* 12016 * If the abort says that the task is untagged, the 12017 * task in the queue must be untagged. Otherwise, 12018 * we just check to see whether the tag numbers 12019 * match. This is because the QLogic firmware 12020 * doesn't pass back the tag type in an abort 12021 * request. 12022 */ 12023 #if 0 12024 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12025 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12026 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12027 #else 12028 /* 12029 * XXX KDM we've got problems with FC, because it 12030 * doesn't send down a tag type with aborts. So we 12031 * can only really go by the tag number... 12032 * This may cause problems with parallel SCSI. 12033 * Need to figure that out!! 
12034 */ 12035 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12036 #endif 12037 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12038 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12039 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12040 union ctl_ha_msg msg_info; 12041 12042 msg_info.hdr.nexus = io->io_hdr.nexus; 12043 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12044 msg_info.task.tag_num = io->taskio.tag_num; 12045 msg_info.task.tag_type = io->taskio.tag_type; 12046 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12047 msg_info.hdr.original_sc = NULL; 12048 msg_info.hdr.serializing_sc = NULL; 12049 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12050 sizeof(msg_info.task), M_NOWAIT); 12051 } 12052 ctl_try_unblock_io(lun, xio, FALSE); 12053 } 12054 } 12055 mtx_unlock(&lun->lun_lock); 12056 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12057 return (0); 12058 } 12059 12060 static int 12061 ctl_query_task(union ctl_io *io, int task_set) 12062 { 12063 struct ctl_softc *softc = CTL_SOFTC(io); 12064 union ctl_io *xio; 12065 struct ctl_lun *lun; 12066 int found = 0; 12067 uint32_t targ_lun; 12068 12069 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12070 mtx_lock(&softc->ctl_lock); 12071 if (targ_lun >= ctl_max_luns || 12072 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12073 mtx_unlock(&softc->ctl_lock); 12074 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12075 return (1); 12076 } 12077 mtx_lock(&lun->lun_lock); 12078 mtx_unlock(&softc->ctl_lock); 12079 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12080 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12081 12082 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12083 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12084 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12085 continue; 12086 12087 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12088 found = 1; 12089 break; 12090 } 12091 } 12092 mtx_unlock(&lun->lun_lock); 12093 if (found) 12094 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12095 else 12096 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12097 return (0); 12098 } 12099 12100 static int 12101 ctl_query_async_event(union ctl_io *io) 12102 { 12103 struct ctl_softc *softc = CTL_SOFTC(io); 12104 struct ctl_lun *lun; 12105 ctl_ua_type ua; 12106 uint32_t targ_lun, initidx; 12107 12108 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12109 mtx_lock(&softc->ctl_lock); 12110 if (targ_lun >= ctl_max_luns || 12111 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12112 mtx_unlock(&softc->ctl_lock); 12113 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12114 return (1); 12115 } 12116 mtx_lock(&lun->lun_lock); 12117 mtx_unlock(&softc->ctl_lock); 12118 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12119 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12120 mtx_unlock(&lun->lun_lock); 12121 if (ua != CTL_UA_NONE) 12122 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12123 else 12124 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12125 return (0); 12126 } 12127 12128 static void 12129 ctl_run_task(union ctl_io *io) 12130 { 12131 int retval = 1; 12132 12133 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12134 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12135 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12136 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12137 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12138 switch (io->taskio.task_action) { 12139 case CTL_TASK_ABORT_TASK: 12140 retval = ctl_abort_task(io); 
12141 break; 12142 case CTL_TASK_ABORT_TASK_SET: 12143 case CTL_TASK_CLEAR_TASK_SET: 12144 retval = ctl_abort_task_set(io); 12145 break; 12146 case CTL_TASK_CLEAR_ACA: 12147 break; 12148 case CTL_TASK_I_T_NEXUS_RESET: 12149 retval = ctl_i_t_nexus_reset(io); 12150 break; 12151 case CTL_TASK_LUN_RESET: 12152 retval = ctl_lun_reset(io); 12153 break; 12154 case CTL_TASK_TARGET_RESET: 12155 case CTL_TASK_BUS_RESET: 12156 retval = ctl_target_reset(io); 12157 break; 12158 case CTL_TASK_PORT_LOGIN: 12159 break; 12160 case CTL_TASK_PORT_LOGOUT: 12161 break; 12162 case CTL_TASK_QUERY_TASK: 12163 retval = ctl_query_task(io, 0); 12164 break; 12165 case CTL_TASK_QUERY_TASK_SET: 12166 retval = ctl_query_task(io, 1); 12167 break; 12168 case CTL_TASK_QUERY_ASYNC_EVENT: 12169 retval = ctl_query_async_event(io); 12170 break; 12171 default: 12172 printf("%s: got unknown task management event %d\n", 12173 __func__, io->taskio.task_action); 12174 break; 12175 } 12176 if (retval == 0) 12177 io->io_hdr.status = CTL_SUCCESS; 12178 else 12179 io->io_hdr.status = CTL_ERROR; 12180 ctl_done(io); 12181 } 12182 12183 /* 12184 * For HA operation. Handle commands that come in from the other 12185 * controller. 12186 */ 12187 static void 12188 ctl_handle_isc(union ctl_io *io) 12189 { 12190 struct ctl_softc *softc = CTL_SOFTC(io); 12191 struct ctl_lun *lun; 12192 const struct ctl_cmd_entry *entry; 12193 uint32_t targ_lun; 12194 12195 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12196 switch (io->io_hdr.msg_type) { 12197 case CTL_MSG_SERIALIZE: 12198 ctl_serialize_other_sc_cmd(&io->scsiio); 12199 break; 12200 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12201 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12202 if (targ_lun >= ctl_max_luns || 12203 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12204 ctl_done(io); 12205 break; 12206 } 12207 mtx_lock(&lun->lun_lock); 12208 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12209 mtx_unlock(&lun->lun_lock); 12210 ctl_done(io); 12211 break; 12212 } 12213 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12214 mtx_unlock(&lun->lun_lock); 12215 ctl_enqueue_rtr(io); 12216 break; 12217 case CTL_MSG_FINISH_IO: 12218 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12219 ctl_done(io); 12220 break; 12221 } 12222 if (targ_lun >= ctl_max_luns || 12223 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12224 ctl_free_io(io); 12225 break; 12226 } 12227 mtx_lock(&lun->lun_lock); 12228 ctl_try_unblock_others(lun, io, TRUE); 12229 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12230 mtx_unlock(&lun->lun_lock); 12231 ctl_free_io(io); 12232 break; 12233 case CTL_MSG_PERS_ACTION: 12234 ctl_hndl_per_res_out_on_other_sc(io); 12235 ctl_free_io(io); 12236 break; 12237 case CTL_MSG_BAD_JUJU: 12238 ctl_done(io); 12239 break; 12240 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12241 ctl_datamove_remote(io); 12242 break; 12243 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12244 io->scsiio.be_move_done(io); 12245 break; 12246 case CTL_MSG_FAILOVER: 12247 ctl_failover_lun(io); 12248 ctl_free_io(io); 12249 break; 12250 default: 12251 printf("%s: Invalid message type %d\n", 12252 __func__, io->io_hdr.msg_type); 12253 ctl_free_io(io); 12254 break; 12255 } 12256 12257 } 12258 12259 12260 /* 12261 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12262 * there is no match. 
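 *
 * Informal example: a descriptor whose error_pattern is
 * CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE matches a WRITE-class command
 * only when that command's LBA range overlaps desc->lba_range, while a
 * descriptor of CTL_LUN_PAT_ANY matches any command.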
12263 */ 12264 static ctl_lun_error_pattern 12265 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12266 { 12267 const struct ctl_cmd_entry *entry; 12268 ctl_lun_error_pattern filtered_pattern, pattern; 12269 12270 pattern = desc->error_pattern; 12271 12272 /* 12273 * XXX KDM we need more data passed into this function to match a 12274 * custom pattern, and we actually need to implement custom pattern 12275 * matching. 12276 */ 12277 if (pattern & CTL_LUN_PAT_CMD) 12278 return (CTL_LUN_PAT_CMD); 12279 12280 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12281 return (CTL_LUN_PAT_ANY); 12282 12283 entry = ctl_get_cmd_entry(ctsio, NULL); 12284 12285 filtered_pattern = entry->pattern & pattern; 12286 12287 /* 12288 * If the user requested specific flags in the pattern (e.g. 12289 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12290 * flags. 12291 * 12292 * If the user did not specify any flags, it doesn't matter whether 12293 * or not the command supports the flags. 12294 */ 12295 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12296 (pattern & ~CTL_LUN_PAT_MASK)) 12297 return (CTL_LUN_PAT_NONE); 12298 12299 /* 12300 * If the user asked for a range check, see if the requested LBA 12301 * range overlaps with this command's LBA range. 12302 */ 12303 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12304 uint64_t lba1; 12305 uint64_t len1; 12306 ctl_action action; 12307 int retval; 12308 12309 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12310 if (retval != 0) 12311 return (CTL_LUN_PAT_NONE); 12312 12313 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12314 desc->lba_range.len, FALSE); 12315 /* 12316 * A "pass" means that the LBA ranges don't overlap, so 12317 * this doesn't match the user's range criteria. 12318 */ 12319 if (action == CTL_ACTION_PASS) 12320 return (CTL_LUN_PAT_NONE); 12321 } 12322 12323 return (filtered_pattern); 12324 } 12325 12326 static void 12327 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12328 { 12329 struct ctl_error_desc *desc, *desc2; 12330 12331 mtx_assert(&lun->lun_lock, MA_OWNED); 12332 12333 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12334 ctl_lun_error_pattern pattern; 12335 /* 12336 * Check to see whether this particular command matches 12337 * the pattern in the descriptor. 12338 */ 12339 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12340 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12341 continue; 12342 12343 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12344 case CTL_LUN_INJ_ABORTED: 12345 ctl_set_aborted(&io->scsiio); 12346 break; 12347 case CTL_LUN_INJ_MEDIUM_ERR: 12348 ctl_set_medium_error(&io->scsiio, 12349 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12350 CTL_FLAG_DATA_OUT); 12351 break; 12352 case CTL_LUN_INJ_UA: 12353 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12354 * OCCURRED */ 12355 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12356 break; 12357 case CTL_LUN_INJ_CUSTOM: 12358 /* 12359 * We're assuming the user knows what he is doing. 12360 * Just copy the sense information without doing 12361 * checks. 
12362 */ 12363 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12364 MIN(sizeof(desc->custom_sense), 12365 sizeof(io->scsiio.sense_data))); 12366 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12367 io->scsiio.sense_len = SSD_FULL_SIZE; 12368 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12369 break; 12370 case CTL_LUN_INJ_NONE: 12371 default: 12372 /* 12373 * If this is an error injection type we don't know 12374 * about, clear the continuous flag (if it is set) 12375 * so it will get deleted below. 12376 */ 12377 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12378 break; 12379 } 12380 /* 12381 * By default, each error injection action is a one-shot 12382 */ 12383 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12384 continue; 12385 12386 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12387 12388 free(desc, M_CTL); 12389 } 12390 } 12391 12392 #ifdef CTL_IO_DELAY 12393 static void 12394 ctl_datamove_timer_wakeup(void *arg) 12395 { 12396 union ctl_io *io; 12397 12398 io = (union ctl_io *)arg; 12399 12400 ctl_datamove(io); 12401 } 12402 #endif /* CTL_IO_DELAY */ 12403 12404 void 12405 ctl_datamove(union ctl_io *io) 12406 { 12407 void (*fe_datamove)(union ctl_io *io); 12408 12409 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12410 12411 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12412 12413 /* No data transferred yet. Frontend must update this when done. */ 12414 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12415 12416 #ifdef CTL_TIME_IO 12417 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12418 char str[256]; 12419 char path_str[64]; 12420 struct sbuf sb; 12421 12422 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12423 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12424 12425 sbuf_cat(&sb, path_str); 12426 switch (io->io_hdr.io_type) { 12427 case CTL_IO_SCSI: 12428 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12429 sbuf_printf(&sb, "\n"); 12430 sbuf_cat(&sb, path_str); 12431 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12432 io->scsiio.tag_num, io->scsiio.tag_type); 12433 break; 12434 case CTL_IO_TASK: 12435 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12436 "Tag Type: %d\n", io->taskio.task_action, 12437 io->taskio.tag_num, io->taskio.tag_type); 12438 break; 12439 default: 12440 panic("%s: Invalid CTL I/O type %d\n", 12441 __func__, io->io_hdr.io_type); 12442 } 12443 sbuf_cat(&sb, path_str); 12444 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12445 (intmax_t)time_uptime - io->io_hdr.start_time); 12446 sbuf_finish(&sb); 12447 printf("%s", sbuf_data(&sb)); 12448 } 12449 #endif /* CTL_TIME_IO */ 12450 12451 #ifdef CTL_IO_DELAY 12452 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12453 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12454 } else { 12455 struct ctl_lun *lun; 12456 12457 lun = CTL_LUN(io); 12458 if ((lun != NULL) 12459 && (lun->delay_info.datamove_delay > 0)) { 12460 12461 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12462 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12463 callout_reset(&io->io_hdr.delay_callout, 12464 lun->delay_info.datamove_delay * hz, 12465 ctl_datamove_timer_wakeup, io); 12466 if (lun->delay_info.datamove_type == 12467 CTL_DELAY_TYPE_ONESHOT) 12468 lun->delay_info.datamove_delay = 0; 12469 return; 12470 } 12471 } 12472 #endif 12473 12474 /* 12475 * This command has been aborted. Set the port status, so we fail 12476 * the data move. 
12477 */ 12478 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12479 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12480 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12481 io->io_hdr.nexus.targ_port, 12482 io->io_hdr.nexus.targ_lun); 12483 io->io_hdr.port_status = 31337; 12484 /* 12485 * Note that the backend, in this case, will get the 12486 * callback in its context. In other cases it may get 12487 * called in the frontend's interrupt thread context. 12488 */ 12489 io->scsiio.be_move_done(io); 12490 return; 12491 } 12492 12493 /* Don't confuse frontend with zero length data move. */ 12494 if (io->scsiio.kern_data_len == 0) { 12495 io->scsiio.be_move_done(io); 12496 return; 12497 } 12498 12499 fe_datamove = CTL_PORT(io)->fe_datamove; 12500 fe_datamove(io); 12501 } 12502 12503 static void 12504 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12505 { 12506 union ctl_ha_msg msg; 12507 #ifdef CTL_TIME_IO 12508 struct bintime cur_bt; 12509 #endif 12510 12511 memset(&msg, 0, sizeof(msg)); 12512 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12513 msg.hdr.original_sc = io; 12514 msg.hdr.serializing_sc = io->io_hdr.remote_io; 12515 msg.hdr.nexus = io->io_hdr.nexus; 12516 msg.hdr.status = io->io_hdr.status; 12517 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12518 msg.scsi.tag_num = io->scsiio.tag_num; 12519 msg.scsi.tag_type = io->scsiio.tag_type; 12520 msg.scsi.scsi_status = io->scsiio.scsi_status; 12521 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12522 io->scsiio.sense_len); 12523 msg.scsi.sense_len = io->scsiio.sense_len; 12524 msg.scsi.port_status = io->io_hdr.port_status; 12525 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12526 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12527 ctl_failover_io(io, /*have_lock*/ have_lock); 12528 return; 12529 } 12530 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12531 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12532 msg.scsi.sense_len, M_WAITOK); 12533 12534 #ifdef CTL_TIME_IO 12535 getbinuptime(&cur_bt); 12536 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12537 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12538 #endif 12539 io->io_hdr.num_dmas++; 12540 } 12541 12542 /* 12543 * The DMA to the remote side is done, now we need to tell the other side 12544 * we're done so it can continue with its data movement. 12545 */ 12546 static void 12547 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12548 { 12549 union ctl_io *io; 12550 uint32_t i; 12551 12552 io = rq->context; 12553 12554 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12555 printf("%s: ISC DMA write failed with error %d", __func__, 12556 rq->ret); 12557 ctl_set_internal_failure(&io->scsiio, 12558 /*sks_valid*/ 1, 12559 /*retry_count*/ rq->ret); 12560 } 12561 12562 ctl_dt_req_free(rq); 12563 12564 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12565 free(CTL_LSGLT(io)[i].addr, M_CTL); 12566 free(CTL_RSGL(io), M_CTL); 12567 CTL_RSGL(io) = NULL; 12568 CTL_LSGL(io) = NULL; 12569 12570 /* 12571 * The data is in local and remote memory, so now we need to send 12572 * status (good or back) back to the other side. 12573 */ 12574 ctl_send_datamove_done(io, /*have_lock*/ 0); 12575 } 12576 12577 /* 12578 * We've moved the data from the host/controller into local memory. Now we 12579 * need to push it over to the remote controller's memory. 
12580 */ 12581 static int 12582 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12583 { 12584 int retval; 12585 12586 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12587 ctl_datamove_remote_write_cb); 12588 return (retval); 12589 } 12590 12591 static void 12592 ctl_datamove_remote_write(union ctl_io *io) 12593 { 12594 int retval; 12595 void (*fe_datamove)(union ctl_io *io); 12596 12597 /* 12598 * - Get the data from the host/HBA into local memory. 12599 * - DMA memory from the local controller to the remote controller. 12600 * - Send status back to the remote controller. 12601 */ 12602 12603 retval = ctl_datamove_remote_sgl_setup(io); 12604 if (retval != 0) 12605 return; 12606 12607 /* Switch the pointer over so the FETD knows what to do */ 12608 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12609 12610 /* 12611 * Use a custom move done callback, since we need to send completion 12612 * back to the other controller, not to the backend on this side. 12613 */ 12614 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12615 12616 fe_datamove = CTL_PORT(io)->fe_datamove; 12617 fe_datamove(io); 12618 } 12619 12620 static int 12621 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12622 { 12623 uint32_t i; 12624 12625 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12626 free(CTL_LSGLT(io)[i].addr, M_CTL); 12627 free(CTL_RSGL(io), M_CTL); 12628 CTL_RSGL(io) = NULL; 12629 CTL_LSGL(io) = NULL; 12630 12631 /* 12632 * The read is done, now we need to send status (good or bad) back 12633 * to the other side. 12634 */ 12635 ctl_send_datamove_done(io, /*have_lock*/ 0); 12636 12637 return (0); 12638 } 12639 12640 static void 12641 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12642 { 12643 union ctl_io *io; 12644 void (*fe_datamove)(union ctl_io *io); 12645 12646 io = rq->context; 12647 12648 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12649 printf("%s: ISC DMA read failed with error %d\n", __func__, 12650 rq->ret); 12651 ctl_set_internal_failure(&io->scsiio, 12652 /*sks_valid*/ 1, 12653 /*retry_count*/ rq->ret); 12654 } 12655 12656 ctl_dt_req_free(rq); 12657 12658 /* Switch the pointer over so the FETD knows what to do */ 12659 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12660 12661 /* 12662 * Use a custom move done callback, since we need to send completion 12663 * back to the other controller, not to the backend on this side. 12664 */ 12665 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12666 12667 /* XXX KDM add checks like the ones in ctl_datamove? */ 12668 12669 fe_datamove = CTL_PORT(io)->fe_datamove; 12670 fe_datamove(io); 12671 } 12672 12673 static int 12674 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12675 { 12676 struct ctl_sg_entry *local_sglist; 12677 uint32_t len_to_go; 12678 int retval; 12679 int i; 12680 12681 retval = 0; 12682 local_sglist = CTL_LSGL(io); 12683 len_to_go = io->scsiio.kern_data_len; 12684 12685 /* 12686 * The difficult thing here is that the size of the various 12687 * S/G segments may be different than the size from the 12688 * remote controller. That'll make it harder when DMAing 12689 * the data back to the other side. 12690 */ 12691 for (i = 0; len_to_go > 0; i++) { 12692 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12693 local_sglist[i].addr = 12694 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12695 12696 len_to_go -= local_sglist[i].len; 12697 } 12698 /* 12699 * Reset the number of S/G entries accordingly. The original 12700 * number of S/G entries is available in rem_sg_entries. 
12701 */ 12702 io->scsiio.kern_sg_entries = i; 12703 12704 return (retval); 12705 } 12706 12707 static int 12708 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12709 ctl_ha_dt_cb callback) 12710 { 12711 struct ctl_ha_dt_req *rq; 12712 struct ctl_sg_entry *remote_sglist, *local_sglist; 12713 uint32_t local_used, remote_used, total_used; 12714 int i, j, isc_ret; 12715 12716 rq = ctl_dt_req_alloc(); 12717 12718 /* 12719 * If we failed to allocate the request, and if the DMA didn't fail 12720 * anyway, set busy status. This is just a resource allocation 12721 * failure. 12722 */ 12723 if ((rq == NULL) 12724 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12725 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12726 ctl_set_busy(&io->scsiio); 12727 12728 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12729 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12730 12731 if (rq != NULL) 12732 ctl_dt_req_free(rq); 12733 12734 /* 12735 * The data move failed. We need to return status back 12736 * to the other controller. No point in trying to DMA 12737 * data to the remote controller. 12738 */ 12739 12740 ctl_send_datamove_done(io, /*have_lock*/ 0); 12741 12742 return (1); 12743 } 12744 12745 local_sglist = CTL_LSGL(io); 12746 remote_sglist = CTL_RSGL(io); 12747 local_used = 0; 12748 remote_used = 0; 12749 total_used = 0; 12750 12751 /* 12752 * Pull/push the data over the wire from/to the other controller. 12753 * This takes into account the possibility that the local and 12754 * remote sglists may not be identical in terms of the size of 12755 * the elements and the number of elements. 12756 * 12757 * One fundamental assumption here is that the length allocated for 12758 * both the local and remote sglists is identical. Otherwise, we've 12759 * essentially got a coding error of some sort. 12760 */ 12761 isc_ret = CTL_HA_STATUS_SUCCESS; 12762 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12763 uint32_t cur_len; 12764 uint8_t *tmp_ptr; 12765 12766 rq->command = command; 12767 rq->context = io; 12768 12769 /* 12770 * Both pointers should be aligned. But it is possible 12771 * that the allocation length is not. They should both 12772 * also have enough slack left over at the end, though, 12773 * to round up to the next 8 byte boundary. 
12774 */ 12775 cur_len = MIN(local_sglist[i].len - local_used, 12776 remote_sglist[j].len - remote_used); 12777 rq->size = cur_len; 12778 12779 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12780 tmp_ptr += local_used; 12781 12782 #if 0 12783 /* Use physical addresses when talking to ISC hardware */ 12784 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12785 /* XXX KDM use busdma */ 12786 rq->local = vtophys(tmp_ptr); 12787 } else 12788 rq->local = tmp_ptr; 12789 #else 12790 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12791 ("HA does not support BUS_ADDR")); 12792 rq->local = tmp_ptr; 12793 #endif 12794 12795 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12796 tmp_ptr += remote_used; 12797 rq->remote = tmp_ptr; 12798 12799 rq->callback = NULL; 12800 12801 local_used += cur_len; 12802 if (local_used >= local_sglist[i].len) { 12803 i++; 12804 local_used = 0; 12805 } 12806 12807 remote_used += cur_len; 12808 if (remote_used >= remote_sglist[j].len) { 12809 j++; 12810 remote_used = 0; 12811 } 12812 total_used += cur_len; 12813 12814 if (total_used >= io->scsiio.kern_data_len) 12815 rq->callback = callback; 12816 12817 isc_ret = ctl_dt_single(rq); 12818 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12819 break; 12820 } 12821 if (isc_ret != CTL_HA_STATUS_WAIT) { 12822 rq->ret = isc_ret; 12823 callback(rq); 12824 } 12825 12826 return (0); 12827 } 12828 12829 static void 12830 ctl_datamove_remote_read(union ctl_io *io) 12831 { 12832 int retval; 12833 uint32_t i; 12834 12835 /* 12836 * This will send an error to the other controller in the case of a 12837 * failure. 12838 */ 12839 retval = ctl_datamove_remote_sgl_setup(io); 12840 if (retval != 0) 12841 return; 12842 12843 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12844 ctl_datamove_remote_read_cb); 12845 if (retval != 0) { 12846 /* 12847 * Make sure we free memory if there was an error.. The 12848 * ctl_datamove_remote_xfer() function will send the 12849 * datamove done message, or call the callback with an 12850 * error if there is a problem. 12851 */ 12852 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12853 free(CTL_LSGLT(io)[i].addr, M_CTL); 12854 free(CTL_RSGL(io), M_CTL); 12855 CTL_RSGL(io) = NULL; 12856 CTL_LSGL(io) = NULL; 12857 } 12858 } 12859 12860 /* 12861 * Process a datamove request from the other controller. This is used for 12862 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12863 * first. Once that is complete, the data gets DMAed into the remote 12864 * controller's memory. For reads, we DMA from the remote controller's 12865 * memory into our memory first, and then move it out to the FETD. 12866 */ 12867 static void 12868 ctl_datamove_remote(union ctl_io *io) 12869 { 12870 12871 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12872 12873 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12874 ctl_failover_io(io, /*have_lock*/ 0); 12875 return; 12876 } 12877 12878 /* 12879 * Note that we look for an aborted I/O here, but don't do some of 12880 * the other checks that ctl_datamove() normally does. 12881 * We don't need to run the datamove delay code, since that should 12882 * have been done if need be on the other controller. 
12883 */ 12884 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12885 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12886 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12887 io->io_hdr.nexus.targ_port, 12888 io->io_hdr.nexus.targ_lun); 12889 io->io_hdr.port_status = 31338; 12890 ctl_send_datamove_done(io, /*have_lock*/ 0); 12891 return; 12892 } 12893 12894 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12895 ctl_datamove_remote_write(io); 12896 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12897 ctl_datamove_remote_read(io); 12898 else { 12899 io->io_hdr.port_status = 31339; 12900 ctl_send_datamove_done(io, /*have_lock*/ 0); 12901 } 12902 } 12903 12904 static void 12905 ctl_process_done(union ctl_io *io) 12906 { 12907 struct ctl_softc *softc = CTL_SOFTC(io); 12908 struct ctl_port *port = CTL_PORT(io); 12909 struct ctl_lun *lun = CTL_LUN(io); 12910 void (*fe_done)(union ctl_io *io); 12911 union ctl_ha_msg msg; 12912 12913 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12914 fe_done = port->fe_done; 12915 12916 #ifdef CTL_TIME_IO 12917 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12918 char str[256]; 12919 char path_str[64]; 12920 struct sbuf sb; 12921 12922 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12923 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12924 12925 sbuf_cat(&sb, path_str); 12926 switch (io->io_hdr.io_type) { 12927 case CTL_IO_SCSI: 12928 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12929 sbuf_printf(&sb, "\n"); 12930 sbuf_cat(&sb, path_str); 12931 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12932 io->scsiio.tag_num, io->scsiio.tag_type); 12933 break; 12934 case CTL_IO_TASK: 12935 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12936 "Tag Type: %d\n", io->taskio.task_action, 12937 io->taskio.tag_num, io->taskio.tag_type); 12938 break; 12939 default: 12940 panic("%s: Invalid CTL I/O type %d\n", 12941 __func__, io->io_hdr.io_type); 12942 } 12943 sbuf_cat(&sb, path_str); 12944 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12945 (intmax_t)time_uptime - io->io_hdr.start_time); 12946 sbuf_finish(&sb); 12947 printf("%s", sbuf_data(&sb)); 12948 } 12949 #endif /* CTL_TIME_IO */ 12950 12951 switch (io->io_hdr.io_type) { 12952 case CTL_IO_SCSI: 12953 break; 12954 case CTL_IO_TASK: 12955 if (ctl_debug & CTL_DEBUG_INFO) 12956 ctl_io_error_print(io, NULL); 12957 fe_done(io); 12958 return; 12959 default: 12960 panic("%s: Invalid CTL I/O type %d\n", 12961 __func__, io->io_hdr.io_type); 12962 } 12963 12964 if (lun == NULL) { 12965 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12966 io->io_hdr.nexus.targ_mapped_lun)); 12967 goto bailout; 12968 } 12969 12970 mtx_lock(&lun->lun_lock); 12971 12972 /* 12973 * Check to see if we have any informational exception and status 12974 * of this command can be modified to report it in form of either 12975 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
12976 */ 12977 if (lun->ie_reported == 0 && lun->ie_asc != 0 && 12978 io->io_hdr.status == CTL_SUCCESS && 12979 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { 12980 uint8_t mrie = lun->MODE_IE.mrie; 12981 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || 12982 (lun->MODE_VER.byte3 & SMS_VER_PER)); 12983 if (((mrie == SIEP_MRIE_REC_COND && per) || 12984 mrie == SIEP_MRIE_REC_UNCOND || 12985 mrie == SIEP_MRIE_NO_SENSE) && 12986 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & 12987 CTL_CMD_FLAG_NO_SENSE) == 0) { 12988 ctl_set_sense(&io->scsiio, 12989 /*current_error*/ 1, 12990 /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? 12991 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, 12992 /*asc*/ lun->ie_asc, 12993 /*ascq*/ lun->ie_ascq, 12994 SSD_ELEM_NONE); 12995 lun->ie_reported = 1; 12996 } 12997 } else if (lun->ie_reported < 0) 12998 lun->ie_reported = 0; 12999 13000 /* 13001 * Check to see if we have any errors to inject here. We only 13002 * inject errors for commands that don't already have errors set. 13003 */ 13004 if (!STAILQ_EMPTY(&lun->error_list) && 13005 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13006 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13007 ctl_inject_error(lun, io); 13008 13009 /* 13010 * XXX KDM how do we treat commands that aren't completed 13011 * successfully? 13012 * 13013 * XXX KDM should we also track I/O latency? 13014 */ 13015 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13016 io->io_hdr.io_type == CTL_IO_SCSI) { 13017 int type; 13018 #ifdef CTL_TIME_IO 13019 struct bintime bt; 13020 13021 getbinuptime(&bt); 13022 bintime_sub(&bt, &io->io_hdr.start_bt); 13023 #endif 13024 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13025 CTL_FLAG_DATA_IN) 13026 type = CTL_STATS_READ; 13027 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13028 CTL_FLAG_DATA_OUT) 13029 type = CTL_STATS_WRITE; 13030 else 13031 type = CTL_STATS_NO_IO; 13032 13033 lun->stats.bytes[type] += io->scsiio.kern_total_len; 13034 lun->stats.operations[type] ++; 13035 lun->stats.dmas[type] += io->io_hdr.num_dmas; 13036 #ifdef CTL_TIME_IO 13037 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); 13038 bintime_add(&lun->stats.time[type], &bt); 13039 #endif 13040 13041 mtx_lock(&port->port_lock); 13042 port->stats.bytes[type] += io->scsiio.kern_total_len; 13043 port->stats.operations[type] ++; 13044 port->stats.dmas[type] += io->io_hdr.num_dmas; 13045 #ifdef CTL_TIME_IO 13046 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); 13047 bintime_add(&port->stats.time[type], &bt); 13048 #endif 13049 mtx_unlock(&port->port_lock); 13050 } 13051 13052 /* 13053 * Run through the blocked queue of this I/O and see if anything 13054 * can be unblocked, now that this I/O is done and will be removed. 13055 * We need to do it before removal to have OOA position to start. 13056 */ 13057 ctl_try_unblock_others(lun, io, TRUE); 13058 13059 /* 13060 * Remove this from the OOA queue. 13061 */ 13062 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13063 #ifdef CTL_TIME_IO 13064 if (TAILQ_EMPTY(&lun->ooa_queue)) 13065 lun->last_busy = getsbinuptime(); 13066 #endif 13067 13068 /* 13069 * If the LUN has been invalidated, free it if there is nothing 13070 * left on its OOA queue. 
13071 */ 13072 if ((lun->flags & CTL_LUN_INVALID) 13073 && TAILQ_EMPTY(&lun->ooa_queue)) { 13074 mtx_unlock(&lun->lun_lock); 13075 ctl_free_lun(lun); 13076 } else 13077 mtx_unlock(&lun->lun_lock); 13078 13079 bailout: 13080 13081 /* 13082 * If this command has been aborted, make sure we set the status 13083 * properly. The FETD is responsible for freeing the I/O and doing 13084 * whatever it needs to do to clean up its state. 13085 */ 13086 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13087 ctl_set_task_aborted(&io->scsiio); 13088 13089 /* 13090 * If enabled, print command error status. 13091 */ 13092 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13093 (ctl_debug & CTL_DEBUG_INFO) != 0) 13094 ctl_io_error_print(io, NULL); 13095 13096 /* 13097 * Tell the FETD or the other shelf controller we're done with this 13098 * command. Note that only SCSI commands get to this point. Task 13099 * management commands are completed above. 13100 */ 13101 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 13102 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 13103 memset(&msg, 0, sizeof(msg)); 13104 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13105 msg.hdr.serializing_sc = io->io_hdr.remote_io; 13106 msg.hdr.nexus = io->io_hdr.nexus; 13107 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13108 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 13109 M_WAITOK); 13110 } 13111 13112 fe_done(io); 13113 } 13114 13115 /* 13116 * Front end should call this if it doesn't do autosense. When the request 13117 * sense comes back in from the initiator, we'll dequeue this and send it. 13118 */ 13119 int 13120 ctl_queue_sense(union ctl_io *io) 13121 { 13122 struct ctl_softc *softc = CTL_SOFTC(io); 13123 struct ctl_port *port = CTL_PORT(io); 13124 struct ctl_lun *lun; 13125 struct scsi_sense_data *ps; 13126 uint32_t initidx, p, targ_lun; 13127 13128 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13129 13130 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13131 13132 /* 13133 * LUN lookup will likely move to the ctl_work_thread() once we 13134 * have our new queueing infrastructure (that doesn't put things on 13135 * a per-LUN queue initially). That is so that we can handle 13136 * things like an INQUIRY to a LUN that we don't have enabled. We 13137 * can't deal with that right now. 13138 * If we don't have a LUN for this, just toss the sense information. 13139 */ 13140 mtx_lock(&softc->ctl_lock); 13141 if (targ_lun >= ctl_max_luns || 13142 (lun = softc->ctl_luns[targ_lun]) == NULL) { 13143 mtx_unlock(&softc->ctl_lock); 13144 goto bailout; 13145 } 13146 mtx_lock(&lun->lun_lock); 13147 mtx_unlock(&softc->ctl_lock); 13148 13149 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13150 p = initidx / CTL_MAX_INIT_PER_PORT; 13151 if (lun->pending_sense[p] == NULL) { 13152 lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, 13153 M_CTL, M_NOWAIT | M_ZERO); 13154 } 13155 if ((ps = lun->pending_sense[p]) != NULL) { 13156 ps += initidx % CTL_MAX_INIT_PER_PORT; 13157 memset(ps, 0, sizeof(*ps)); 13158 memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); 13159 } 13160 mtx_unlock(&lun->lun_lock); 13161 13162 bailout: 13163 ctl_free_io(io); 13164 return (CTL_RETVAL_COMPLETE); 13165 } 13166 13167 /* 13168 * Primary command inlet from frontend ports. All SCSI and task I/O 13169 * requests must go through this function. 
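 *
 * As an illustrative sketch only (hypothetical frontend code, not taken
 * from any in-tree port; initiator_index, fe_lun, tag, cdb and cdb_len
 * are assumed local variables), a typical submission looks roughly like:
 *
 *	union ctl_io *io;
 *
 *	io = ctl_alloc_io(port->ctl_pool_ref);
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	io->io_hdr.nexus.targ_port = port->targ_port;
 *	io->io_hdr.nexus.initid = initiator_index;
 *	io->io_hdr.nexus.targ_lun = fe_lun;
 *	io->scsiio.tag_num = tag;
 *	io->scsiio.tag_type = CTL_TAG_SIMPLE;
 *	io->scsiio.cdb_len = cdb_len;
 *	memcpy(io->scsiio.cdb, cdb, cdb_len);
 *	ctl_queue(io);
 *
 * Completion is delivered through the port's fe_done() callback, which
 * returns status to the initiator and frees the I/O with ctl_free_io().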
13170 */ 13171 int 13172 ctl_queue(union ctl_io *io) 13173 { 13174 struct ctl_port *port = CTL_PORT(io); 13175 13176 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13177 13178 #ifdef CTL_TIME_IO 13179 io->io_hdr.start_time = time_uptime; 13180 getbinuptime(&io->io_hdr.start_bt); 13181 #endif /* CTL_TIME_IO */ 13182 13183 /* Map FE-specific LUN ID into global one. */ 13184 io->io_hdr.nexus.targ_mapped_lun = 13185 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13186 13187 switch (io->io_hdr.io_type) { 13188 case CTL_IO_SCSI: 13189 case CTL_IO_TASK: 13190 if (ctl_debug & CTL_DEBUG_CDB) 13191 ctl_io_print(io); 13192 ctl_enqueue_incoming(io); 13193 break; 13194 default: 13195 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13196 return (EINVAL); 13197 } 13198 13199 return (CTL_RETVAL_COMPLETE); 13200 } 13201 13202 #ifdef CTL_IO_DELAY 13203 static void 13204 ctl_done_timer_wakeup(void *arg) 13205 { 13206 union ctl_io *io; 13207 13208 io = (union ctl_io *)arg; 13209 ctl_done(io); 13210 } 13211 #endif /* CTL_IO_DELAY */ 13212 13213 void 13214 ctl_serseq_done(union ctl_io *io) 13215 { 13216 struct ctl_lun *lun = CTL_LUN(io);; 13217 13218 if (lun->be_lun == NULL || 13219 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13220 return; 13221 mtx_lock(&lun->lun_lock); 13222 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13223 ctl_try_unblock_others(lun, io, FALSE); 13224 mtx_unlock(&lun->lun_lock); 13225 } 13226 13227 void 13228 ctl_done(union ctl_io *io) 13229 { 13230 13231 /* 13232 * Enable this to catch duplicate completion issues. 13233 */ 13234 #if 0 13235 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13236 printf("%s: type %d msg %d cdb %x iptl: " 13237 "%u:%u:%u tag 0x%04x " 13238 "flag %#x status %x\n", 13239 __func__, 13240 io->io_hdr.io_type, 13241 io->io_hdr.msg_type, 13242 io->scsiio.cdb[0], 13243 io->io_hdr.nexus.initid, 13244 io->io_hdr.nexus.targ_port, 13245 io->io_hdr.nexus.targ_lun, 13246 (io->io_hdr.io_type == 13247 CTL_IO_TASK) ? 13248 io->taskio.tag_num : 13249 io->scsiio.tag_num, 13250 io->io_hdr.flags, 13251 io->io_hdr.status); 13252 } else 13253 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13254 #endif 13255 13256 /* 13257 * This is an internal copy of an I/O, and should not go through 13258 * the normal done processing logic. 
13259 */ 13260 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13261 return; 13262 13263 #ifdef CTL_IO_DELAY 13264 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13265 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13266 } else { 13267 struct ctl_lun *lun = CTL_LUN(io); 13268 13269 if ((lun != NULL) 13270 && (lun->delay_info.done_delay > 0)) { 13271 13272 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13273 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13274 callout_reset(&io->io_hdr.delay_callout, 13275 lun->delay_info.done_delay * hz, 13276 ctl_done_timer_wakeup, io); 13277 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13278 lun->delay_info.done_delay = 0; 13279 return; 13280 } 13281 } 13282 #endif /* CTL_IO_DELAY */ 13283 13284 ctl_enqueue_done(io); 13285 } 13286 13287 static void 13288 ctl_work_thread(void *arg) 13289 { 13290 struct ctl_thread *thr = (struct ctl_thread *)arg; 13291 struct ctl_softc *softc = thr->ctl_softc; 13292 union ctl_io *io; 13293 int retval; 13294 13295 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13296 thread_lock(curthread); 13297 sched_prio(curthread, PUSER - 1); 13298 thread_unlock(curthread); 13299 13300 while (!softc->shutdown) { 13301 /* 13302 * We handle the queues in this order: 13303 * - ISC 13304 * - done queue (to free up resources, unblock other commands) 13305 * - incoming queue 13306 * - RtR queue 13307 * 13308 * If those queues are empty, we break out of the loop and 13309 * go to sleep. 13310 */ 13311 mtx_lock(&thr->queue_lock); 13312 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13313 if (io != NULL) { 13314 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13315 mtx_unlock(&thr->queue_lock); 13316 ctl_handle_isc(io); 13317 continue; 13318 } 13319 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13320 if (io != NULL) { 13321 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13322 /* clear any blocked commands, call fe_done */ 13323 mtx_unlock(&thr->queue_lock); 13324 ctl_process_done(io); 13325 continue; 13326 } 13327 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13328 if (io != NULL) { 13329 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13330 mtx_unlock(&thr->queue_lock); 13331 if (io->io_hdr.io_type == CTL_IO_TASK) 13332 ctl_run_task(io); 13333 else 13334 ctl_scsiio_precheck(softc, &io->scsiio); 13335 continue; 13336 } 13337 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13338 if (io != NULL) { 13339 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13340 mtx_unlock(&thr->queue_lock); 13341 retval = ctl_scsiio(&io->scsiio); 13342 if (retval != CTL_RETVAL_COMPLETE) 13343 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13344 continue; 13345 } 13346 13347 /* Sleep until we have something to do. */ 13348 mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0); 13349 } 13350 thr->thread = NULL; 13351 kthread_exit(); 13352 } 13353 13354 static void 13355 ctl_lun_thread(void *arg) 13356 { 13357 struct ctl_softc *softc = (struct ctl_softc *)arg; 13358 struct ctl_be_lun *be_lun; 13359 13360 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13361 thread_lock(curthread); 13362 sched_prio(curthread, PUSER - 1); 13363 thread_unlock(curthread); 13364 13365 while (!softc->shutdown) { 13366 mtx_lock(&softc->ctl_lock); 13367 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13368 if (be_lun != NULL) { 13369 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13370 mtx_unlock(&softc->ctl_lock); 13371 ctl_create_lun(be_lun); 13372 continue; 13373 } 13374 13375 /* Sleep until we have something to do. 
*/ 13376 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13377 PDROP, "-", 0); 13378 } 13379 softc->lun_thread = NULL; 13380 kthread_exit(); 13381 } 13382 13383 static void 13384 ctl_thresh_thread(void *arg) 13385 { 13386 struct ctl_softc *softc = (struct ctl_softc *)arg; 13387 struct ctl_lun *lun; 13388 struct ctl_logical_block_provisioning_page *page; 13389 const char *attr; 13390 union ctl_ha_msg msg; 13391 uint64_t thres, val; 13392 int i, e, set; 13393 13394 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13395 thread_lock(curthread); 13396 sched_prio(curthread, PUSER - 1); 13397 thread_unlock(curthread); 13398 13399 while (!softc->shutdown) { 13400 mtx_lock(&softc->ctl_lock); 13401 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13402 if ((lun->flags & CTL_LUN_DISABLED) || 13403 (lun->flags & CTL_LUN_NO_MEDIA) || 13404 lun->backend->lun_attr == NULL) 13405 continue; 13406 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13407 softc->ha_mode == CTL_HA_MODE_XFER) 13408 continue; 13409 if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0) 13410 continue; 13411 e = 0; 13412 page = &lun->MODE_LBP; 13413 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13414 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13415 continue; 13416 thres = scsi_4btoul(page->descr[i].count); 13417 thres <<= CTL_LBP_EXPONENT; 13418 switch (page->descr[i].resource) { 13419 case 0x01: 13420 attr = "blocksavail"; 13421 break; 13422 case 0x02: 13423 attr = "blocksused"; 13424 break; 13425 case 0xf1: 13426 attr = "poolblocksavail"; 13427 break; 13428 case 0xf2: 13429 attr = "poolblocksused"; 13430 break; 13431 default: 13432 continue; 13433 } 13434 mtx_unlock(&softc->ctl_lock); // XXX 13435 val = lun->backend->lun_attr( 13436 lun->be_lun->be_lun, attr); 13437 mtx_lock(&softc->ctl_lock); 13438 if (val == UINT64_MAX) 13439 continue; 13440 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 13441 == SLBPPD_ARMING_INC) 13442 e = (val >= thres); 13443 else 13444 e = (val <= thres); 13445 if (e) 13446 break; 13447 } 13448 mtx_lock(&lun->lun_lock); 13449 if (e) { 13450 scsi_u64to8b((uint8_t *)&page->descr[i] - 13451 (uint8_t *)page, lun->ua_tpt_info); 13452 if (lun->lasttpt == 0 || 13453 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13454 lun->lasttpt = time_uptime; 13455 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13456 set = 1; 13457 } else 13458 set = 0; 13459 } else { 13460 lun->lasttpt = 0; 13461 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13462 set = -1; 13463 } 13464 mtx_unlock(&lun->lun_lock); 13465 if (set != 0 && 13466 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13467 /* Send msg to other side. 
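 * The peer controller applies or clears the same
 * CTL_UA_THIN_PROV_THRES unit attention ("THIN PROVISIONING SOFT
 * THRESHOLD REACHED") for its initiators, so both sides report the
 * threshold change consistently.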
*/ 13468 bzero(&msg.ua, sizeof(msg.ua)); 13469 msg.hdr.msg_type = CTL_MSG_UA; 13470 msg.hdr.nexus.initid = -1; 13471 msg.hdr.nexus.targ_port = -1; 13472 msg.hdr.nexus.targ_lun = lun->lun; 13473 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13474 msg.ua.ua_all = 1; 13475 msg.ua.ua_set = (set > 0); 13476 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13477 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); 13478 mtx_unlock(&softc->ctl_lock); // XXX 13479 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13480 sizeof(msg.ua), M_WAITOK); 13481 mtx_lock(&softc->ctl_lock); 13482 } 13483 } 13484 mtx_sleep(&softc->thresh_thread, &softc->ctl_lock, 13485 PDROP, "-", CTL_LBP_PERIOD * hz); 13486 } 13487 softc->thresh_thread = NULL; 13488 kthread_exit(); 13489 } 13490 13491 static void 13492 ctl_enqueue_incoming(union ctl_io *io) 13493 { 13494 struct ctl_softc *softc = CTL_SOFTC(io); 13495 struct ctl_thread *thr; 13496 u_int idx; 13497 13498 idx = (io->io_hdr.nexus.targ_port * 127 + 13499 io->io_hdr.nexus.initid) % worker_threads; 13500 thr = &softc->threads[idx]; 13501 mtx_lock(&thr->queue_lock); 13502 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13503 mtx_unlock(&thr->queue_lock); 13504 wakeup(thr); 13505 } 13506 13507 static void 13508 ctl_enqueue_rtr(union ctl_io *io) 13509 { 13510 struct ctl_softc *softc = CTL_SOFTC(io); 13511 struct ctl_thread *thr; 13512 13513 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13514 mtx_lock(&thr->queue_lock); 13515 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13516 mtx_unlock(&thr->queue_lock); 13517 wakeup(thr); 13518 } 13519 13520 static void 13521 ctl_enqueue_done(union ctl_io *io) 13522 { 13523 struct ctl_softc *softc = CTL_SOFTC(io); 13524 struct ctl_thread *thr; 13525 13526 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13527 mtx_lock(&thr->queue_lock); 13528 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13529 mtx_unlock(&thr->queue_lock); 13530 wakeup(thr); 13531 } 13532 13533 static void 13534 ctl_enqueue_isc(union ctl_io *io) 13535 { 13536 struct ctl_softc *softc = CTL_SOFTC(io); 13537 struct ctl_thread *thr; 13538 13539 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13540 mtx_lock(&thr->queue_lock); 13541 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13542 mtx_unlock(&thr->queue_lock); 13543 wakeup(thr); 13544 } 13545 13546 /* 13547 * vim: ts=8 13548 */ 13549