/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},	/* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

const static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},	/* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/0,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_DEXCPT,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
				   bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(struct ctl_lun *lun);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
};

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->PRGeneration;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += sizeof(uint32_t) * CTL_MAX_LUNS;
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg msg;
	int i;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;
	bzero(&msg.mode, sizeof(msg.mode));
	msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.targ_lun = lun->lun;
	msg.hdr.nexus.targ_mapped_lun = lun->lun;
	msg.mode.page_code = page;
	msg.mode.subpage = subpage;
	msg.mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
	    msg.mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
	    M_WAITOK);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/* Announce this node's parameters to the peer for validation. */
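	/*
	 * The peer cross-checks these compile-time limits and settings in
	 * ctl_isc_login() and aborts the HA channel on any mismatch.
	 */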
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = CTL_MAX_LUNS;
	msg.login.max_ports = CTL_MAX_PORTS;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) {
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES &&
		    msg->ua.ua_set)
			memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
		if (msg->ua.ua_all) {
			if (msg->ua.ua_set)
				ctl_est_ua_all(lun, iid, msg->ua.ua_type);
			else
				ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
		} else {
			if (msg->ua.ua_set)
				ctl_est_ua(lun, iid, msg->ua.ua_type);
			else
				ctl_clr_ua(lun, iid, msg->ua.ua_type);
		}
		mtx_unlock(&lun->lun_lock);
	} else
		mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
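	/*
	 * Only accept the peer's LUN state if it is describing the same LUN,
	 * i.e. the device ID length and contents match ours.
	 */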
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	     memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, msg->hdr.nexus.targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->PRGeneration = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, msg->hdr.nexus.targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL)
			port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
			    M_CTL, M_WAITOK);
		memcpy(port->lun_map, &msg->port.data[i],
		    sizeof(uint32_t) * CTL_MAX_LUNS);
		i += msg->port.lun_map_len;
	} else {
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len != msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len != msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len != msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.max_luns != CTL_MAX_LUNS ||
	    msg->login.max_ports != CTL_MAX_PORTS ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	int i;
	uint32_t initidx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    lun->mode_pages.index[i].page_len);
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	softc = control_softc;
	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			/* Populate the ctsio from the message. */
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
#if 0
			printf("port %u, iid %u, lun %u\n",
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				io->io_hdr.remote_sglist = sgl;
				io->io_hdr.local_sglist =
				    &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;

#if 0
				printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
				    __func__, sgl[i].addr, sgl[i].len, j, i);
#endif
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				    __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.fetd_status;
			io->scsiio.residual = msg->scsi.residual;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				io->scsiio.sense_residual =
				    msg->scsi.sense_residual;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
1347 */ 1348 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1349 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1350 1351 /* io = msg->hdr.serializing_sc; */ 1352 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1353 ctl_enqueue_isc(io); 1354 break; 1355 1356 /* Handle resets sent from the other side */ 1357 case CTL_MSG_MANAGE_TASKS: { 1358 struct ctl_taskio *taskio; 1359 taskio = (struct ctl_taskio *)ctl_alloc_io( 1360 softc->othersc_pool); 1361 ctl_zero_io((union ctl_io *)taskio); 1362 taskio->io_hdr.io_type = CTL_IO_TASK; 1363 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1364 taskio->io_hdr.nexus = msg->hdr.nexus; 1365 taskio->task_action = msg->task.task_action; 1366 taskio->tag_num = msg->task.tag_num; 1367 taskio->tag_type = msg->task.tag_type; 1368 #ifdef CTL_TIME_IO 1369 taskio->io_hdr.start_time = time_uptime; 1370 getbintime(&taskio->io_hdr.start_bt); 1371 #endif /* CTL_TIME_IO */ 1372 ctl_run_task((union ctl_io *)taskio); 1373 break; 1374 } 1375 /* Persistent Reserve action which needs attention */ 1376 case CTL_MSG_PERS_ACTION: 1377 presio = (struct ctl_prio *)ctl_alloc_io( 1378 softc->othersc_pool); 1379 ctl_zero_io((union ctl_io *)presio); 1380 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1381 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1382 presio->io_hdr.nexus = msg->hdr.nexus; 1383 presio->pr_msg = msg->pr; 1384 ctl_enqueue_isc((union ctl_io *)presio); 1385 break; 1386 case CTL_MSG_UA: 1387 ctl_isc_ua(softc, msg, param); 1388 break; 1389 case CTL_MSG_PORT_SYNC: 1390 ctl_isc_port_sync(softc, msg, param); 1391 break; 1392 case CTL_MSG_LUN_SYNC: 1393 ctl_isc_lun_sync(softc, msg, param); 1394 break; 1395 case CTL_MSG_IID_SYNC: 1396 ctl_isc_iid_sync(softc, msg, param); 1397 break; 1398 case CTL_MSG_LOGIN: 1399 ctl_isc_login(softc, msg, param); 1400 break; 1401 case CTL_MSG_MODE_SYNC: 1402 ctl_isc_mode_sync(softc, msg, param); 1403 break; 1404 default: 1405 printf("Received HA message of unknown type %d\n", 1406 msg->hdr.msg_type); 1407 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1408 break; 1409 } 1410 if (msg != &msgbuf) 1411 free(msg, M_CTL); 1412 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1413 printf("CTL: HA link status changed from %d to %d\n", 1414 softc->ha_link, param); 1415 if (param == softc->ha_link) 1416 return; 1417 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1418 softc->ha_link = param; 1419 ctl_isc_ha_link_down(softc); 1420 } else { 1421 softc->ha_link = param; 1422 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1423 ctl_isc_ha_link_up(softc); 1424 } 1425 return; 1426 } else { 1427 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1428 return; 1429 } 1430 } 1431 1432 static void 1433 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1434 { 1435 1436 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1437 src->scsi.sense_len); 1438 dest->scsiio.scsi_status = src->scsi.scsi_status; 1439 dest->scsiio.sense_len = src->scsi.sense_len; 1440 dest->io_hdr.status = src->hdr.status; 1441 } 1442 1443 static void 1444 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1445 { 1446 1447 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1448 src->scsiio.sense_len); 1449 dest->scsi.scsi_status = src->scsiio.scsi_status; 1450 dest->scsi.sense_len = src->scsiio.sense_len; 1451 dest->hdr.status = src->io_hdr.status; 1452 } 1453 1454 void 1455 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1456 { 1457 struct ctl_softc *softc = lun->ctl_softc; 1458 ctl_ua_type *pu; 1459 1460 if (initidx < softc->init_min || initidx >= 
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	if (lun->pending_ua[port] == NULL)
		return;
	for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
		if (port * CTL_MAX_INIT_PER_PORT + i == except)
			continue;
		lun->pending_ua[port][i] |= ua;
	}
}

void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++)
		ctl_est_ua_port(lun, i, except, ua);
}

void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type)
{
	struct ctl_lun *lun;

	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_clr_ua(lun, initidx, ua_type);
		mtx_unlock(&lun->lun_lock);
	}
}

static int
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	struct ctl_lun_req ireq;
	int error, value;

	value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
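	/* 0 means primary (active shelf), 1 means secondary. */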
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_unlock(&softc->ctl_lock);
		bzero(&ireq, sizeof(ireq));
		ireq.reqtype = CTL_LUNREQ_MODIFY;
		ireq.reqdata.modify.lun_id = lun->lun;
		lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
		    curthread);
		if (ireq.status != CTL_LUN_OK) {
			printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
			    __func__, ireq.status, ireq.error_str);
		}
		mtx_lock(&softc->ctl_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	void *other_pool;
	int i, error, retval;

	retval = 0;
	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
	    "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
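	/*
	 * ha_id selects which slice of the target port and initiator index
	 * space this head owns; 0 (or an out-of-range value) disables HA
	 * and claims the whole range.
	 */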
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) {
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_cnt = CTL_MAX_PORTS;
		softc->port_min = 0;
	} else {
		softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS;
		softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
	}
	softc->port_max = softc->port_min + softc->port_cnt;
	softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
	softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
	    "HA link state (0 - offline, 1 - unknown, 2 - online)");

	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	                    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		return (ENOMEM);
	}
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");

	if (softc->is_single == 0) {
		ctl_frontend_register(&ha_frontend);
		if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_init: ctl_ha_msg_init failed.\n");
			softc->is_single = 1;
		} else
		if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_init: ctl_ha_msg_register failed.\n");
			softc->is_single = 1;
		}
	}
	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;

	softc = (struct ctl_softc *)control_softc;

	if (softc->is_single == 0) {
ctl_ha_msg_shutdown(softc); 1730 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1731 != CTL_HA_STATUS_SUCCESS) 1732 printf("%s: ctl_ha_msg_deregister failed.\n", __func__); 1733 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 1734 printf("%s: ctl_ha_msg_destroy failed.\n", __func__); 1735 ctl_frontend_deregister(&ha_frontend); 1736 } 1737 1738 mtx_lock(&softc->ctl_lock); 1739 1740 /* 1741 * Free up each LUN. 1742 */ 1743 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1744 next_lun = STAILQ_NEXT(lun, links); 1745 ctl_free_lun(lun); 1746 } 1747 1748 mtx_unlock(&softc->ctl_lock); 1749 1750 #if 0 1751 ctl_shutdown_thread(softc->work_thread); 1752 mtx_destroy(&softc->queue_lock); 1753 #endif 1754 1755 ctl_tpc_shutdown(softc); 1756 uma_zdestroy(softc->io_zone); 1757 mtx_destroy(&softc->ctl_lock); 1758 1759 destroy_dev(softc->dev); 1760 1761 sysctl_ctx_free(&softc->sysctl_ctx); 1762 1763 free(control_softc, M_DEVBUF); 1764 control_softc = NULL; 1765 } 1766 1767 static int 1768 ctl_module_event_handler(module_t mod, int what, void *arg) 1769 { 1770 1771 switch (what) { 1772 case MOD_LOAD: 1773 return (ctl_init()); 1774 case MOD_UNLOAD: 1775 return (EBUSY); 1776 default: 1777 return (EOPNOTSUPP); 1778 } 1779 } 1780 1781 /* 1782 * XXX KDM should we do some access checks here? Bump a reference count to 1783 * prevent a CTL module from being unloaded while someone has it open? 1784 */ 1785 static int 1786 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1787 { 1788 return (0); 1789 } 1790 1791 static int 1792 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1793 { 1794 return (0); 1795 } 1796 1797 /* 1798 * Remove an initiator by port number and initiator ID. 1799 * Returns 0 for success, -1 for failure. 1800 */ 1801 int 1802 ctl_remove_initiator(struct ctl_port *port, int iid) 1803 { 1804 struct ctl_softc *softc = control_softc; 1805 1806 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1807 1808 if (iid > CTL_MAX_INIT_PER_PORT) { 1809 printf("%s: initiator ID %u > maximum %u!\n", 1810 __func__, iid, CTL_MAX_INIT_PER_PORT); 1811 return (-1); 1812 } 1813 1814 mtx_lock(&softc->ctl_lock); 1815 port->wwpn_iid[iid].in_use--; 1816 port->wwpn_iid[iid].last_use = time_uptime; 1817 mtx_unlock(&softc->ctl_lock); 1818 ctl_isc_announce_iid(port, iid); 1819 1820 return (0); 1821 } 1822 1823 /* 1824 * Add an initiator to the initiator map. 1825 * Returns iid for success, < 0 for failure. 
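 * If a negative iid is passed in, a slot is chosen automatically: first an existing entry with a matching WWPN or name is reused, then a completely unused entry, and finally the least recently used idle entry.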
1826 */ 1827 int 1828 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1829 { 1830 struct ctl_softc *softc = control_softc; 1831 time_t best_time; 1832 int i, best; 1833 1834 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1835 1836 if (iid >= CTL_MAX_INIT_PER_PORT) { 1837 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1838 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1839 free(name, M_CTL); 1840 return (-1); 1841 } 1842 1843 mtx_lock(&softc->ctl_lock); 1844 1845 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1846 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1847 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1848 iid = i; 1849 break; 1850 } 1851 if (name != NULL && port->wwpn_iid[i].name != NULL && 1852 strcmp(name, port->wwpn_iid[i].name) == 0) { 1853 iid = i; 1854 break; 1855 } 1856 } 1857 } 1858 1859 if (iid < 0) { 1860 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1861 if (port->wwpn_iid[i].in_use == 0 && 1862 port->wwpn_iid[i].wwpn == 0 && 1863 port->wwpn_iid[i].name == NULL) { 1864 iid = i; 1865 break; 1866 } 1867 } 1868 } 1869 1870 if (iid < 0) { 1871 best = -1; 1872 best_time = INT32_MAX; 1873 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1874 if (port->wwpn_iid[i].in_use == 0) { 1875 if (port->wwpn_iid[i].last_use < best_time) { 1876 best = i; 1877 best_time = port->wwpn_iid[i].last_use; 1878 } 1879 } 1880 } 1881 iid = best; 1882 } 1883 1884 if (iid < 0) { 1885 mtx_unlock(&softc->ctl_lock); 1886 free(name, M_CTL); 1887 return (-2); 1888 } 1889 1890 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1891 /* 1892 * This is not an error yet. 1893 */ 1894 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1895 #if 0 1896 printf("%s: port %d iid %u WWPN %#jx arrived" 1897 " again\n", __func__, port->targ_port, 1898 iid, (uintmax_t)wwpn); 1899 #endif 1900 goto take; 1901 } 1902 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1903 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1904 #if 0 1905 printf("%s: port %d iid %u name '%s' arrived" 1906 " again\n", __func__, port->targ_port, 1907 iid, name); 1908 #endif 1909 goto take; 1910 } 1911 1912 /* 1913 * This is an error, but what do we do about it? The 1914 * driver is telling us we have a new WWPN for this 1915 * initiator ID, so we pretty much need to use it. 1916 */ 1917 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1918 " but WWPN %#jx '%s' is still at that address\n", 1919 __func__, port->targ_port, iid, wwpn, name, 1920 (uintmax_t)port->wwpn_iid[iid].wwpn, 1921 port->wwpn_iid[iid].name); 1922 1923 /* 1924 * XXX KDM clear have_ca and ua_pending on each LUN for 1925 * this initiator. 
1926 */ 1927 } 1928 take: 1929 free(port->wwpn_iid[iid].name, M_CTL); 1930 port->wwpn_iid[iid].name = name; 1931 port->wwpn_iid[iid].wwpn = wwpn; 1932 port->wwpn_iid[iid].in_use++; 1933 mtx_unlock(&softc->ctl_lock); 1934 ctl_isc_announce_iid(port, iid); 1935 1936 return (iid); 1937 } 1938 1939 static int 1940 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1941 { 1942 int len; 1943 1944 switch (port->port_type) { 1945 case CTL_PORT_FC: 1946 { 1947 struct scsi_transportid_fcp *id = 1948 (struct scsi_transportid_fcp *)buf; 1949 if (port->wwpn_iid[iid].wwpn == 0) 1950 return (0); 1951 memset(id, 0, sizeof(*id)); 1952 id->format_protocol = SCSI_PROTO_FC; 1953 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1954 return (sizeof(*id)); 1955 } 1956 case CTL_PORT_ISCSI: 1957 { 1958 struct scsi_transportid_iscsi_port *id = 1959 (struct scsi_transportid_iscsi_port *)buf; 1960 if (port->wwpn_iid[iid].name == NULL) 1961 return (0); 1962 memset(id, 0, 256); 1963 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1964 SCSI_PROTO_ISCSI; 1965 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1966 len = roundup2(min(len, 252), 4); 1967 scsi_ulto2b(len, id->additional_length); 1968 return (sizeof(*id) + len); 1969 } 1970 case CTL_PORT_SAS: 1971 { 1972 struct scsi_transportid_sas *id = 1973 (struct scsi_transportid_sas *)buf; 1974 if (port->wwpn_iid[iid].wwpn == 0) 1975 return (0); 1976 memset(id, 0, sizeof(*id)); 1977 id->format_protocol = SCSI_PROTO_SAS; 1978 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1979 return (sizeof(*id)); 1980 } 1981 default: 1982 { 1983 struct scsi_transportid_spi *id = 1984 (struct scsi_transportid_spi *)buf; 1985 memset(id, 0, sizeof(*id)); 1986 id->format_protocol = SCSI_PROTO_SPI; 1987 scsi_ulto2b(iid, id->scsi_addr); 1988 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1989 return (sizeof(*id)); 1990 } 1991 } 1992 } 1993 1994 /* 1995 * Serialize a command that went down the "wrong" side, and so was sent to 1996 * this controller for execution. The logic is a little different than the 1997 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1998 * sent back to the other side, but in the success case, we execute the 1999 * command on this side (XFER mode) or tell the other side to execute it 2000 * (SER_ONLY mode). 2001 */ 2002 static int 2003 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2004 { 2005 struct ctl_softc *softc; 2006 union ctl_ha_msg msg_info; 2007 struct ctl_lun *lun; 2008 const struct ctl_cmd_entry *entry; 2009 int retval = 0; 2010 uint32_t targ_lun; 2011 2012 softc = control_softc; 2013 2014 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2015 mtx_lock(&softc->ctl_lock); 2016 if ((targ_lun < CTL_MAX_LUNS) && 2017 ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 2018 mtx_lock(&lun->lun_lock); 2019 mtx_unlock(&softc->ctl_lock); 2020 /* 2021 * If the LUN is invalid, pretend that it doesn't exist. 2022 * It will go away as soon as all pending I/O has been 2023 * completed. 2024 */ 2025 if (lun->flags & CTL_LUN_DISABLED) { 2026 mtx_unlock(&lun->lun_lock); 2027 lun = NULL; 2028 } 2029 } else { 2030 mtx_unlock(&softc->ctl_lock); 2031 lun = NULL; 2032 } 2033 if (lun == NULL) { 2034 /* 2035 * The other node would not have sent us this request unless it 2036 * had received an announcement that we are the primary node for 2037 * this LUN. If the LUN does not exist now, that is probably the 2038 * result of a race, so respond to the initiator in the most opaque way. 
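 * (A BUSY status tells the initiator nothing beyond "try again later", which is exactly the opacity we want here.)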
2039 */ 2040 ctl_set_busy(ctsio); 2041 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2042 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2043 msg_info.hdr.serializing_sc = NULL; 2044 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2045 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2046 sizeof(msg_info.scsi), M_WAITOK); 2047 return(1); 2048 } 2049 2050 entry = ctl_get_cmd_entry(ctsio, NULL); 2051 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2052 mtx_unlock(&lun->lun_lock); 2053 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2054 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2055 msg_info.hdr.serializing_sc = NULL; 2056 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2057 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2058 sizeof(msg_info.scsi), M_WAITOK); 2059 return(1); 2060 } 2061 2062 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 2063 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun; 2064 2065 /* 2066 * Every I/O goes into the OOA queue for a 2067 * particular LUN, and stays there until completion. 2068 */ 2069 #ifdef CTL_TIME_IO 2070 if (TAILQ_EMPTY(&lun->ooa_queue)) 2071 lun->idle_time += getsbinuptime() - lun->last_busy; 2072 #endif 2073 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2074 2075 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 2076 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 2077 ooa_links))) { 2078 case CTL_ACTION_BLOCK: 2079 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 2080 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 2081 blocked_links); 2082 mtx_unlock(&lun->lun_lock); 2083 break; 2084 case CTL_ACTION_PASS: 2085 case CTL_ACTION_SKIP: 2086 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2087 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2088 ctl_enqueue_rtr((union ctl_io *)ctsio); 2089 mtx_unlock(&lun->lun_lock); 2090 } else { 2091 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2092 mtx_unlock(&lun->lun_lock); 2093 2094 /* send msg back to other side */ 2095 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2096 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2097 msg_info.hdr.msg_type = CTL_MSG_R2R; 2098 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2099 sizeof(msg_info.hdr), M_WAITOK); 2100 } 2101 break; 2102 case CTL_ACTION_OVERLAP: 2103 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2104 mtx_unlock(&lun->lun_lock); 2105 retval = 1; 2106 2107 ctl_set_overlapped_cmd(ctsio); 2108 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2109 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2110 msg_info.hdr.serializing_sc = NULL; 2111 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2112 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2113 sizeof(msg_info.scsi), M_WAITOK); 2114 break; 2115 case CTL_ACTION_OVERLAP_TAG: 2116 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2117 mtx_unlock(&lun->lun_lock); 2118 retval = 1; 2119 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2120 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2121 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2122 msg_info.hdr.serializing_sc = NULL; 2123 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2124 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2125 sizeof(msg_info.scsi), M_WAITOK); 2126 break; 2127 case CTL_ACTION_ERROR: 2128 default: 2129 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2130 mtx_unlock(&lun->lun_lock); 2131 retval = 1; 2132 2133 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2134 /*retry_count*/ 0); 2135 ctl_copy_sense_data_back((union ctl_io *)ctsio, 
&msg_info); 2136 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2137 msg_info.hdr.serializing_sc = NULL; 2138 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2139 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2140 sizeof(msg_info.scsi), M_WAITOK); 2141 break; 2142 } 2143 return (retval); 2144 } 2145 2146 /* 2147 * Returns 0 for success, errno for failure. 2148 */ 2149 static int 2150 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2151 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2152 { 2153 union ctl_io *io; 2154 int retval; 2155 2156 retval = 0; 2157 2158 mtx_lock(&lun->lun_lock); 2159 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2160 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2161 ooa_links)) { 2162 struct ctl_ooa_entry *entry; 2163 2164 /* 2165 * If we've got more than we can fit, just count the 2166 * remaining entries. 2167 */ 2168 if (*cur_fill_num >= ooa_hdr->alloc_num) 2169 continue; 2170 2171 entry = &kern_entries[*cur_fill_num]; 2172 2173 entry->tag_num = io->scsiio.tag_num; 2174 entry->lun_num = lun->lun; 2175 #ifdef CTL_TIME_IO 2176 entry->start_bt = io->io_hdr.start_bt; 2177 #endif 2178 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2179 entry->cdb_len = io->scsiio.cdb_len; 2180 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2181 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2182 2183 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2184 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2185 2186 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2187 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2188 2189 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2190 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2191 2192 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2193 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2194 } 2195 mtx_unlock(&lun->lun_lock); 2196 2197 return (retval); 2198 } 2199 2200 static void * 2201 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2202 size_t error_str_len) 2203 { 2204 void *kptr; 2205 2206 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2207 2208 if (copyin(user_addr, kptr, len) != 0) { 2209 snprintf(error_str, error_str_len, "Error copying %d bytes " 2210 "from user address %p to kernel address %p", len, 2211 user_addr, kptr); 2212 free(kptr, M_CTL); 2213 return (NULL); 2214 } 2215 2216 return (kptr); 2217 } 2218 2219 static void 2220 ctl_free_args(int num_args, struct ctl_be_arg *args) 2221 { 2222 int i; 2223 2224 if (args == NULL) 2225 return; 2226 2227 for (i = 0; i < num_args; i++) { 2228 free(args[i].kname, M_CTL); 2229 free(args[i].kvalue, M_CTL); 2230 } 2231 2232 free(args, M_CTL); 2233 } 2234 2235 static struct ctl_be_arg * 2236 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2237 char *error_str, size_t error_str_len) 2238 { 2239 struct ctl_be_arg *args; 2240 int i; 2241 2242 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2243 error_str, error_str_len); 2244 2245 if (args == NULL) 2246 goto bailout; 2247 2248 for (i = 0; i < num_args; i++) { 2249 args[i].kname = NULL; 2250 args[i].kvalue = NULL; 2251 } 2252 2253 for (i = 0; i < num_args; i++) { 2254 uint8_t *tmpptr; 2255 2256 args[i].kname = ctl_copyin_alloc(args[i].name, 2257 args[i].namelen, error_str, error_str_len); 2258 if (args[i].kname == NULL) 2259 goto bailout; 2260 2261 if (args[i].kname[args[i].namelen - 1] != '\0') { 2262 snprintf(error_str, error_str_len, "Argument %d " 2263 "name is not NUL-terminated", i); 2264 goto bailout; 2265 } 2266 2267 if (args[i].flags & CTL_BEARG_RD) { 2268 tmpptr = 
ctl_copyin_alloc(args[i].value, 2269 args[i].vallen, error_str, error_str_len); 2270 if (tmpptr == NULL) 2271 goto bailout; 2272 if ((args[i].flags & CTL_BEARG_ASCII) 2273 && (tmpptr[args[i].vallen - 1] != '\0')) { 2274 snprintf(error_str, error_str_len, "Argument " 2275 "%d value is not NUL-terminated", i); 2276 goto bailout; 2277 } 2278 args[i].kvalue = tmpptr; 2279 } else { 2280 args[i].kvalue = malloc(args[i].vallen, 2281 M_CTL, M_WAITOK | M_ZERO); 2282 } 2283 } 2284 2285 return (args); 2286 bailout: 2287 2288 ctl_free_args(num_args, args); 2289 2290 return (NULL); 2291 } 2292 2293 static void 2294 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2295 { 2296 int i; 2297 2298 for (i = 0; i < num_args; i++) { 2299 if (args[i].flags & CTL_BEARG_WR) 2300 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2301 } 2302 } 2303 2304 /* 2305 * Escape characters that are illegal or not recommended in XML. 2306 */ 2307 int 2308 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2309 { 2310 char *end = str + size; 2311 int retval; 2312 2313 retval = 0; 2314 2315 for (; *str && str < end; str++) { 2316 switch (*str) { 2317 case '&': 2318 retval = sbuf_printf(sb, "&amp;"); 2319 break; 2320 case '>': 2321 retval = sbuf_printf(sb, "&gt;"); 2322 break; 2323 case '<': 2324 retval = sbuf_printf(sb, "&lt;"); 2325 break; 2326 default: 2327 retval = sbuf_putc(sb, *str); 2328 break; 2329 } 2330 2331 if (retval != 0) 2332 break; 2333 2334 } 2335 2336 return (retval); 2337 } 2338 2339 static void 2340 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2341 { 2342 struct scsi_vpd_id_descriptor *desc; 2343 int i; 2344 2345 if (id == NULL || id->len < 4) 2346 return; 2347 desc = (struct scsi_vpd_id_descriptor *)id->data; 2348 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2349 case SVPD_ID_TYPE_T10: 2350 sbuf_printf(sb, "t10."); 2351 break; 2352 case SVPD_ID_TYPE_EUI64: 2353 sbuf_printf(sb, "eui."); 2354 break; 2355 case SVPD_ID_TYPE_NAA: 2356 sbuf_printf(sb, "naa."); 2357 break; 2358 case SVPD_ID_TYPE_SCSI_NAME: 2359 break; 2360 } 2361 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2362 case SVPD_ID_CODESET_BINARY: 2363 for (i = 0; i < desc->length; i++) 2364 sbuf_printf(sb, "%02x", desc->identifier[i]); 2365 break; 2366 case SVPD_ID_CODESET_ASCII: 2367 sbuf_printf(sb, "%.*s", (int)desc->length, 2368 (char *)desc->identifier); 2369 break; 2370 case SVPD_ID_CODESET_UTF8: 2371 sbuf_printf(sb, "%s", (char *)desc->identifier); 2372 break; 2373 } 2374 } 2375 2376 static int 2377 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2378 struct thread *td) 2379 { 2380 struct ctl_softc *softc; 2381 struct ctl_lun *lun; 2382 int retval; 2383 2384 softc = control_softc; 2385 2386 retval = 0; 2387 2388 switch (cmd) { 2389 case CTL_IO: 2390 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2391 break; 2392 case CTL_ENABLE_PORT: 2393 case CTL_DISABLE_PORT: 2394 case CTL_SET_PORT_WWNS: { 2395 struct ctl_port *port; 2396 struct ctl_port_entry *entry; 2397 2398 entry = (struct ctl_port_entry *)addr; 2399 2400 mtx_lock(&softc->ctl_lock); 2401 STAILQ_FOREACH(port, &softc->port_list, links) { 2402 int action, done; 2403 2404 if (port->targ_port < softc->port_min || 2405 port->targ_port >= softc->port_max) 2406 continue; 2407 2408 action = 0; 2409 done = 0; 2410 if ((entry->port_type == CTL_PORT_NONE) 2411 && (entry->targ_port == port->targ_port)) { 2412 /* 2413 * If the user only wants to enable or 2414 * disable or set WWNs on a specific port, 2415 * do the operation and we're done. 
2416 */ 2417 action = 1; 2418 done = 1; 2419 } else if (entry->port_type & port->port_type) { 2420 /* 2421 * Compare the user's type mask with the 2422 * particular frontend type to see if we 2423 * have a match. 2424 */ 2425 action = 1; 2426 done = 0; 2427 2428 /* 2429 * Make sure the user isn't trying to set 2430 * WWNs on multiple ports at the same time. 2431 */ 2432 if (cmd == CTL_SET_PORT_WWNS) { 2433 printf("%s: Can't set WWNs on " 2434 "multiple ports\n", __func__); 2435 retval = EINVAL; 2436 break; 2437 } 2438 } 2439 if (action == 0) 2440 continue; 2441 2442 /* 2443 * XXX KDM we have to drop the lock here, because 2444 * the online/offline operations can potentially 2445 * block. We need to reference count the frontends 2446 * so they can't go away, 2447 */ 2448 if (cmd == CTL_ENABLE_PORT) { 2449 mtx_unlock(&softc->ctl_lock); 2450 ctl_port_online(port); 2451 mtx_lock(&softc->ctl_lock); 2452 } else if (cmd == CTL_DISABLE_PORT) { 2453 mtx_unlock(&softc->ctl_lock); 2454 ctl_port_offline(port); 2455 mtx_lock(&softc->ctl_lock); 2456 } else if (cmd == CTL_SET_PORT_WWNS) { 2457 ctl_port_set_wwns(port, 2458 (entry->flags & CTL_PORT_WWNN_VALID) ? 2459 1 : 0, entry->wwnn, 2460 (entry->flags & CTL_PORT_WWPN_VALID) ? 2461 1 : 0, entry->wwpn); 2462 } 2463 if (done != 0) 2464 break; 2465 } 2466 mtx_unlock(&softc->ctl_lock); 2467 break; 2468 } 2469 case CTL_GET_PORT_LIST: { 2470 struct ctl_port *port; 2471 struct ctl_port_list *list; 2472 int i; 2473 2474 list = (struct ctl_port_list *)addr; 2475 2476 if (list->alloc_len != (list->alloc_num * 2477 sizeof(struct ctl_port_entry))) { 2478 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2479 "alloc_num %u * sizeof(struct ctl_port_entry) " 2480 "%zu\n", __func__, list->alloc_len, 2481 list->alloc_num, sizeof(struct ctl_port_entry)); 2482 retval = EINVAL; 2483 break; 2484 } 2485 list->fill_len = 0; 2486 list->fill_num = 0; 2487 list->dropped_num = 0; 2488 i = 0; 2489 mtx_lock(&softc->ctl_lock); 2490 STAILQ_FOREACH(port, &softc->port_list, links) { 2491 struct ctl_port_entry entry, *list_entry; 2492 2493 if (list->fill_num >= list->alloc_num) { 2494 list->dropped_num++; 2495 continue; 2496 } 2497 2498 entry.port_type = port->port_type; 2499 strlcpy(entry.port_name, port->port_name, 2500 sizeof(entry.port_name)); 2501 entry.targ_port = port->targ_port; 2502 entry.physical_port = port->physical_port; 2503 entry.virtual_port = port->virtual_port; 2504 entry.wwnn = port->wwnn; 2505 entry.wwpn = port->wwpn; 2506 if (port->status & CTL_PORT_STATUS_ONLINE) 2507 entry.online = 1; 2508 else 2509 entry.online = 0; 2510 2511 list_entry = &list->entries[i]; 2512 2513 retval = copyout(&entry, list_entry, sizeof(entry)); 2514 if (retval != 0) { 2515 printf("%s: CTL_GET_PORT_LIST: copyout " 2516 "returned %d\n", __func__, retval); 2517 break; 2518 } 2519 i++; 2520 list->fill_num++; 2521 list->fill_len += sizeof(entry); 2522 } 2523 mtx_unlock(&softc->ctl_lock); 2524 2525 /* 2526 * If this is non-zero, we had a copyout fault, so there's 2527 * probably no point in attempting to set the status inside 2528 * the structure. 
2529 */ 2530 if (retval != 0) 2531 break; 2532 2533 if (list->dropped_num > 0) 2534 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2535 else 2536 list->status = CTL_PORT_LIST_OK; 2537 break; 2538 } 2539 case CTL_DUMP_OOA: { 2540 union ctl_io *io; 2541 char printbuf[128]; 2542 struct sbuf sb; 2543 2544 mtx_lock(&softc->ctl_lock); 2545 printf("Dumping OOA queues:\n"); 2546 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2547 mtx_lock(&lun->lun_lock); 2548 for (io = (union ctl_io *)TAILQ_FIRST( 2549 &lun->ooa_queue); io != NULL; 2550 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2551 ooa_links)) { 2552 sbuf_new(&sb, printbuf, sizeof(printbuf), 2553 SBUF_FIXEDLEN); 2554 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2555 (intmax_t)lun->lun, 2556 io->scsiio.tag_num, 2557 (io->io_hdr.flags & 2558 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2559 (io->io_hdr.flags & 2560 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2561 (io->io_hdr.flags & 2562 CTL_FLAG_ABORT) ? " ABORT" : "", 2563 (io->io_hdr.flags & 2564 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2565 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2566 sbuf_finish(&sb); 2567 printf("%s\n", sbuf_data(&sb)); 2568 } 2569 mtx_unlock(&lun->lun_lock); 2570 } 2571 printf("OOA queues dump done\n"); 2572 mtx_unlock(&softc->ctl_lock); 2573 break; 2574 } 2575 case CTL_GET_OOA: { 2576 struct ctl_ooa *ooa_hdr; 2577 struct ctl_ooa_entry *entries; 2578 uint32_t cur_fill_num; 2579 2580 ooa_hdr = (struct ctl_ooa *)addr; 2581 2582 if ((ooa_hdr->alloc_len == 0) 2583 || (ooa_hdr->alloc_num == 0)) { 2584 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2585 "must be non-zero\n", __func__, 2586 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2587 retval = EINVAL; 2588 break; 2589 } 2590 2591 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2592 sizeof(struct ctl_ooa_entry))) { 2593 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2594 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2595 __func__, ooa_hdr->alloc_len, 2596 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2597 retval = EINVAL; 2598 break; 2599 } 2600 2601 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2602 if (entries == NULL) { 2603 printf("%s: could not allocate %d bytes for OOA " 2604 "dump\n", __func__, ooa_hdr->alloc_len); 2605 retval = ENOMEM; 2606 break; 2607 } 2608 2609 mtx_lock(&softc->ctl_lock); 2610 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2611 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2612 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2613 mtx_unlock(&softc->ctl_lock); 2614 free(entries, M_CTL); 2615 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2616 __func__, (uintmax_t)ooa_hdr->lun_num); 2617 retval = EINVAL; 2618 break; 2619 } 2620 2621 cur_fill_num = 0; 2622 2623 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2624 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2625 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2626 ooa_hdr, entries); 2627 if (retval != 0) 2628 break; 2629 } 2630 if (retval != 0) { 2631 mtx_unlock(&softc->ctl_lock); 2632 free(entries, M_CTL); 2633 break; 2634 } 2635 } else { 2636 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2637 2638 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2639 entries); 2640 } 2641 mtx_unlock(&softc->ctl_lock); 2642 2643 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2644 ooa_hdr->fill_len = ooa_hdr->fill_num * 2645 sizeof(struct ctl_ooa_entry); 2646 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2647 if (retval != 0) { 2648 printf("%s: error copying out %d bytes for OOA dump\n", 2649 __func__, 
ooa_hdr->fill_len); 2650 } 2651 2652 getbintime(&ooa_hdr->cur_bt); 2653 2654 if (cur_fill_num > ooa_hdr->alloc_num) { 2655 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2656 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2657 } else { 2658 ooa_hdr->dropped_num = 0; 2659 ooa_hdr->status = CTL_OOA_OK; 2660 } 2661 2662 free(entries, M_CTL); 2663 break; 2664 } 2665 case CTL_CHECK_OOA: { 2666 union ctl_io *io; 2667 struct ctl_ooa_info *ooa_info; 2668 2669 2670 ooa_info = (struct ctl_ooa_info *)addr; 2671 2672 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2673 ooa_info->status = CTL_OOA_INVALID_LUN; 2674 break; 2675 } 2676 mtx_lock(&softc->ctl_lock); 2677 lun = softc->ctl_luns[ooa_info->lun_id]; 2678 if (lun == NULL) { 2679 mtx_unlock(&softc->ctl_lock); 2680 ooa_info->status = CTL_OOA_INVALID_LUN; 2681 break; 2682 } 2683 mtx_lock(&lun->lun_lock); 2684 mtx_unlock(&softc->ctl_lock); 2685 ooa_info->num_entries = 0; 2686 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2687 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2688 &io->io_hdr, ooa_links)) { 2689 ooa_info->num_entries++; 2690 } 2691 mtx_unlock(&lun->lun_lock); 2692 2693 ooa_info->status = CTL_OOA_SUCCESS; 2694 2695 break; 2696 } 2697 case CTL_DELAY_IO: { 2698 struct ctl_io_delay_info *delay_info; 2699 2700 delay_info = (struct ctl_io_delay_info *)addr; 2701 2702 #ifdef CTL_IO_DELAY 2703 mtx_lock(&softc->ctl_lock); 2704 2705 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2706 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2707 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2708 } else { 2709 lun = softc->ctl_luns[delay_info->lun_id]; 2710 mtx_lock(&lun->lun_lock); 2711 2712 delay_info->status = CTL_DELAY_STATUS_OK; 2713 2714 switch (delay_info->delay_type) { 2715 case CTL_DELAY_TYPE_CONT: 2716 break; 2717 case CTL_DELAY_TYPE_ONESHOT: 2718 break; 2719 default: 2720 delay_info->status = 2721 CTL_DELAY_STATUS_INVALID_TYPE; 2722 break; 2723 } 2724 2725 switch (delay_info->delay_loc) { 2726 case CTL_DELAY_LOC_DATAMOVE: 2727 lun->delay_info.datamove_type = 2728 delay_info->delay_type; 2729 lun->delay_info.datamove_delay = 2730 delay_info->delay_secs; 2731 break; 2732 case CTL_DELAY_LOC_DONE: 2733 lun->delay_info.done_type = 2734 delay_info->delay_type; 2735 lun->delay_info.done_delay = 2736 delay_info->delay_secs; 2737 break; 2738 default: 2739 delay_info->status = 2740 CTL_DELAY_STATUS_INVALID_LOC; 2741 break; 2742 } 2743 mtx_unlock(&lun->lun_lock); 2744 } 2745 2746 mtx_unlock(&softc->ctl_lock); 2747 #else 2748 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2749 #endif /* CTL_IO_DELAY */ 2750 break; 2751 } 2752 case CTL_REALSYNC_SET: { 2753 int *syncstate; 2754 2755 syncstate = (int *)addr; 2756 2757 mtx_lock(&softc->ctl_lock); 2758 switch (*syncstate) { 2759 case 0: 2760 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2761 break; 2762 case 1: 2763 softc->flags |= CTL_FLAG_REAL_SYNC; 2764 break; 2765 default: 2766 retval = EINVAL; 2767 break; 2768 } 2769 mtx_unlock(&softc->ctl_lock); 2770 break; 2771 } 2772 case CTL_REALSYNC_GET: { 2773 int *syncstate; 2774 2775 syncstate = (int*)addr; 2776 2777 mtx_lock(&softc->ctl_lock); 2778 if (softc->flags & CTL_FLAG_REAL_SYNC) 2779 *syncstate = 1; 2780 else 2781 *syncstate = 0; 2782 mtx_unlock(&softc->ctl_lock); 2783 2784 break; 2785 } 2786 case CTL_SETSYNC: 2787 case CTL_GETSYNC: { 2788 struct ctl_sync_info *sync_info; 2789 2790 sync_info = (struct ctl_sync_info *)addr; 2791 2792 mtx_lock(&softc->ctl_lock); 2793 lun = softc->ctl_luns[sync_info->lun_id]; 2794 if (lun == NULL) { 2795 
mtx_unlock(&softc->ctl_lock); 2796 sync_info->status = CTL_GS_SYNC_NO_LUN; 2797 break; 2798 } 2799 /* 2800 * Get or set the sync interval. We're not bounds checking 2801 * in the set case, hopefully the user won't do something 2802 * silly. 2803 */ 2804 mtx_lock(&lun->lun_lock); 2805 mtx_unlock(&softc->ctl_lock); 2806 if (cmd == CTL_GETSYNC) 2807 sync_info->sync_interval = lun->sync_interval; 2808 else 2809 lun->sync_interval = sync_info->sync_interval; 2810 mtx_unlock(&lun->lun_lock); 2811 2812 sync_info->status = CTL_GS_SYNC_OK; 2813 2814 break; 2815 } 2816 case CTL_GETSTATS: { 2817 struct ctl_stats *stats; 2818 int i; 2819 2820 stats = (struct ctl_stats *)addr; 2821 2822 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2823 stats->alloc_len) { 2824 stats->status = CTL_SS_NEED_MORE_SPACE; 2825 stats->num_luns = softc->num_luns; 2826 break; 2827 } 2828 /* 2829 * XXX KDM no locking here. If the LUN list changes, 2830 * things can blow up. 2831 */ 2832 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2833 i++, lun = STAILQ_NEXT(lun, links)) { 2834 retval = copyout(&lun->stats, &stats->lun_stats[i], 2835 sizeof(lun->stats)); 2836 if (retval != 0) 2837 break; 2838 } 2839 stats->num_luns = softc->num_luns; 2840 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2841 softc->num_luns; 2842 stats->status = CTL_SS_OK; 2843 #ifdef CTL_TIME_IO 2844 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2845 #else 2846 stats->flags = CTL_STATS_FLAG_NONE; 2847 #endif 2848 getnanouptime(&stats->timestamp); 2849 break; 2850 } 2851 case CTL_ERROR_INJECT: { 2852 struct ctl_error_desc *err_desc, *new_err_desc; 2853 2854 err_desc = (struct ctl_error_desc *)addr; 2855 2856 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2857 M_WAITOK | M_ZERO); 2858 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2859 2860 mtx_lock(&softc->ctl_lock); 2861 lun = softc->ctl_luns[err_desc->lun_id]; 2862 if (lun == NULL) { 2863 mtx_unlock(&softc->ctl_lock); 2864 free(new_err_desc, M_CTL); 2865 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2866 __func__, (uintmax_t)err_desc->lun_id); 2867 retval = EINVAL; 2868 break; 2869 } 2870 mtx_lock(&lun->lun_lock); 2871 mtx_unlock(&softc->ctl_lock); 2872 2873 /* 2874 * We could do some checking here to verify the validity 2875 * of the request, but given the complexity of error 2876 * injection requests, the checking logic would be fairly 2877 * complex. 2878 * 2879 * For now, if the request is invalid, it just won't get 2880 * executed and might get deleted. 2881 */ 2882 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2883 2884 /* 2885 * XXX KDM check to make sure the serial number is unique, 2886 * in case we somehow manage to wrap. That shouldn't 2887 * happen for a very long time, but it's the right thing to 2888 * do. 
2889 */ 2890 new_err_desc->serial = lun->error_serial; 2891 err_desc->serial = lun->error_serial; 2892 lun->error_serial++; 2893 2894 mtx_unlock(&lun->lun_lock); 2895 break; 2896 } 2897 case CTL_ERROR_INJECT_DELETE: { 2898 struct ctl_error_desc *delete_desc, *desc, *desc2; 2899 int delete_done; 2900 2901 delete_desc = (struct ctl_error_desc *)addr; 2902 delete_done = 0; 2903 2904 mtx_lock(&softc->ctl_lock); 2905 lun = softc->ctl_luns[delete_desc->lun_id]; 2906 if (lun == NULL) { 2907 mtx_unlock(&softc->ctl_lock); 2908 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2909 __func__, (uintmax_t)delete_desc->lun_id); 2910 retval = EINVAL; 2911 break; 2912 } 2913 mtx_lock(&lun->lun_lock); 2914 mtx_unlock(&softc->ctl_lock); 2915 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2916 if (desc->serial != delete_desc->serial) 2917 continue; 2918 2919 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2920 links); 2921 free(desc, M_CTL); 2922 delete_done = 1; 2923 } 2924 mtx_unlock(&lun->lun_lock); 2925 if (delete_done == 0) { 2926 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2927 "error serial %ju on LUN %u\n", __func__, 2928 delete_desc->serial, delete_desc->lun_id); 2929 retval = EINVAL; 2930 break; 2931 } 2932 break; 2933 } 2934 case CTL_DUMP_STRUCTS: { 2935 int i, j, k; 2936 struct ctl_port *port; 2937 struct ctl_frontend *fe; 2938 2939 mtx_lock(&softc->ctl_lock); 2940 printf("CTL Persistent Reservation information start:\n"); 2941 for (i = 0; i < CTL_MAX_LUNS; i++) { 2942 lun = softc->ctl_luns[i]; 2943 2944 if ((lun == NULL) 2945 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2946 continue; 2947 2948 for (j = 0; j < CTL_MAX_PORTS; j++) { 2949 if (lun->pr_keys[j] == NULL) 2950 continue; 2951 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2952 if (lun->pr_keys[j][k] == 0) 2953 continue; 2954 printf(" LUN %d port %d iid %d key " 2955 "%#jx\n", i, j, k, 2956 (uintmax_t)lun->pr_keys[j][k]); 2957 } 2958 } 2959 } 2960 printf("CTL Persistent Reservation information end\n"); 2961 printf("CTL Ports:\n"); 2962 STAILQ_FOREACH(port, &softc->port_list, links) { 2963 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2964 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2965 port->frontend->name, port->port_type, 2966 port->physical_port, port->virtual_port, 2967 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2968 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2969 if (port->wwpn_iid[j].in_use == 0 && 2970 port->wwpn_iid[j].wwpn == 0 && 2971 port->wwpn_iid[j].name == NULL) 2972 continue; 2973 2974 printf(" iid %u use %d WWPN %#jx '%s'\n", 2975 j, port->wwpn_iid[j].in_use, 2976 (uintmax_t)port->wwpn_iid[j].wwpn, 2977 port->wwpn_iid[j].name); 2978 } 2979 } 2980 printf("CTL Port information end\n"); 2981 mtx_unlock(&softc->ctl_lock); 2982 /* 2983 * XXX KDM calling this without a lock. We'd likely want 2984 * to drop the lock before calling the frontend's dump 2985 * routine anyway. 
2986 */ 2987 printf("CTL Frontends:\n"); 2988 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2989 printf(" Frontend '%s'\n", fe->name); 2990 if (fe->fe_dump != NULL) 2991 fe->fe_dump(); 2992 } 2993 printf("CTL Frontend information end\n"); 2994 break; 2995 } 2996 case CTL_LUN_REQ: { 2997 struct ctl_lun_req *lun_req; 2998 struct ctl_backend_driver *backend; 2999 3000 lun_req = (struct ctl_lun_req *)addr; 3001 3002 backend = ctl_backend_find(lun_req->backend); 3003 if (backend == NULL) { 3004 lun_req->status = CTL_LUN_ERROR; 3005 snprintf(lun_req->error_str, 3006 sizeof(lun_req->error_str), 3007 "Backend \"%s\" not found.", 3008 lun_req->backend); 3009 break; 3010 } 3011 if (lun_req->num_be_args > 0) { 3012 lun_req->kern_be_args = ctl_copyin_args( 3013 lun_req->num_be_args, 3014 lun_req->be_args, 3015 lun_req->error_str, 3016 sizeof(lun_req->error_str)); 3017 if (lun_req->kern_be_args == NULL) { 3018 lun_req->status = CTL_LUN_ERROR; 3019 break; 3020 } 3021 } 3022 3023 retval = backend->ioctl(dev, cmd, addr, flag, td); 3024 3025 if (lun_req->num_be_args > 0) { 3026 ctl_copyout_args(lun_req->num_be_args, 3027 lun_req->kern_be_args); 3028 ctl_free_args(lun_req->num_be_args, 3029 lun_req->kern_be_args); 3030 } 3031 break; 3032 } 3033 case CTL_LUN_LIST: { 3034 struct sbuf *sb; 3035 struct ctl_lun_list *list; 3036 struct ctl_option *opt; 3037 3038 list = (struct ctl_lun_list *)addr; 3039 3040 /* 3041 * Allocate a fixed length sbuf here, based on the length 3042 * of the user's buffer. We could allocate an auto-extending 3043 * buffer, and then tell the user how much larger our 3044 * amount of data is than his buffer, but that presents 3045 * some problems: 3046 * 3047 * 1. The sbuf(9) routines use a blocking malloc, and so 3048 * we can't hold a lock while calling them with an 3049 * auto-extending buffer. 3050 * 3051 * 2. There is not currently a LUN reference counting 3052 * mechanism, outside of outstanding transactions on 3053 * the LUN's OOA queue. So a LUN could go away on us 3054 * while we're getting the LUN number, backend-specific 3055 * information, etc. Thus, given the way things 3056 * currently work, we need to hold the CTL lock while 3057 * grabbing LUN information. 3058 * 3059 * So, from the user's standpoint, the best thing to do is 3060 * allocate what he thinks is a reasonable buffer length, 3061 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3062 * double the buffer length and try again. (And repeat 3063 * that until he succeeds.) 3064 */ 3065 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3066 if (sb == NULL) { 3067 list->status = CTL_LUN_LIST_ERROR; 3068 snprintf(list->error_str, sizeof(list->error_str), 3069 "Unable to allocate %d bytes for LUN list", 3070 list->alloc_len); 3071 break; 3072 } 3073 3074 sbuf_printf(sb, "<ctllunlist>\n"); 3075 3076 mtx_lock(&softc->ctl_lock); 3077 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3078 mtx_lock(&lun->lun_lock); 3079 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3080 (uintmax_t)lun->lun); 3081 3082 /* 3083 * Bail out as soon as we see that we've overfilled 3084 * the buffer. 3085 */ 3086 if (retval != 0) 3087 break; 3088 3089 retval = sbuf_printf(sb, "\t<backend_type>%s" 3090 "</backend_type>\n", 3091 (lun->backend == NULL) ? 
"none" : 3092 lun->backend->name); 3093 3094 if (retval != 0) 3095 break; 3096 3097 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3098 lun->be_lun->lun_type); 3099 3100 if (retval != 0) 3101 break; 3102 3103 if (lun->backend == NULL) { 3104 retval = sbuf_printf(sb, "</lun>\n"); 3105 if (retval != 0) 3106 break; 3107 continue; 3108 } 3109 3110 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3111 (lun->be_lun->maxlba > 0) ? 3112 lun->be_lun->maxlba + 1 : 0); 3113 3114 if (retval != 0) 3115 break; 3116 3117 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3118 lun->be_lun->blocksize); 3119 3120 if (retval != 0) 3121 break; 3122 3123 retval = sbuf_printf(sb, "\t<serial_number>"); 3124 3125 if (retval != 0) 3126 break; 3127 3128 retval = ctl_sbuf_printf_esc(sb, 3129 lun->be_lun->serial_num, 3130 sizeof(lun->be_lun->serial_num)); 3131 3132 if (retval != 0) 3133 break; 3134 3135 retval = sbuf_printf(sb, "</serial_number>\n"); 3136 3137 if (retval != 0) 3138 break; 3139 3140 retval = sbuf_printf(sb, "\t<device_id>"); 3141 3142 if (retval != 0) 3143 break; 3144 3145 retval = ctl_sbuf_printf_esc(sb, 3146 lun->be_lun->device_id, 3147 sizeof(lun->be_lun->device_id)); 3148 3149 if (retval != 0) 3150 break; 3151 3152 retval = sbuf_printf(sb, "</device_id>\n"); 3153 3154 if (retval != 0) 3155 break; 3156 3157 if (lun->backend->lun_info != NULL) { 3158 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3159 if (retval != 0) 3160 break; 3161 } 3162 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3163 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3164 opt->name, opt->value, opt->name); 3165 if (retval != 0) 3166 break; 3167 } 3168 3169 retval = sbuf_printf(sb, "</lun>\n"); 3170 3171 if (retval != 0) 3172 break; 3173 mtx_unlock(&lun->lun_lock); 3174 } 3175 if (lun != NULL) 3176 mtx_unlock(&lun->lun_lock); 3177 mtx_unlock(&softc->ctl_lock); 3178 3179 if ((retval != 0) 3180 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3181 retval = 0; 3182 sbuf_delete(sb); 3183 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3184 snprintf(list->error_str, sizeof(list->error_str), 3185 "Out of space, %d bytes is too small", 3186 list->alloc_len); 3187 break; 3188 } 3189 3190 sbuf_finish(sb); 3191 3192 retval = copyout(sbuf_data(sb), list->lun_xml, 3193 sbuf_len(sb) + 1); 3194 3195 list->fill_len = sbuf_len(sb) + 1; 3196 list->status = CTL_LUN_LIST_OK; 3197 sbuf_delete(sb); 3198 break; 3199 } 3200 case CTL_ISCSI: { 3201 struct ctl_iscsi *ci; 3202 struct ctl_frontend *fe; 3203 3204 ci = (struct ctl_iscsi *)addr; 3205 3206 fe = ctl_frontend_find("iscsi"); 3207 if (fe == NULL) { 3208 ci->status = CTL_ISCSI_ERROR; 3209 snprintf(ci->error_str, sizeof(ci->error_str), 3210 "Frontend \"iscsi\" not found."); 3211 break; 3212 } 3213 3214 retval = fe->ioctl(dev, cmd, addr, flag, td); 3215 break; 3216 } 3217 case CTL_PORT_REQ: { 3218 struct ctl_req *req; 3219 struct ctl_frontend *fe; 3220 3221 req = (struct ctl_req *)addr; 3222 3223 fe = ctl_frontend_find(req->driver); 3224 if (fe == NULL) { 3225 req->status = CTL_LUN_ERROR; 3226 snprintf(req->error_str, sizeof(req->error_str), 3227 "Frontend \"%s\" not found.", req->driver); 3228 break; 3229 } 3230 if (req->num_args > 0) { 3231 req->kern_args = ctl_copyin_args(req->num_args, 3232 req->args, req->error_str, sizeof(req->error_str)); 3233 if (req->kern_args == NULL) { 3234 req->status = CTL_LUN_ERROR; 3235 break; 3236 } 3237 } 3238 3239 if (fe->ioctl) 3240 retval = fe->ioctl(dev, cmd, addr, flag, td); 3241 else 3242 retval = ENODEV; 3243 3244 if 
(req->num_args > 0) { 3245 ctl_copyout_args(req->num_args, req->kern_args); 3246 ctl_free_args(req->num_args, req->kern_args); 3247 } 3248 break; 3249 } 3250 case CTL_PORT_LIST: { 3251 struct sbuf *sb; 3252 struct ctl_port *port; 3253 struct ctl_lun_list *list; 3254 struct ctl_option *opt; 3255 int j; 3256 uint32_t plun; 3257 3258 list = (struct ctl_lun_list *)addr; 3259 3260 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3261 if (sb == NULL) { 3262 list->status = CTL_LUN_LIST_ERROR; 3263 snprintf(list->error_str, sizeof(list->error_str), 3264 "Unable to allocate %d bytes for LUN list", 3265 list->alloc_len); 3266 break; 3267 } 3268 3269 sbuf_printf(sb, "<ctlportlist>\n"); 3270 3271 mtx_lock(&softc->ctl_lock); 3272 STAILQ_FOREACH(port, &softc->port_list, links) { 3273 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3274 (uintmax_t)port->targ_port); 3275 3276 /* 3277 * Bail out as soon as we see that we've overfilled 3278 * the buffer. 3279 */ 3280 if (retval != 0) 3281 break; 3282 3283 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3284 "</frontend_type>\n", port->frontend->name); 3285 if (retval != 0) 3286 break; 3287 3288 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3289 port->port_type); 3290 if (retval != 0) 3291 break; 3292 3293 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3294 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3295 if (retval != 0) 3296 break; 3297 3298 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3299 port->port_name); 3300 if (retval != 0) 3301 break; 3302 3303 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3304 port->physical_port); 3305 if (retval != 0) 3306 break; 3307 3308 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3309 port->virtual_port); 3310 if (retval != 0) 3311 break; 3312 3313 if (port->target_devid != NULL) { 3314 sbuf_printf(sb, "\t<target>"); 3315 ctl_id_sbuf(port->target_devid, sb); 3316 sbuf_printf(sb, "</target>\n"); 3317 } 3318 3319 if (port->port_devid != NULL) { 3320 sbuf_printf(sb, "\t<port>"); 3321 ctl_id_sbuf(port->port_devid, sb); 3322 sbuf_printf(sb, "</port>\n"); 3323 } 3324 3325 if (port->port_info != NULL) { 3326 retval = port->port_info(port->onoff_arg, sb); 3327 if (retval != 0) 3328 break; 3329 } 3330 STAILQ_FOREACH(opt, &port->options, links) { 3331 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3332 opt->name, opt->value, opt->name); 3333 if (retval != 0) 3334 break; 3335 } 3336 3337 if (port->lun_map != NULL) { 3338 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3339 for (j = 0; j < CTL_MAX_LUNS; j++) { 3340 plun = ctl_lun_map_from_port(port, j); 3341 if (plun >= CTL_MAX_LUNS) 3342 continue; 3343 sbuf_printf(sb, 3344 "\t<lun id=\"%u\">%u</lun>\n", 3345 j, plun); 3346 } 3347 } 3348 3349 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3350 if (port->wwpn_iid[j].in_use == 0 || 3351 (port->wwpn_iid[j].wwpn == 0 && 3352 port->wwpn_iid[j].name == NULL)) 3353 continue; 3354 3355 if (port->wwpn_iid[j].name != NULL) 3356 retval = sbuf_printf(sb, 3357 "\t<initiator id=\"%u\">%s</initiator>\n", 3358 j, port->wwpn_iid[j].name); 3359 else 3360 retval = sbuf_printf(sb, 3361 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3362 j, port->wwpn_iid[j].wwpn); 3363 if (retval != 0) 3364 break; 3365 } 3366 if (retval != 0) 3367 break; 3368 3369 retval = sbuf_printf(sb, "</targ_port>\n"); 3370 if (retval != 0) 3371 break; 3372 } 3373 mtx_unlock(&softc->ctl_lock); 3374 3375 if ((retval != 0) 3376 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3377 
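/* The fixed-length sbuf overflowed; report NEED_MORE_SPACE so the caller can retry with a larger buffer, the same convention used for CTL_LUN_LIST above. */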
retval = 0; 3378 sbuf_delete(sb); 3379 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3380 snprintf(list->error_str, sizeof(list->error_str), 3381 "Out of space, %d bytes is too small", 3382 list->alloc_len); 3383 break; 3384 } 3385 3386 sbuf_finish(sb); 3387 3388 retval = copyout(sbuf_data(sb), list->lun_xml, 3389 sbuf_len(sb) + 1); 3390 3391 list->fill_len = sbuf_len(sb) + 1; 3392 list->status = CTL_LUN_LIST_OK; 3393 sbuf_delete(sb); 3394 break; 3395 } 3396 case CTL_LUN_MAP: { 3397 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3398 struct ctl_port *port; 3399 3400 mtx_lock(&softc->ctl_lock); 3401 if (lm->port < softc->port_min || 3402 lm->port >= softc->port_max || 3403 (port = softc->ctl_ports[lm->port]) == NULL) { 3404 mtx_unlock(&softc->ctl_lock); 3405 return (ENXIO); 3406 } 3407 if (port->status & CTL_PORT_STATUS_ONLINE) { 3408 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3409 if (ctl_lun_map_to_port(port, lun->lun) >= 3410 CTL_MAX_LUNS) 3411 continue; 3412 mtx_lock(&lun->lun_lock); 3413 ctl_est_ua_port(lun, lm->port, -1, 3414 CTL_UA_LUN_CHANGE); 3415 mtx_unlock(&lun->lun_lock); 3416 } 3417 } 3418 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3419 if (lm->plun < CTL_MAX_LUNS) { 3420 if (lm->lun == UINT32_MAX) 3421 retval = ctl_lun_map_unset(port, lm->plun); 3422 else if (lm->lun < CTL_MAX_LUNS && 3423 softc->ctl_luns[lm->lun] != NULL) 3424 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3425 else 3426 return (ENXIO); 3427 } else if (lm->plun == UINT32_MAX) { 3428 if (lm->lun == UINT32_MAX) 3429 retval = ctl_lun_map_deinit(port); 3430 else 3431 retval = ctl_lun_map_init(port); 3432 } else 3433 return (ENXIO); 3434 if (port->status & CTL_PORT_STATUS_ONLINE) 3435 ctl_isc_announce_port(port); 3436 break; 3437 } 3438 default: { 3439 /* XXX KDM should we fix this? */ 3440 #if 0 3441 struct ctl_backend_driver *backend; 3442 unsigned int type; 3443 int found; 3444 3445 found = 0; 3446 3447 /* 3448 * We encode the backend type as the ioctl type for backend 3449 * ioctls. So parse it out here, and then search for a 3450 * backend of this type. 
3451 */ 3452 type = _IOC_TYPE(cmd); 3453 3454 STAILQ_FOREACH(backend, &softc->be_list, links) { 3455 if (backend->type == type) { 3456 found = 1; 3457 break; 3458 } 3459 } 3460 if (found == 0) { 3461 printf("ctl: unknown ioctl command %#lx or backend " 3462 "%d\n", cmd, type); 3463 retval = EINVAL; 3464 break; 3465 } 3466 retval = backend->ioctl(dev, cmd, addr, flag, td); 3467 #endif 3468 retval = ENOTTY; 3469 break; 3470 } 3471 } 3472 return (retval); 3473 } 3474 3475 uint32_t 3476 ctl_get_initindex(struct ctl_nexus *nexus) 3477 { 3478 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3479 } 3480 3481 int 3482 ctl_lun_map_init(struct ctl_port *port) 3483 { 3484 struct ctl_softc *softc = control_softc; 3485 struct ctl_lun *lun; 3486 uint32_t i; 3487 3488 if (port->lun_map == NULL) 3489 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3490 M_CTL, M_NOWAIT); 3491 if (port->lun_map == NULL) 3492 return (ENOMEM); 3493 for (i = 0; i < CTL_MAX_LUNS; i++) 3494 port->lun_map[i] = UINT32_MAX; 3495 if (port->status & CTL_PORT_STATUS_ONLINE) { 3496 if (port->lun_disable != NULL) { 3497 STAILQ_FOREACH(lun, &softc->lun_list, links) 3498 port->lun_disable(port->targ_lun_arg, lun->lun); 3499 } 3500 ctl_isc_announce_port(port); 3501 } 3502 return (0); 3503 } 3504 3505 int 3506 ctl_lun_map_deinit(struct ctl_port *port) 3507 { 3508 struct ctl_softc *softc = control_softc; 3509 struct ctl_lun *lun; 3510 3511 if (port->lun_map == NULL) 3512 return (0); 3513 free(port->lun_map, M_CTL); 3514 port->lun_map = NULL; 3515 if (port->status & CTL_PORT_STATUS_ONLINE) { 3516 if (port->lun_enable != NULL) { 3517 STAILQ_FOREACH(lun, &softc->lun_list, links) 3518 port->lun_enable(port->targ_lun_arg, lun->lun); 3519 } 3520 ctl_isc_announce_port(port); 3521 } 3522 return (0); 3523 } 3524 3525 int 3526 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3527 { 3528 int status; 3529 uint32_t old; 3530 3531 if (port->lun_map == NULL) { 3532 status = ctl_lun_map_init(port); 3533 if (status != 0) 3534 return (status); 3535 } 3536 old = port->lun_map[plun]; 3537 port->lun_map[plun] = glun; 3538 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) { 3539 if (port->lun_enable != NULL) 3540 port->lun_enable(port->targ_lun_arg, plun); 3541 ctl_isc_announce_port(port); 3542 } 3543 return (0); 3544 } 3545 3546 int 3547 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3548 { 3549 uint32_t old; 3550 3551 if (port->lun_map == NULL) 3552 return (0); 3553 old = port->lun_map[plun]; 3554 port->lun_map[plun] = UINT32_MAX; 3555 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) { 3556 if (port->lun_disable != NULL) 3557 port->lun_disable(port->targ_lun_arg, plun); 3558 ctl_isc_announce_port(port); 3559 } 3560 return (0); 3561 } 3562 3563 uint32_t 3564 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3565 { 3566 3567 if (port == NULL) 3568 return (UINT32_MAX); 3569 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3570 return (lun_id); 3571 return (port->lun_map[lun_id]); 3572 } 3573 3574 uint32_t 3575 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3576 { 3577 uint32_t i; 3578 3579 if (port == NULL) 3580 return (UINT32_MAX); 3581 if (port->lun_map == NULL) 3582 return (lun_id); 3583 for (i = 0; i < CTL_MAX_LUNS; i++) { 3584 if (port->lun_map[i] == lun_id) 3585 return (i); 3586 } 3587 return (UINT32_MAX); 3588 } 3589 3590 static struct ctl_port * 3591 ctl_io_port(struct ctl_io_hdr *io_hdr) 3592 { 3593 3594 return 
(control_softc->ctl_ports[io_hdr->nexus.targ_port]); 3595 } 3596 3597 int 3598 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3599 { 3600 int i; 3601 3602 for (i = first; i < last; i++) { 3603 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3604 return (i); 3605 } 3606 return (-1); 3607 } 3608 3609 int 3610 ctl_set_mask(uint32_t *mask, uint32_t bit) 3611 { 3612 uint32_t chunk, piece; 3613 3614 chunk = bit >> 5; 3615 piece = bit % (sizeof(uint32_t) * 8); 3616 3617 if ((mask[chunk] & (1 << piece)) != 0) 3618 return (-1); 3619 else 3620 mask[chunk] |= (1 << piece); 3621 3622 return (0); 3623 } 3624 3625 int 3626 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3627 { 3628 uint32_t chunk, piece; 3629 3630 chunk = bit >> 5; 3631 piece = bit % (sizeof(uint32_t) * 8); 3632 3633 if ((mask[chunk] & (1 << piece)) == 0) 3634 return (-1); 3635 else 3636 mask[chunk] &= ~(1 << piece); 3637 3638 return (0); 3639 } 3640 3641 int 3642 ctl_is_set(uint32_t *mask, uint32_t bit) 3643 { 3644 uint32_t chunk, piece; 3645 3646 chunk = bit >> 5; 3647 piece = bit % (sizeof(uint32_t) * 8); 3648 3649 if ((mask[chunk] & (1 << piece)) == 0) 3650 return (0); 3651 else 3652 return (1); 3653 } 3654 3655 static uint64_t 3656 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3657 { 3658 uint64_t *t; 3659 3660 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3661 if (t == NULL) 3662 return (0); 3663 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3664 } 3665 3666 static void 3667 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3668 { 3669 uint64_t *t; 3670 3671 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3672 if (t == NULL) 3673 return; 3674 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3675 } 3676 3677 static void 3678 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3679 { 3680 uint64_t *p; 3681 u_int i; 3682 3683 i = residx/CTL_MAX_INIT_PER_PORT; 3684 if (lun->pr_keys[i] != NULL) 3685 return; 3686 mtx_unlock(&lun->lun_lock); 3687 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3688 M_WAITOK | M_ZERO); 3689 mtx_lock(&lun->lun_lock); 3690 if (lun->pr_keys[i] == NULL) 3691 lun->pr_keys[i] = p; 3692 else 3693 free(p, M_CTL); 3694 } 3695 3696 static void 3697 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3698 { 3699 uint64_t *t; 3700 3701 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3702 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3703 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3704 } 3705 3706 /* 3707 * ctl_softc, pool_name, total_ctl_io are passed in. 3708 * npool is passed out. 
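 * For example, ctl_init() above creates the inter-controller pool with ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &other_pool). With IO_POOLS defined this creates a secondary UMA zone on top of softc->io_zone; otherwise *npool simply refers to softc->io_zone itself.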
3709 */ 3710 int 3711 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3712 uint32_t total_ctl_io, void **npool) 3713 { 3714 #ifdef IO_POOLS 3715 struct ctl_io_pool *pool; 3716 3717 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3718 M_NOWAIT | M_ZERO); 3719 if (pool == NULL) 3720 return (ENOMEM); 3721 3722 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3723 pool->ctl_softc = ctl_softc; 3724 pool->zone = uma_zsecond_create(pool->name, NULL, 3725 NULL, NULL, NULL, ctl_softc->io_zone); 3726 /* uma_prealloc(pool->zone, total_ctl_io); */ 3727 3728 *npool = pool; 3729 #else 3730 *npool = ctl_softc->io_zone; 3731 #endif 3732 return (0); 3733 } 3734 3735 void 3736 ctl_pool_free(struct ctl_io_pool *pool) 3737 { 3738 3739 if (pool == NULL) 3740 return; 3741 3742 #ifdef IO_POOLS 3743 uma_zdestroy(pool->zone); 3744 free(pool, M_CTL); 3745 #endif 3746 } 3747 3748 union ctl_io * 3749 ctl_alloc_io(void *pool_ref) 3750 { 3751 union ctl_io *io; 3752 #ifdef IO_POOLS 3753 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3754 3755 io = uma_zalloc(pool->zone, M_WAITOK); 3756 #else 3757 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3758 #endif 3759 if (io != NULL) 3760 io->io_hdr.pool = pool_ref; 3761 return (io); 3762 } 3763 3764 union ctl_io * 3765 ctl_alloc_io_nowait(void *pool_ref) 3766 { 3767 union ctl_io *io; 3768 #ifdef IO_POOLS 3769 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3770 3771 io = uma_zalloc(pool->zone, M_NOWAIT); 3772 #else 3773 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3774 #endif 3775 if (io != NULL) 3776 io->io_hdr.pool = pool_ref; 3777 return (io); 3778 } 3779 3780 void 3781 ctl_free_io(union ctl_io *io) 3782 { 3783 #ifdef IO_POOLS 3784 struct ctl_io_pool *pool; 3785 #endif 3786 3787 if (io == NULL) 3788 return; 3789 3790 #ifdef IO_POOLS 3791 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3792 uma_zfree(pool->zone, io); 3793 #else 3794 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3795 #endif 3796 } 3797 3798 void 3799 ctl_zero_io(union ctl_io *io) 3800 { 3801 void *pool_ref; 3802 3803 if (io == NULL) 3804 return; 3805 3806 /* 3807 * May need to preserve linked list pointers at some point too. 3808 */ 3809 pool_ref = io->io_hdr.pool; 3810 memset(io, 0, sizeof(*io)); 3811 io->io_hdr.pool = pool_ref; 3812 } 3813 3814 /* 3815 * This routine is currently used for internal copies of ctl_ios that need 3816 * to persist for some reason after we've already returned status to the 3817 * FETD. (Thus the flag set.) 3818 * 3819 * XXX XXX 3820 * Note that this makes a blind copy of all fields in the ctl_io, except 3821 * for the pool reference. This includes any memory that has been 3822 * allocated! That memory will no longer be valid after done has been 3823 * called, so this would be VERY DANGEROUS for command that actually does 3824 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3825 * start and stop commands, which don't transfer any data, so this is not a 3826 * problem. If it is used for anything else, the caller would also need to 3827 * allocate data buffer space and this routine would need to be modified to 3828 * copy the data buffer(s) as well. 3829 */ 3830 void 3831 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3832 { 3833 void *pool_ref; 3834 3835 if ((src == NULL) 3836 || (dest == NULL)) 3837 return; 3838 3839 /* 3840 * May need to preserve linked list pointers at some point too. 
3841 */ 3842 pool_ref = dest->io_hdr.pool; 3843 3844 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3845 3846 dest->io_hdr.pool = pool_ref; 3847 /* 3848 * We need to know that this is an internal copy, and doesn't need 3849 * to get passed back to the FETD that allocated it. 3850 */ 3851 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3852 } 3853 3854 int 3855 ctl_expand_number(const char *buf, uint64_t *num) 3856 { 3857 char *endptr; 3858 uint64_t number; 3859 unsigned shift; 3860 3861 number = strtoq(buf, &endptr, 0); 3862 3863 switch (tolower((unsigned char)*endptr)) { 3864 case 'e': 3865 shift = 60; 3866 break; 3867 case 'p': 3868 shift = 50; 3869 break; 3870 case 't': 3871 shift = 40; 3872 break; 3873 case 'g': 3874 shift = 30; 3875 break; 3876 case 'm': 3877 shift = 20; 3878 break; 3879 case 'k': 3880 shift = 10; 3881 break; 3882 case 'b': 3883 case '\0': /* No unit. */ 3884 *num = number; 3885 return (0); 3886 default: 3887 /* Unrecognized unit. */ 3888 return (-1); 3889 } 3890 3891 if ((number << shift) >> shift != number) { 3892 /* Overflow */ 3893 return (-1); 3894 } 3895 *num = number << shift; 3896 return (0); 3897 } 3898 3899 3900 /* 3901 * This routine could be used in the future to load default and/or saved 3902 * mode page parameters for a particuar lun. 3903 */ 3904 static int 3905 ctl_init_page_index(struct ctl_lun *lun) 3906 { 3907 int i; 3908 struct ctl_page_index *page_index; 3909 const char *value; 3910 uint64_t ival; 3911 3912 memcpy(&lun->mode_pages.index, page_index_template, 3913 sizeof(page_index_template)); 3914 3915 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3916 3917 page_index = &lun->mode_pages.index[i]; 3918 /* 3919 * If this is a disk-only mode page, there's no point in 3920 * setting it up. For some pages, we have to have some 3921 * basic information about the disk in order to calculate the 3922 * mode page data. 3923 */ 3924 if ((lun->be_lun->lun_type != T_DIRECT) 3925 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3926 continue; 3927 3928 switch (page_index->page_code & SMPH_PC_MASK) { 3929 case SMS_RW_ERROR_RECOVERY_PAGE: { 3930 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3931 panic("subpage is incorrect!"); 3932 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3933 &rw_er_page_default, 3934 sizeof(rw_er_page_default)); 3935 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3936 &rw_er_page_changeable, 3937 sizeof(rw_er_page_changeable)); 3938 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3939 &rw_er_page_default, 3940 sizeof(rw_er_page_default)); 3941 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3942 &rw_er_page_default, 3943 sizeof(rw_er_page_default)); 3944 page_index->page_data = 3945 (uint8_t *)lun->mode_pages.rw_er_page; 3946 break; 3947 } 3948 case SMS_FORMAT_DEVICE_PAGE: { 3949 struct scsi_format_page *format_page; 3950 3951 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3952 panic("subpage is incorrect!"); 3953 3954 /* 3955 * Sectors per track are set above. Bytes per 3956 * sector need to be set here on a per-LUN basis. 
3957 */ 3958 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3959 &format_page_default, 3960 sizeof(format_page_default)); 3961 memcpy(&lun->mode_pages.format_page[ 3962 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3963 sizeof(format_page_changeable)); 3964 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3965 &format_page_default, 3966 sizeof(format_page_default)); 3967 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3968 &format_page_default, 3969 sizeof(format_page_default)); 3970 3971 format_page = &lun->mode_pages.format_page[ 3972 CTL_PAGE_CURRENT]; 3973 scsi_ulto2b(lun->be_lun->blocksize, 3974 format_page->bytes_per_sector); 3975 3976 format_page = &lun->mode_pages.format_page[ 3977 CTL_PAGE_DEFAULT]; 3978 scsi_ulto2b(lun->be_lun->blocksize, 3979 format_page->bytes_per_sector); 3980 3981 format_page = &lun->mode_pages.format_page[ 3982 CTL_PAGE_SAVED]; 3983 scsi_ulto2b(lun->be_lun->blocksize, 3984 format_page->bytes_per_sector); 3985 3986 page_index->page_data = 3987 (uint8_t *)lun->mode_pages.format_page; 3988 break; 3989 } 3990 case SMS_RIGID_DISK_PAGE: { 3991 struct scsi_rigid_disk_page *rigid_disk_page; 3992 uint32_t sectors_per_cylinder; 3993 uint64_t cylinders; 3994 #ifndef __XSCALE__ 3995 int shift; 3996 #endif /* !__XSCALE__ */ 3997 3998 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3999 panic("invalid subpage value %d", 4000 page_index->subpage); 4001 4002 /* 4003 * Rotation rate and sectors per track are set 4004 * above. We calculate the cylinders here based on 4005 * capacity. Due to the number of heads and 4006 * sectors per track we're using, smaller arrays 4007 * may turn out to have 0 cylinders. Linux and 4008 * FreeBSD don't pay attention to these mode pages 4009 * to figure out capacity, but Solaris does. It 4010 * seems to deal with 0 cylinders just fine, and 4011 * works out a fake geometry based on the capacity. 4012 */ 4013 memcpy(&lun->mode_pages.rigid_disk_page[ 4014 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4015 sizeof(rigid_disk_page_default)); 4016 memcpy(&lun->mode_pages.rigid_disk_page[ 4017 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4018 sizeof(rigid_disk_page_changeable)); 4019 4020 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4021 CTL_DEFAULT_HEADS; 4022 4023 /* 4024 * The divide method here will be more accurate, 4025 * probably, but results in floating point being 4026 * used in the kernel on i386 (__udivdi3()). On the 4027 * XScale, though, __udivdi3() is implemented in 4028 * software. 4029 * 4030 * The shift method for cylinder calculation is 4031 * accurate if sectors_per_cylinder is a power of 4032 * 2. Otherwise it might be slightly off -- you 4033 * might have a bit of a truncation problem. 4034 */ 4035 #ifdef __XSCALE__ 4036 cylinders = (lun->be_lun->maxlba + 1) / 4037 sectors_per_cylinder; 4038 #else 4039 for (shift = 31; shift > 0; shift--) { 4040 if (sectors_per_cylinder & (1 << shift)) 4041 break; 4042 } 4043 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4044 #endif 4045 4046 /* 4047 * We've basically got 3 bytes, or 24 bits for the 4048 * cylinder size in the mode page. If we're over, 4049 * just round down to 2^24. 
4050 */ 4051 if (cylinders > 0xffffff) 4052 cylinders = 0xffffff; 4053 4054 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4055 CTL_PAGE_DEFAULT]; 4056 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4057 4058 if ((value = ctl_get_opt(&lun->be_lun->options, 4059 "rpm")) != NULL) { 4060 scsi_ulto2b(strtol(value, NULL, 0), 4061 rigid_disk_page->rotation_rate); 4062 } 4063 4064 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4065 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4066 sizeof(rigid_disk_page_default)); 4067 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4068 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4069 sizeof(rigid_disk_page_default)); 4070 4071 page_index->page_data = 4072 (uint8_t *)lun->mode_pages.rigid_disk_page; 4073 break; 4074 } 4075 case SMS_CACHING_PAGE: { 4076 struct scsi_caching_page *caching_page; 4077 4078 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4079 panic("invalid subpage value %d", 4080 page_index->subpage); 4081 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4082 &caching_page_default, 4083 sizeof(caching_page_default)); 4084 memcpy(&lun->mode_pages.caching_page[ 4085 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4086 sizeof(caching_page_changeable)); 4087 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4088 &caching_page_default, 4089 sizeof(caching_page_default)); 4090 caching_page = &lun->mode_pages.caching_page[ 4091 CTL_PAGE_SAVED]; 4092 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4093 if (value != NULL && strcmp(value, "off") == 0) 4094 caching_page->flags1 &= ~SCP_WCE; 4095 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4096 if (value != NULL && strcmp(value, "off") == 0) 4097 caching_page->flags1 |= SCP_RCD; 4098 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4099 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4100 sizeof(caching_page_default)); 4101 page_index->page_data = 4102 (uint8_t *)lun->mode_pages.caching_page; 4103 break; 4104 } 4105 case SMS_CONTROL_MODE_PAGE: { 4106 switch (page_index->subpage) { 4107 case SMS_SUBPAGE_PAGE_0: { 4108 struct scsi_control_page *control_page; 4109 4110 memcpy(&lun->mode_pages.control_page[ 4111 CTL_PAGE_DEFAULT], 4112 &control_page_default, 4113 sizeof(control_page_default)); 4114 memcpy(&lun->mode_pages.control_page[ 4115 CTL_PAGE_CHANGEABLE], 4116 &control_page_changeable, 4117 sizeof(control_page_changeable)); 4118 memcpy(&lun->mode_pages.control_page[ 4119 CTL_PAGE_SAVED], 4120 &control_page_default, 4121 sizeof(control_page_default)); 4122 control_page = &lun->mode_pages.control_page[ 4123 CTL_PAGE_SAVED]; 4124 value = ctl_get_opt(&lun->be_lun->options, 4125 "reordering"); 4126 if (value != NULL && 4127 strcmp(value, "unrestricted") == 0) { 4128 control_page->queue_flags &= 4129 ~SCP_QUEUE_ALG_MASK; 4130 control_page->queue_flags |= 4131 SCP_QUEUE_ALG_UNRESTRICTED; 4132 } 4133 memcpy(&lun->mode_pages.control_page[ 4134 CTL_PAGE_CURRENT], 4135 &lun->mode_pages.control_page[ 4136 CTL_PAGE_SAVED], 4137 sizeof(control_page_default)); 4138 page_index->page_data = 4139 (uint8_t *)lun->mode_pages.control_page; 4140 break; 4141 } 4142 case 0x01: 4143 memcpy(&lun->mode_pages.control_ext_page[ 4144 CTL_PAGE_DEFAULT], 4145 &control_ext_page_default, 4146 sizeof(control_ext_page_default)); 4147 memcpy(&lun->mode_pages.control_ext_page[ 4148 CTL_PAGE_CHANGEABLE], 4149 &control_ext_page_changeable, 4150 sizeof(control_ext_page_changeable)); 4151 memcpy(&lun->mode_pages.control_ext_page[ 4152 CTL_PAGE_SAVED], 4153 
&control_ext_page_default, 4154 sizeof(control_ext_page_default)); 4155 memcpy(&lun->mode_pages.control_ext_page[ 4156 CTL_PAGE_CURRENT], 4157 &lun->mode_pages.control_ext_page[ 4158 CTL_PAGE_SAVED], 4159 sizeof(control_ext_page_default)); 4160 page_index->page_data = 4161 (uint8_t *)lun->mode_pages.control_ext_page; 4162 break; 4163 } 4164 break; 4165 } 4166 case SMS_INFO_EXCEPTIONS_PAGE: { 4167 switch (page_index->subpage) { 4168 case SMS_SUBPAGE_PAGE_0: 4169 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4170 &ie_page_default, 4171 sizeof(ie_page_default)); 4172 memcpy(&lun->mode_pages.ie_page[ 4173 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4174 sizeof(ie_page_changeable)); 4175 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4176 &ie_page_default, 4177 sizeof(ie_page_default)); 4178 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4179 &ie_page_default, 4180 sizeof(ie_page_default)); 4181 page_index->page_data = 4182 (uint8_t *)lun->mode_pages.ie_page; 4183 break; 4184 case 0x02: { 4185 struct ctl_logical_block_provisioning_page *page; 4186 4187 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4188 &lbp_page_default, 4189 sizeof(lbp_page_default)); 4190 memcpy(&lun->mode_pages.lbp_page[ 4191 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4192 sizeof(lbp_page_changeable)); 4193 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4194 &lbp_page_default, 4195 sizeof(lbp_page_default)); 4196 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4197 value = ctl_get_opt(&lun->be_lun->options, 4198 "avail-threshold"); 4199 if (value != NULL && 4200 ctl_expand_number(value, &ival) == 0) { 4201 page->descr[0].flags |= SLBPPD_ENABLED | 4202 SLBPPD_ARMING_DEC; 4203 if (lun->be_lun->blocksize) 4204 ival /= lun->be_lun->blocksize; 4205 else 4206 ival /= 512; 4207 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4208 page->descr[0].count); 4209 } 4210 value = ctl_get_opt(&lun->be_lun->options, 4211 "used-threshold"); 4212 if (value != NULL && 4213 ctl_expand_number(value, &ival) == 0) { 4214 page->descr[1].flags |= SLBPPD_ENABLED | 4215 SLBPPD_ARMING_INC; 4216 if (lun->be_lun->blocksize) 4217 ival /= lun->be_lun->blocksize; 4218 else 4219 ival /= 512; 4220 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4221 page->descr[1].count); 4222 } 4223 value = ctl_get_opt(&lun->be_lun->options, 4224 "pool-avail-threshold"); 4225 if (value != NULL && 4226 ctl_expand_number(value, &ival) == 0) { 4227 page->descr[2].flags |= SLBPPD_ENABLED | 4228 SLBPPD_ARMING_DEC; 4229 if (lun->be_lun->blocksize) 4230 ival /= lun->be_lun->blocksize; 4231 else 4232 ival /= 512; 4233 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4234 page->descr[2].count); 4235 } 4236 value = ctl_get_opt(&lun->be_lun->options, 4237 "pool-used-threshold"); 4238 if (value != NULL && 4239 ctl_expand_number(value, &ival) == 0) { 4240 page->descr[3].flags |= SLBPPD_ENABLED | 4241 SLBPPD_ARMING_INC; 4242 if (lun->be_lun->blocksize) 4243 ival /= lun->be_lun->blocksize; 4244 else 4245 ival /= 512; 4246 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4247 page->descr[3].count); 4248 } 4249 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4250 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4251 sizeof(lbp_page_default)); 4252 page_index->page_data = 4253 (uint8_t *)lun->mode_pages.lbp_page; 4254 }} 4255 break; 4256 } 4257 case SMS_VENDOR_SPECIFIC_PAGE:{ 4258 switch (page_index->subpage) { 4259 case DBGCNF_SUBPAGE_CODE: { 4260 struct copan_debugconf_subpage *current_page, 4261 *saved_page; 4262 4263 memcpy(&lun->mode_pages.debugconf_subpage[ 4264 CTL_PAGE_CURRENT], 4265 
&debugconf_page_default, 4266 sizeof(debugconf_page_default)); 4267 memcpy(&lun->mode_pages.debugconf_subpage[ 4268 CTL_PAGE_CHANGEABLE], 4269 &debugconf_page_changeable, 4270 sizeof(debugconf_page_changeable)); 4271 memcpy(&lun->mode_pages.debugconf_subpage[ 4272 CTL_PAGE_DEFAULT], 4273 &debugconf_page_default, 4274 sizeof(debugconf_page_default)); 4275 memcpy(&lun->mode_pages.debugconf_subpage[ 4276 CTL_PAGE_SAVED], 4277 &debugconf_page_default, 4278 sizeof(debugconf_page_default)); 4279 page_index->page_data = 4280 (uint8_t *)lun->mode_pages.debugconf_subpage; 4281 4282 current_page = (struct copan_debugconf_subpage *) 4283 (page_index->page_data + 4284 (page_index->page_len * 4285 CTL_PAGE_CURRENT)); 4286 saved_page = (struct copan_debugconf_subpage *) 4287 (page_index->page_data + 4288 (page_index->page_len * 4289 CTL_PAGE_SAVED)); 4290 break; 4291 } 4292 default: 4293 panic("invalid subpage value %d", 4294 page_index->subpage); 4295 break; 4296 } 4297 break; 4298 } 4299 default: 4300 panic("invalid page value %d", 4301 page_index->page_code & SMPH_PC_MASK); 4302 break; 4303 } 4304 } 4305 4306 return (CTL_RETVAL_COMPLETE); 4307 } 4308 4309 static int 4310 ctl_init_log_page_index(struct ctl_lun *lun) 4311 { 4312 struct ctl_page_index *page_index; 4313 int i, j, k, prev; 4314 4315 memcpy(&lun->log_pages.index, log_page_index_template, 4316 sizeof(log_page_index_template)); 4317 4318 prev = -1; 4319 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4320 4321 page_index = &lun->log_pages.index[i]; 4322 /* 4323 * If this is a disk-only mode page, there's no point in 4324 * setting it up. For some pages, we have to have some 4325 * basic information about the disk in order to calculate the 4326 * mode page data. 4327 */ 4328 if ((lun->be_lun->lun_type != T_DIRECT) 4329 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4330 continue; 4331 4332 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4333 lun->backend->lun_attr == NULL) 4334 continue; 4335 4336 if (page_index->page_code != prev) { 4337 lun->log_pages.pages_page[j] = page_index->page_code; 4338 prev = page_index->page_code; 4339 j++; 4340 } 4341 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4342 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4343 k++; 4344 } 4345 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4346 lun->log_pages.index[0].page_len = j; 4347 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4348 lun->log_pages.index[1].page_len = k * 2; 4349 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4350 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4351 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4352 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4353 4354 return (CTL_RETVAL_COMPLETE); 4355 } 4356 4357 static int 4358 hex2bin(const char *str, uint8_t *buf, int buf_size) 4359 { 4360 int i; 4361 u_char c; 4362 4363 memset(buf, 0, buf_size); 4364 while (isspace(str[0])) 4365 str++; 4366 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4367 str += 2; 4368 buf_size *= 2; 4369 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4370 c = str[i]; 4371 if (isdigit(c)) 4372 c -= '0'; 4373 else if (isalpha(c)) 4374 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4375 else 4376 break; 4377 if (c >= 16) 4378 break; 4379 if ((i & 1) == 0) 4380 buf[i / 2] |= (c << 4); 4381 else 4382 buf[i / 2] |= c; 4383 } 4384 return ((i + 1) / 2); 4385 } 4386 4387 /* 4388 * LUN allocation. 
4389 * 4390 * Requirements: 4391 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4392 * wants us to allocate the LUN and he can block. 4393 * - ctl_softc is always set 4394 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4395 * 4396 * Returns 0 for success, non-zero (errno) for failure. 4397 */ 4398 static int 4399 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4400 struct ctl_be_lun *const be_lun) 4401 { 4402 struct ctl_lun *nlun, *lun; 4403 struct scsi_vpd_id_descriptor *desc; 4404 struct scsi_vpd_id_t10 *t10id; 4405 const char *eui, *naa, *scsiname, *vendor; 4406 int lun_number, i, lun_malloced; 4407 int devidlen, idlen1, idlen2 = 0, len; 4408 4409 if (be_lun == NULL) 4410 return (EINVAL); 4411 4412 /* 4413 * We currently only support Direct Access or Processor LUN types. 4414 */ 4415 switch (be_lun->lun_type) { 4416 case T_DIRECT: 4417 break; 4418 case T_PROCESSOR: 4419 break; 4420 case T_SEQUENTIAL: 4421 case T_CHANGER: 4422 default: 4423 be_lun->lun_config_status(be_lun->be_lun, 4424 CTL_LUN_CONFIG_FAILURE); 4425 break; 4426 } 4427 if (ctl_lun == NULL) { 4428 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4429 lun_malloced = 1; 4430 } else { 4431 lun_malloced = 0; 4432 lun = ctl_lun; 4433 } 4434 4435 memset(lun, 0, sizeof(*lun)); 4436 if (lun_malloced) 4437 lun->flags = CTL_LUN_MALLOCED; 4438 4439 /* Generate LUN ID. */ 4440 devidlen = max(CTL_DEVID_MIN_LEN, 4441 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4442 idlen1 = sizeof(*t10id) + devidlen; 4443 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4444 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4445 if (scsiname != NULL) { 4446 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4447 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4448 } 4449 eui = ctl_get_opt(&be_lun->options, "eui"); 4450 if (eui != NULL) { 4451 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4452 } 4453 naa = ctl_get_opt(&be_lun->options, "naa"); 4454 if (naa != NULL) { 4455 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4456 } 4457 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4458 M_CTL, M_WAITOK | M_ZERO); 4459 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4460 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4461 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4462 desc->length = idlen1; 4463 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4464 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4465 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4466 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4467 } else { 4468 strncpy(t10id->vendor, vendor, 4469 min(sizeof(t10id->vendor), strlen(vendor))); 4470 } 4471 strncpy((char *)t10id->vendor_spec_id, 4472 (char *)be_lun->device_id, devidlen); 4473 if (scsiname != NULL) { 4474 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4475 desc->length); 4476 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4477 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4478 SVPD_ID_TYPE_SCSI_NAME; 4479 desc->length = idlen2; 4480 strlcpy(desc->identifier, scsiname, idlen2); 4481 } 4482 if (eui != NULL) { 4483 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4484 desc->length); 4485 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4486 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4487 SVPD_ID_TYPE_EUI64; 4488 desc->length = hex2bin(eui, desc->identifier, 16); 4489 desc->length = desc->length > 12 ? 16 : 4490 (desc->length > 8 ? 
12 : 8); 4491 len -= 16 - desc->length; 4492 } 4493 if (naa != NULL) { 4494 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4495 desc->length); 4496 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4497 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4498 SVPD_ID_TYPE_NAA; 4499 desc->length = hex2bin(naa, desc->identifier, 16); 4500 desc->length = desc->length > 8 ? 16 : 8; 4501 len -= 16 - desc->length; 4502 } 4503 lun->lun_devid->len = len; 4504 4505 mtx_lock(&ctl_softc->ctl_lock); 4506 /* 4507 * See if the caller requested a particular LUN number. If so, see 4508 * if it is available. Otherwise, allocate the first available LUN. 4509 */ 4510 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4511 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4512 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4513 mtx_unlock(&ctl_softc->ctl_lock); 4514 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4515 printf("ctl: requested LUN ID %d is higher " 4516 "than CTL_MAX_LUNS - 1 (%d)\n", 4517 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4518 } else { 4519 /* 4520 * XXX KDM return an error, or just assign 4521 * another LUN ID in this case?? 4522 */ 4523 printf("ctl: requested LUN ID %d is already " 4524 "in use\n", be_lun->req_lun_id); 4525 } 4526 if (lun->flags & CTL_LUN_MALLOCED) 4527 free(lun, M_CTL); 4528 be_lun->lun_config_status(be_lun->be_lun, 4529 CTL_LUN_CONFIG_FAILURE); 4530 return (ENOSPC); 4531 } 4532 lun_number = be_lun->req_lun_id; 4533 } else { 4534 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4535 if (lun_number == -1) { 4536 mtx_unlock(&ctl_softc->ctl_lock); 4537 printf("ctl: can't allocate LUN, out of LUNs\n"); 4538 if (lun->flags & CTL_LUN_MALLOCED) 4539 free(lun, M_CTL); 4540 be_lun->lun_config_status(be_lun->be_lun, 4541 CTL_LUN_CONFIG_FAILURE); 4542 return (ENOSPC); 4543 } 4544 } 4545 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4546 4547 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4548 lun->lun = lun_number; 4549 lun->be_lun = be_lun; 4550 /* 4551 * The processor LUN is always enabled. Disk LUNs come on line 4552 * disabled, and must be enabled by the backend. 4553 */ 4554 lun->flags |= CTL_LUN_DISABLED; 4555 lun->backend = be_lun->be; 4556 be_lun->ctl_lun = lun; 4557 be_lun->lun_id = lun_number; 4558 atomic_add_int(&be_lun->be->num_luns, 1); 4559 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4560 lun->flags |= CTL_LUN_OFFLINE; 4561 4562 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4563 lun->flags |= CTL_LUN_STOPPED; 4564 4565 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4566 lun->flags |= CTL_LUN_INOPERABLE; 4567 4568 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4569 lun->flags |= CTL_LUN_PRIMARY_SC; 4570 4571 lun->ctl_softc = ctl_softc; 4572 #ifdef CTL_TIME_IO 4573 lun->last_busy = getsbinuptime(); 4574 #endif 4575 TAILQ_INIT(&lun->ooa_queue); 4576 TAILQ_INIT(&lun->blocked_queue); 4577 STAILQ_INIT(&lun->error_list); 4578 ctl_tpc_lun_init(lun); 4579 4580 /* 4581 * Initialize the mode and log page index. 4582 */ 4583 ctl_init_page_index(lun); 4584 ctl_init_log_page_index(lun); 4585 4586 /* 4587 * Now, before we insert this lun on the lun list, set the lun 4588 * inventory changed UA for all other luns. 
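 * The intent is that every initiator sees a unit attention telling it
 * the reported LUN inventory has changed, so it knows to redo REPORT LUNS.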
4589 */ 4590 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4591 mtx_lock(&nlun->lun_lock); 4592 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4593 mtx_unlock(&nlun->lun_lock); 4594 } 4595 4596 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4597 4598 ctl_softc->ctl_luns[lun_number] = lun; 4599 4600 ctl_softc->num_luns++; 4601 4602 /* Setup statistics gathering */ 4603 lun->stats.device_type = be_lun->lun_type; 4604 lun->stats.lun_number = lun_number; 4605 if (lun->stats.device_type == T_DIRECT) 4606 lun->stats.blocksize = be_lun->blocksize; 4607 else 4608 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4609 for (i = 0;i < CTL_MAX_PORTS;i++) 4610 lun->stats.ports[i].targ_port = i; 4611 4612 mtx_unlock(&ctl_softc->ctl_lock); 4613 4614 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4615 return (0); 4616 } 4617 4618 /* 4619 * Delete a LUN. 4620 * Assumptions: 4621 * - LUN has already been marked invalid and any pending I/O has been taken 4622 * care of. 4623 */ 4624 static int 4625 ctl_free_lun(struct ctl_lun *lun) 4626 { 4627 struct ctl_softc *softc; 4628 struct ctl_lun *nlun; 4629 int i; 4630 4631 softc = lun->ctl_softc; 4632 4633 mtx_assert(&softc->ctl_lock, MA_OWNED); 4634 4635 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4636 4637 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4638 4639 softc->ctl_luns[lun->lun] = NULL; 4640 4641 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4642 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4643 4644 softc->num_luns--; 4645 4646 /* 4647 * Tell the backend to free resources, if this LUN has a backend. 4648 */ 4649 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4650 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4651 4652 ctl_tpc_lun_shutdown(lun); 4653 mtx_destroy(&lun->lun_lock); 4654 free(lun->lun_devid, M_CTL); 4655 for (i = 0; i < CTL_MAX_PORTS; i++) 4656 free(lun->pending_ua[i], M_CTL); 4657 for (i = 0; i < CTL_MAX_PORTS; i++) 4658 free(lun->pr_keys[i], M_CTL); 4659 free(lun->write_buffer, M_CTL); 4660 if (lun->flags & CTL_LUN_MALLOCED) 4661 free(lun, M_CTL); 4662 4663 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4664 mtx_lock(&nlun->lun_lock); 4665 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4666 mtx_unlock(&nlun->lun_lock); 4667 } 4668 4669 return (0); 4670 } 4671 4672 static void 4673 ctl_create_lun(struct ctl_be_lun *be_lun) 4674 { 4675 struct ctl_softc *softc; 4676 4677 softc = control_softc; 4678 4679 /* 4680 * ctl_alloc_lun() should handle all potential failure cases. 4681 */ 4682 ctl_alloc_lun(softc, NULL, be_lun); 4683 } 4684 4685 int 4686 ctl_add_lun(struct ctl_be_lun *be_lun) 4687 { 4688 struct ctl_softc *softc = control_softc; 4689 4690 mtx_lock(&softc->ctl_lock); 4691 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4692 mtx_unlock(&softc->ctl_lock); 4693 wakeup(&softc->pending_lun_queue); 4694 4695 return (0); 4696 } 4697 4698 int 4699 ctl_enable_lun(struct ctl_be_lun *be_lun) 4700 { 4701 struct ctl_softc *softc; 4702 struct ctl_port *port, *nport; 4703 struct ctl_lun *lun; 4704 int retval; 4705 4706 lun = (struct ctl_lun *)be_lun->ctl_lun; 4707 softc = lun->ctl_softc; 4708 4709 mtx_lock(&softc->ctl_lock); 4710 mtx_lock(&lun->lun_lock); 4711 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4712 /* 4713 * eh? Why did we get called if the LUN is already 4714 * enabled? 
4715 */ 4716 mtx_unlock(&lun->lun_lock); 4717 mtx_unlock(&softc->ctl_lock); 4718 return (0); 4719 } 4720 lun->flags &= ~CTL_LUN_DISABLED; 4721 mtx_unlock(&lun->lun_lock); 4722 4723 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4724 nport = STAILQ_NEXT(port, links); 4725 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4726 port->lun_map != NULL || port->lun_enable == NULL) 4727 continue; 4728 4729 /* 4730 * Drop the lock while we call the FETD's enable routine. 4731 * This can lead to a callback into CTL (at least in the 4732 * case of the internal initiator frontend. 4733 */ 4734 mtx_unlock(&softc->ctl_lock); 4735 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4736 mtx_lock(&softc->ctl_lock); 4737 if (retval != 0) { 4738 printf("%s: FETD %s port %d returned error " 4739 "%d for lun_enable on lun %jd\n", 4740 __func__, port->port_name, port->targ_port, 4741 retval, (intmax_t)lun->lun); 4742 } 4743 } 4744 4745 mtx_unlock(&softc->ctl_lock); 4746 ctl_isc_announce_lun(lun); 4747 4748 return (0); 4749 } 4750 4751 int 4752 ctl_disable_lun(struct ctl_be_lun *be_lun) 4753 { 4754 struct ctl_softc *softc; 4755 struct ctl_port *port; 4756 struct ctl_lun *lun; 4757 int retval; 4758 4759 lun = (struct ctl_lun *)be_lun->ctl_lun; 4760 softc = lun->ctl_softc; 4761 4762 mtx_lock(&softc->ctl_lock); 4763 mtx_lock(&lun->lun_lock); 4764 if (lun->flags & CTL_LUN_DISABLED) { 4765 mtx_unlock(&lun->lun_lock); 4766 mtx_unlock(&softc->ctl_lock); 4767 return (0); 4768 } 4769 lun->flags |= CTL_LUN_DISABLED; 4770 mtx_unlock(&lun->lun_lock); 4771 4772 STAILQ_FOREACH(port, &softc->port_list, links) { 4773 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4774 port->lun_map != NULL || port->lun_disable == NULL) 4775 continue; 4776 4777 /* 4778 * Drop the lock before we call the frontend's disable 4779 * routine, to avoid lock order reversals. 4780 * 4781 * XXX KDM what happens if the frontend list changes while 4782 * we're traversing it? It's unlikely, but should be handled. 
4783 */ 4784 mtx_unlock(&softc->ctl_lock); 4785 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4786 mtx_lock(&softc->ctl_lock); 4787 if (retval != 0) { 4788 printf("%s: FETD %s port %d returned error " 4789 "%d for lun_disable on lun %jd\n", 4790 __func__, port->port_name, port->targ_port, 4791 retval, (intmax_t)lun->lun); 4792 } 4793 } 4794 4795 mtx_unlock(&softc->ctl_lock); 4796 ctl_isc_announce_lun(lun); 4797 4798 return (0); 4799 } 4800 4801 int 4802 ctl_start_lun(struct ctl_be_lun *be_lun) 4803 { 4804 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4805 4806 mtx_lock(&lun->lun_lock); 4807 lun->flags &= ~CTL_LUN_STOPPED; 4808 mtx_unlock(&lun->lun_lock); 4809 return (0); 4810 } 4811 4812 int 4813 ctl_stop_lun(struct ctl_be_lun *be_lun) 4814 { 4815 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4816 4817 mtx_lock(&lun->lun_lock); 4818 lun->flags |= CTL_LUN_STOPPED; 4819 mtx_unlock(&lun->lun_lock); 4820 return (0); 4821 } 4822 4823 int 4824 ctl_lun_offline(struct ctl_be_lun *be_lun) 4825 { 4826 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4827 4828 mtx_lock(&lun->lun_lock); 4829 lun->flags |= CTL_LUN_OFFLINE; 4830 mtx_unlock(&lun->lun_lock); 4831 return (0); 4832 } 4833 4834 int 4835 ctl_lun_online(struct ctl_be_lun *be_lun) 4836 { 4837 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4838 4839 mtx_lock(&lun->lun_lock); 4840 lun->flags &= ~CTL_LUN_OFFLINE; 4841 mtx_unlock(&lun->lun_lock); 4842 return (0); 4843 } 4844 4845 int 4846 ctl_lun_primary(struct ctl_be_lun *be_lun) 4847 { 4848 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4849 4850 mtx_lock(&lun->lun_lock); 4851 lun->flags |= CTL_LUN_PRIMARY_SC; 4852 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4853 mtx_unlock(&lun->lun_lock); 4854 ctl_isc_announce_lun(lun); 4855 return (0); 4856 } 4857 4858 int 4859 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4860 { 4861 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4862 4863 mtx_lock(&lun->lun_lock); 4864 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4865 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4866 mtx_unlock(&lun->lun_lock); 4867 ctl_isc_announce_lun(lun); 4868 return (0); 4869 } 4870 4871 int 4872 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4873 { 4874 struct ctl_softc *softc; 4875 struct ctl_lun *lun; 4876 4877 lun = (struct ctl_lun *)be_lun->ctl_lun; 4878 softc = lun->ctl_softc; 4879 4880 mtx_lock(&lun->lun_lock); 4881 4882 /* 4883 * The LUN needs to be disabled before it can be marked invalid. 4884 */ 4885 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4886 mtx_unlock(&lun->lun_lock); 4887 return (-1); 4888 } 4889 /* 4890 * Mark the LUN invalid. 4891 */ 4892 lun->flags |= CTL_LUN_INVALID; 4893 4894 /* 4895 * If there is nothing in the OOA queue, go ahead and free the LUN. 4896 * If we have something in the OOA queue, we'll free it when the 4897 * last I/O completes. 
4898 */ 4899 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4900 mtx_unlock(&lun->lun_lock); 4901 mtx_lock(&softc->ctl_lock); 4902 ctl_free_lun(lun); 4903 mtx_unlock(&softc->ctl_lock); 4904 } else 4905 mtx_unlock(&lun->lun_lock); 4906 4907 return (0); 4908 } 4909 4910 int 4911 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4912 { 4913 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4914 4915 mtx_lock(&lun->lun_lock); 4916 lun->flags |= CTL_LUN_INOPERABLE; 4917 mtx_unlock(&lun->lun_lock); 4918 return (0); 4919 } 4920 4921 int 4922 ctl_lun_operable(struct ctl_be_lun *be_lun) 4923 { 4924 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4925 4926 mtx_lock(&lun->lun_lock); 4927 lun->flags &= ~CTL_LUN_INOPERABLE; 4928 mtx_unlock(&lun->lun_lock); 4929 return (0); 4930 } 4931 4932 void 4933 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4934 { 4935 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4936 union ctl_ha_msg msg; 4937 4938 mtx_lock(&lun->lun_lock); 4939 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4940 mtx_unlock(&lun->lun_lock); 4941 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4942 /* Send msg to other side. */ 4943 bzero(&msg.ua, sizeof(msg.ua)); 4944 msg.hdr.msg_type = CTL_MSG_UA; 4945 msg.hdr.nexus.initid = -1; 4946 msg.hdr.nexus.targ_port = -1; 4947 msg.hdr.nexus.targ_lun = lun->lun; 4948 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4949 msg.ua.ua_all = 1; 4950 msg.ua.ua_set = 1; 4951 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED; 4952 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4953 M_WAITOK); 4954 } 4955 } 4956 4957 /* 4958 * Backend "memory move is complete" callback for requests that never 4959 * make it down to say RAIDCore's configuration code. 4960 */ 4961 int 4962 ctl_config_move_done(union ctl_io *io) 4963 { 4964 int retval; 4965 4966 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4967 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4968 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4969 4970 if ((io->io_hdr.port_status != 0) && 4971 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4972 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4973 /* 4974 * For hardware error sense keys, the sense key 4975 * specific value is defined to be a retry count, 4976 * but we use it to pass back an internal FETD 4977 * error code. XXX KDM Hopefully the FETD is only 4978 * using 16 bits for an error code, since that's 4979 * all the space we have in the sks field. 4980 */ 4981 ctl_set_internal_failure(&io->scsiio, 4982 /*sks_valid*/ 1, 4983 /*retry_count*/ 4984 io->io_hdr.port_status); 4985 } 4986 4987 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4988 ctl_data_print(io); 4989 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4990 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4991 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4992 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4993 /* 4994 * XXX KDM just assuming a single pointer here, and not a 4995 * S/G list. If we start using S/G lists for config data, 4996 * we'll need to know how to clean them up here as well. 4997 */ 4998 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4999 free(io->scsiio.kern_data_ptr, M_CTL); 5000 ctl_done(io); 5001 retval = CTL_RETVAL_COMPLETE; 5002 } else { 5003 /* 5004 * XXX KDM now we need to continue data movement. Some 5005 * options: 5006 * - call ctl_scsiio() again? We don't do this for data 5007 * writes, because for those at least we know ahead of 5008 * time where the write will go and how long it is. 
For 5009 * config writes, though, that information is largely 5010 * contained within the write itself, thus we need to 5011 * parse out the data again. 5012 * 5013 * - Call some other function once the data is in? 5014 */ 5015 5016 /* 5017 * XXX KDM call ctl_scsiio() again for now, and check flag 5018 * bits to see whether we're allocated or not. 5019 */ 5020 retval = ctl_scsiio(&io->scsiio); 5021 } 5022 return (retval); 5023 } 5024 5025 /* 5026 * This gets called by a backend driver when it is done with a 5027 * data_submit method. 5028 */ 5029 void 5030 ctl_data_submit_done(union ctl_io *io) 5031 { 5032 /* 5033 * If the IO_CONT flag is set, we need to call the supplied 5034 * function to continue processing the I/O, instead of completing 5035 * the I/O just yet. 5036 * 5037 * If there is an error, though, we don't want to keep processing. 5038 * Instead, just send status back to the initiator. 5039 */ 5040 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5041 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5042 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5043 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5044 io->scsiio.io_cont(io); 5045 return; 5046 } 5047 ctl_done(io); 5048 } 5049 5050 /* 5051 * This gets called by a backend driver when it is done with a 5052 * configuration write. 5053 */ 5054 void 5055 ctl_config_write_done(union ctl_io *io) 5056 { 5057 uint8_t *buf; 5058 5059 /* 5060 * If the IO_CONT flag is set, we need to call the supplied 5061 * function to continue processing the I/O, instead of completing 5062 * the I/O just yet. 5063 * 5064 * If there is an error, though, we don't want to keep processing. 5065 * Instead, just send status back to the initiator. 5066 */ 5067 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5068 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5069 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5070 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5071 io->scsiio.io_cont(io); 5072 return; 5073 } 5074 /* 5075 * Since a configuration write can be done for commands that actually 5076 * have data allocated, like write buffer, and commands that have 5077 * no data, like start/stop unit, we need to check here. 5078 */ 5079 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5080 buf = io->scsiio.kern_data_ptr; 5081 else 5082 buf = NULL; 5083 ctl_done(io); 5084 if (buf) 5085 free(buf, M_CTL); 5086 } 5087 5088 void 5089 ctl_config_read_done(union ctl_io *io) 5090 { 5091 uint8_t *buf; 5092 5093 /* 5094 * If there is some error -- we are done, skip data transfer. 5095 */ 5096 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5097 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5098 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5099 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5100 buf = io->scsiio.kern_data_ptr; 5101 else 5102 buf = NULL; 5103 ctl_done(io); 5104 if (buf) 5105 free(buf, M_CTL); 5106 return; 5107 } 5108 5109 /* 5110 * If the IO_CONT flag is set, we need to call the supplied 5111 * function to continue processing the I/O, instead of completing 5112 * the I/O just yet. 5113 */ 5114 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5115 io->scsiio.io_cont(io); 5116 return; 5117 } 5118 5119 ctl_datamove(io); 5120 } 5121 5122 /* 5123 * SCSI release command. 
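 * Only plain LUN-wide reservations are supported here; third-party and
 * extent reservations (and thus the parameter list) are not, as noted
 * in the body below.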
5124 */ 5125 int 5126 ctl_scsi_release(struct ctl_scsiio *ctsio) 5127 { 5128 int length, longid, thirdparty_id, resv_id; 5129 struct ctl_lun *lun; 5130 uint32_t residx; 5131 5132 length = 0; 5133 resv_id = 0; 5134 5135 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5136 5137 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5138 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5139 5140 switch (ctsio->cdb[0]) { 5141 case RELEASE_10: { 5142 struct scsi_release_10 *cdb; 5143 5144 cdb = (struct scsi_release_10 *)ctsio->cdb; 5145 5146 if (cdb->byte2 & SR10_LONGID) 5147 longid = 1; 5148 else 5149 thirdparty_id = cdb->thirdparty_id; 5150 5151 resv_id = cdb->resv_id; 5152 length = scsi_2btoul(cdb->length); 5153 break; 5154 } 5155 } 5156 5157 5158 /* 5159 * XXX KDM right now, we only support LUN reservation. We don't 5160 * support 3rd party reservations, or extent reservations, which 5161 * might actually need the parameter list. If we've gotten this 5162 * far, we've got a LUN reservation. Anything else got kicked out 5163 * above. So, according to SPC, ignore the length. 5164 */ 5165 length = 0; 5166 5167 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5168 && (length > 0)) { 5169 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5170 ctsio->kern_data_len = length; 5171 ctsio->kern_total_len = length; 5172 ctsio->kern_data_resid = 0; 5173 ctsio->kern_rel_offset = 0; 5174 ctsio->kern_sg_entries = 0; 5175 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5176 ctsio->be_move_done = ctl_config_move_done; 5177 ctl_datamove((union ctl_io *)ctsio); 5178 5179 return (CTL_RETVAL_COMPLETE); 5180 } 5181 5182 if (length > 0) 5183 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5184 5185 mtx_lock(&lun->lun_lock); 5186 5187 /* 5188 * According to SPC, it is not an error for an intiator to attempt 5189 * to release a reservation on a LUN that isn't reserved, or that 5190 * is reserved by another initiator. The reservation can only be 5191 * released, though, by the initiator who made it or by one of 5192 * several reset type events. 5193 */ 5194 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5195 lun->flags &= ~CTL_LUN_RESERVED; 5196 5197 mtx_unlock(&lun->lun_lock); 5198 5199 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5200 free(ctsio->kern_data_ptr, M_CTL); 5201 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5202 } 5203 5204 ctl_set_success(ctsio); 5205 ctl_done((union ctl_io *)ctsio); 5206 return (CTL_RETVAL_COMPLETE); 5207 } 5208 5209 int 5210 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5211 { 5212 int extent, thirdparty, longid; 5213 int resv_id, length; 5214 uint64_t thirdparty_id; 5215 struct ctl_lun *lun; 5216 uint32_t residx; 5217 5218 extent = 0; 5219 thirdparty = 0; 5220 longid = 0; 5221 resv_id = 0; 5222 length = 0; 5223 thirdparty_id = 0; 5224 5225 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5226 5227 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5228 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5229 5230 switch (ctsio->cdb[0]) { 5231 case RESERVE_10: { 5232 struct scsi_reserve_10 *cdb; 5233 5234 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5235 5236 if (cdb->byte2 & SR10_LONGID) 5237 longid = 1; 5238 else 5239 thirdparty_id = cdb->thirdparty_id; 5240 5241 resv_id = cdb->resv_id; 5242 length = scsi_2btoul(cdb->length); 5243 break; 5244 } 5245 } 5246 5247 /* 5248 * XXX KDM right now, we only support LUN reservation. We don't 5249 * support 3rd party reservations, or extent reservations, which 5250 * might actually need the parameter list. 
If we've gotten this 5251 * far, we've got a LUN reservation. Anything else got kicked out 5252 * above. So, according to SPC, ignore the length. 5253 */ 5254 length = 0; 5255 5256 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5257 && (length > 0)) { 5258 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5259 ctsio->kern_data_len = length; 5260 ctsio->kern_total_len = length; 5261 ctsio->kern_data_resid = 0; 5262 ctsio->kern_rel_offset = 0; 5263 ctsio->kern_sg_entries = 0; 5264 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5265 ctsio->be_move_done = ctl_config_move_done; 5266 ctl_datamove((union ctl_io *)ctsio); 5267 5268 return (CTL_RETVAL_COMPLETE); 5269 } 5270 5271 if (length > 0) 5272 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5273 5274 mtx_lock(&lun->lun_lock); 5275 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5276 ctl_set_reservation_conflict(ctsio); 5277 goto bailout; 5278 } 5279 5280 lun->flags |= CTL_LUN_RESERVED; 5281 lun->res_idx = residx; 5282 5283 ctl_set_success(ctsio); 5284 5285 bailout: 5286 mtx_unlock(&lun->lun_lock); 5287 5288 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5289 free(ctsio->kern_data_ptr, M_CTL); 5290 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5291 } 5292 5293 ctl_done((union ctl_io *)ctsio); 5294 return (CTL_RETVAL_COMPLETE); 5295 } 5296 5297 int 5298 ctl_start_stop(struct ctl_scsiio *ctsio) 5299 { 5300 struct scsi_start_stop_unit *cdb; 5301 struct ctl_lun *lun; 5302 int retval; 5303 5304 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5305 5306 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5307 retval = 0; 5308 5309 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5310 5311 /* 5312 * XXX KDM 5313 * We don't support the immediate bit on a stop unit. In order to 5314 * do that, we would need to code up a way to know that a stop is 5315 * pending, and hold off any new commands until it completes, one 5316 * way or another. Then we could accept or reject those commands 5317 * depending on its status. We would almost need to do the reverse 5318 * of what we do below for an immediate start -- return the copy of 5319 * the ctl_io to the FETD with status to send to the host (and to 5320 * free the copy!) and then free the original I/O once the stop 5321 * actually completes. That way, the OOA queue mechanism can work 5322 * to block commands that shouldn't proceed. Another alternative 5323 * would be to put the copy in the queue in place of the original, 5324 * and return the original back to the caller. That could be 5325 * slightly safer.. 5326 */ 5327 if ((cdb->byte2 & SSS_IMMED) 5328 && ((cdb->how & SSS_START) == 0)) { 5329 ctl_set_invalid_field(ctsio, 5330 /*sks_valid*/ 1, 5331 /*command*/ 1, 5332 /*field*/ 1, 5333 /*bit_valid*/ 1, 5334 /*bit*/ 0); 5335 ctl_done((union ctl_io *)ctsio); 5336 return (CTL_RETVAL_COMPLETE); 5337 } 5338 5339 if ((lun->flags & CTL_LUN_PR_RESERVED) 5340 && ((cdb->how & SSS_START)==0)) { 5341 uint32_t residx; 5342 5343 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5344 if (ctl_get_prkey(lun, residx) == 0 5345 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5346 5347 ctl_set_reservation_conflict(ctsio); 5348 ctl_done((union ctl_io *)ctsio); 5349 return (CTL_RETVAL_COMPLETE); 5350 } 5351 } 5352 5353 /* 5354 * If there is no backend on this device, we can't start or stop 5355 * it. In theory we shouldn't get any start/stop commands in the 5356 * first place at this level if the LUN doesn't have a backend. 5357 * That should get stopped by the command decode code. 
5358 */ 5359 if (lun->backend == NULL) { 5360 ctl_set_invalid_opcode(ctsio); 5361 ctl_done((union ctl_io *)ctsio); 5362 return (CTL_RETVAL_COMPLETE); 5363 } 5364 5365 /* 5366 * XXX KDM Copan-specific offline behavior. 5367 * Figure out a reasonable way to port this? 5368 */ 5369 #ifdef NEEDTOPORT 5370 mtx_lock(&lun->lun_lock); 5371 5372 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5373 && (lun->flags & CTL_LUN_OFFLINE)) { 5374 /* 5375 * If the LUN is offline, and the on/offline bit isn't set, 5376 * reject the start or stop. Otherwise, let it through. 5377 */ 5378 mtx_unlock(&lun->lun_lock); 5379 ctl_set_lun_not_ready(ctsio); 5380 ctl_done((union ctl_io *)ctsio); 5381 } else { 5382 mtx_unlock(&lun->lun_lock); 5383 #endif /* NEEDTOPORT */ 5384 /* 5385 * This could be a start or a stop when we're online, 5386 * or a stop/offline or start/online. A start or stop when 5387 * we're offline is covered in the case above. 5388 */ 5389 /* 5390 * In the non-immediate case, we send the request to 5391 * the backend and return status to the user when 5392 * it is done. 5393 * 5394 * In the immediate case, we allocate a new ctl_io 5395 * to hold a copy of the request, and send that to 5396 * the backend. We then set good status on the 5397 * user's request and return it immediately. 5398 */ 5399 if (cdb->byte2 & SSS_IMMED) { 5400 union ctl_io *new_io; 5401 5402 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5403 ctl_copy_io((union ctl_io *)ctsio, new_io); 5404 retval = lun->backend->config_write(new_io); 5405 ctl_set_success(ctsio); 5406 ctl_done((union ctl_io *)ctsio); 5407 } else { 5408 retval = lun->backend->config_write( 5409 (union ctl_io *)ctsio); 5410 } 5411 #ifdef NEEDTOPORT 5412 } 5413 #endif 5414 return (retval); 5415 } 5416 5417 /* 5418 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5419 * we don't really do anything with the LBA and length fields if the user 5420 * passes them in. Instead we'll just flush out the cache for the entire 5421 * LUN. 5422 */ 5423 int 5424 ctl_sync_cache(struct ctl_scsiio *ctsio) 5425 { 5426 struct ctl_lun *lun; 5427 struct ctl_softc *softc; 5428 struct ctl_lba_len_flags *lbalen; 5429 uint64_t starting_lba; 5430 uint32_t block_count; 5431 int retval; 5432 uint8_t byte2; 5433 5434 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5435 5436 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5437 softc = lun->ctl_softc; 5438 retval = 0; 5439 5440 switch (ctsio->cdb[0]) { 5441 case SYNCHRONIZE_CACHE: { 5442 struct scsi_sync_cache *cdb; 5443 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5444 5445 starting_lba = scsi_4btoul(cdb->begin_lba); 5446 block_count = scsi_2btoul(cdb->lb_count); 5447 byte2 = cdb->byte2; 5448 break; 5449 } 5450 case SYNCHRONIZE_CACHE_16: { 5451 struct scsi_sync_cache_16 *cdb; 5452 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5453 5454 starting_lba = scsi_8btou64(cdb->begin_lba); 5455 block_count = scsi_4btoul(cdb->lb_count); 5456 byte2 = cdb->byte2; 5457 break; 5458 } 5459 default: 5460 ctl_set_invalid_opcode(ctsio); 5461 ctl_done((union ctl_io *)ctsio); 5462 goto bailout; 5463 break; /* NOTREACHED */ 5464 } 5465 5466 /* 5467 * We check the LBA and length, but don't do anything with them. 5468 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5469 * get flushed. This check will just help satisfy anyone who wants 5470 * to see an error for an out of range LBA. 
5471 */ 5472 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5473 ctl_set_lba_out_of_range(ctsio); 5474 ctl_done((union ctl_io *)ctsio); 5475 goto bailout; 5476 } 5477 5478 /* 5479 * If this LUN has no backend, we can't flush the cache anyway. 5480 */ 5481 if (lun->backend == NULL) { 5482 ctl_set_invalid_opcode(ctsio); 5483 ctl_done((union ctl_io *)ctsio); 5484 goto bailout; 5485 } 5486 5487 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5488 lbalen->lba = starting_lba; 5489 lbalen->len = block_count; 5490 lbalen->flags = byte2; 5491 5492 /* 5493 * Check to see whether we're configured to send the SYNCHRONIZE 5494 * CACHE command directly to the back end. 5495 */ 5496 mtx_lock(&lun->lun_lock); 5497 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5498 && (++(lun->sync_count) >= lun->sync_interval)) { 5499 lun->sync_count = 0; 5500 mtx_unlock(&lun->lun_lock); 5501 retval = lun->backend->config_write((union ctl_io *)ctsio); 5502 } else { 5503 mtx_unlock(&lun->lun_lock); 5504 ctl_set_success(ctsio); 5505 ctl_done((union ctl_io *)ctsio); 5506 } 5507 5508 bailout: 5509 5510 return (retval); 5511 } 5512 5513 int 5514 ctl_format(struct ctl_scsiio *ctsio) 5515 { 5516 struct scsi_format *cdb; 5517 struct ctl_lun *lun; 5518 int length, defect_list_len; 5519 5520 CTL_DEBUG_PRINT(("ctl_format\n")); 5521 5522 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5523 5524 cdb = (struct scsi_format *)ctsio->cdb; 5525 5526 length = 0; 5527 if (cdb->byte2 & SF_FMTDATA) { 5528 if (cdb->byte2 & SF_LONGLIST) 5529 length = sizeof(struct scsi_format_header_long); 5530 else 5531 length = sizeof(struct scsi_format_header_short); 5532 } 5533 5534 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5535 && (length > 0)) { 5536 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5537 ctsio->kern_data_len = length; 5538 ctsio->kern_total_len = length; 5539 ctsio->kern_data_resid = 0; 5540 ctsio->kern_rel_offset = 0; 5541 ctsio->kern_sg_entries = 0; 5542 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5543 ctsio->be_move_done = ctl_config_move_done; 5544 ctl_datamove((union ctl_io *)ctsio); 5545 5546 return (CTL_RETVAL_COMPLETE); 5547 } 5548 5549 defect_list_len = 0; 5550 5551 if (cdb->byte2 & SF_FMTDATA) { 5552 if (cdb->byte2 & SF_LONGLIST) { 5553 struct scsi_format_header_long *header; 5554 5555 header = (struct scsi_format_header_long *) 5556 ctsio->kern_data_ptr; 5557 5558 defect_list_len = scsi_4btoul(header->defect_list_len); 5559 if (defect_list_len != 0) { 5560 ctl_set_invalid_field(ctsio, 5561 /*sks_valid*/ 1, 5562 /*command*/ 0, 5563 /*field*/ 2, 5564 /*bit_valid*/ 0, 5565 /*bit*/ 0); 5566 goto bailout; 5567 } 5568 } else { 5569 struct scsi_format_header_short *header; 5570 5571 header = (struct scsi_format_header_short *) 5572 ctsio->kern_data_ptr; 5573 5574 defect_list_len = scsi_2btoul(header->defect_list_len); 5575 if (defect_list_len != 0) { 5576 ctl_set_invalid_field(ctsio, 5577 /*sks_valid*/ 1, 5578 /*command*/ 0, 5579 /*field*/ 2, 5580 /*bit_valid*/ 0, 5581 /*bit*/ 0); 5582 goto bailout; 5583 } 5584 } 5585 } 5586 5587 /* 5588 * The format command will clear out the "Medium format corrupted" 5589 * status if set by the configuration code. That status is really 5590 * just a way to notify the host that we have lost the media, and 5591 * get them to issue a command that will basically make them think 5592 * they're blowing away the media. 
5593 */ 5594 mtx_lock(&lun->lun_lock); 5595 lun->flags &= ~CTL_LUN_INOPERABLE; 5596 mtx_unlock(&lun->lun_lock); 5597 5598 ctl_set_success(ctsio); 5599 bailout: 5600 5601 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5602 free(ctsio->kern_data_ptr, M_CTL); 5603 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5604 } 5605 5606 ctl_done((union ctl_io *)ctsio); 5607 return (CTL_RETVAL_COMPLETE); 5608 } 5609 5610 int 5611 ctl_read_buffer(struct ctl_scsiio *ctsio) 5612 { 5613 struct ctl_lun *lun; 5614 uint64_t buffer_offset; 5615 uint32_t len; 5616 uint8_t byte2; 5617 static uint8_t descr[4]; 5618 static uint8_t echo_descr[4] = { 0 }; 5619 5620 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5621 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5622 switch (ctsio->cdb[0]) { 5623 case READ_BUFFER: { 5624 struct scsi_read_buffer *cdb; 5625 5626 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5627 buffer_offset = scsi_3btoul(cdb->offset); 5628 len = scsi_3btoul(cdb->length); 5629 byte2 = cdb->byte2; 5630 break; 5631 } 5632 case READ_BUFFER_16: { 5633 struct scsi_read_buffer_16 *cdb; 5634 5635 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5636 buffer_offset = scsi_8btou64(cdb->offset); 5637 len = scsi_4btoul(cdb->length); 5638 byte2 = cdb->byte2; 5639 break; 5640 } 5641 default: /* This shouldn't happen. */ 5642 ctl_set_invalid_opcode(ctsio); 5643 ctl_done((union ctl_io *)ctsio); 5644 return (CTL_RETVAL_COMPLETE); 5645 } 5646 5647 if ((byte2 & RWB_MODE) != RWB_MODE_DATA && 5648 (byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5649 (byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5650 ctl_set_invalid_field(ctsio, 5651 /*sks_valid*/ 1, 5652 /*command*/ 1, 5653 /*field*/ 1, 5654 /*bit_valid*/ 1, 5655 /*bit*/ 4); 5656 ctl_done((union ctl_io *)ctsio); 5657 return (CTL_RETVAL_COMPLETE); 5658 } 5659 5660 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5661 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5662 ctl_set_invalid_field(ctsio, 5663 /*sks_valid*/ 1, 5664 /*command*/ 1, 5665 /*field*/ 6, 5666 /*bit_valid*/ 0, 5667 /*bit*/ 0); 5668 ctl_done((union ctl_io *)ctsio); 5669 return (CTL_RETVAL_COMPLETE); 5670 } 5671 5672 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5673 descr[0] = 0; 5674 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5675 ctsio->kern_data_ptr = descr; 5676 len = min(len, sizeof(descr)); 5677 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5678 ctsio->kern_data_ptr = echo_descr; 5679 len = min(len, sizeof(echo_descr)); 5680 } else { 5681 if (lun->write_buffer == NULL) { 5682 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5683 M_CTL, M_WAITOK); 5684 } 5685 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5686 } 5687 ctsio->kern_data_len = len; 5688 ctsio->kern_total_len = len; 5689 ctsio->kern_data_resid = 0; 5690 ctsio->kern_rel_offset = 0; 5691 ctsio->kern_sg_entries = 0; 5692 ctl_set_success(ctsio); 5693 ctsio->be_move_done = ctl_config_move_done; 5694 ctl_datamove((union ctl_io *)ctsio); 5695 return (CTL_RETVAL_COMPLETE); 5696 } 5697 5698 int 5699 ctl_write_buffer(struct ctl_scsiio *ctsio) 5700 { 5701 struct scsi_write_buffer *cdb; 5702 struct ctl_lun *lun; 5703 int buffer_offset, len; 5704 5705 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5706 5707 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5708 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5709 5710 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5711 ctl_set_invalid_field(ctsio, 5712 /*sks_valid*/ 1, 5713 /*command*/ 1, 5714 /*field*/ 1, 5715 /*bit_valid*/ 1, 5716 /*bit*/ 4); 5717 ctl_done((union 
ctl_io *)ctsio); 5718 return (CTL_RETVAL_COMPLETE); 5719 } 5720 5721 len = scsi_3btoul(cdb->length); 5722 buffer_offset = scsi_3btoul(cdb->offset); 5723 5724 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5725 ctl_set_invalid_field(ctsio, 5726 /*sks_valid*/ 1, 5727 /*command*/ 1, 5728 /*field*/ 6, 5729 /*bit_valid*/ 0, 5730 /*bit*/ 0); 5731 ctl_done((union ctl_io *)ctsio); 5732 return (CTL_RETVAL_COMPLETE); 5733 } 5734 5735 /* 5736 * If we've got a kernel request that hasn't been malloced yet, 5737 * malloc it and tell the caller the data buffer is here. 5738 */ 5739 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5740 if (lun->write_buffer == NULL) { 5741 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5742 M_CTL, M_WAITOK); 5743 } 5744 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5745 ctsio->kern_data_len = len; 5746 ctsio->kern_total_len = len; 5747 ctsio->kern_data_resid = 0; 5748 ctsio->kern_rel_offset = 0; 5749 ctsio->kern_sg_entries = 0; 5750 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5751 ctsio->be_move_done = ctl_config_move_done; 5752 ctl_datamove((union ctl_io *)ctsio); 5753 5754 return (CTL_RETVAL_COMPLETE); 5755 } 5756 5757 ctl_set_success(ctsio); 5758 ctl_done((union ctl_io *)ctsio); 5759 return (CTL_RETVAL_COMPLETE); 5760 } 5761 5762 int 5763 ctl_write_same(struct ctl_scsiio *ctsio) 5764 { 5765 struct ctl_lun *lun; 5766 struct ctl_lba_len_flags *lbalen; 5767 uint64_t lba; 5768 uint32_t num_blocks; 5769 int len, retval; 5770 uint8_t byte2; 5771 5772 retval = CTL_RETVAL_COMPLETE; 5773 5774 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5775 5776 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5777 5778 switch (ctsio->cdb[0]) { 5779 case WRITE_SAME_10: { 5780 struct scsi_write_same_10 *cdb; 5781 5782 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5783 5784 lba = scsi_4btoul(cdb->addr); 5785 num_blocks = scsi_2btoul(cdb->length); 5786 byte2 = cdb->byte2; 5787 break; 5788 } 5789 case WRITE_SAME_16: { 5790 struct scsi_write_same_16 *cdb; 5791 5792 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5793 5794 lba = scsi_8btou64(cdb->addr); 5795 num_blocks = scsi_4btoul(cdb->length); 5796 byte2 = cdb->byte2; 5797 break; 5798 } 5799 default: 5800 /* 5801 * We got a command we don't support. This shouldn't 5802 * happen, commands should be filtered out above us. 5803 */ 5804 ctl_set_invalid_opcode(ctsio); 5805 ctl_done((union ctl_io *)ctsio); 5806 5807 return (CTL_RETVAL_COMPLETE); 5808 break; /* NOTREACHED */ 5809 } 5810 5811 /* ANCHOR flag can be used only together with UNMAP */ 5812 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5813 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5814 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5815 ctl_done((union ctl_io *)ctsio); 5816 return (CTL_RETVAL_COMPLETE); 5817 } 5818 5819 /* 5820 * The first check is to make sure we're in bounds, the second 5821 * check is to catch wrap-around problems. If the lba + num blocks 5822 * is less than the lba, then we've wrapped around and the block 5823 * range is invalid anyway. 
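 * (For example, an lba of 0xfffffffffffffffe with num_blocks 4 wraps
 *  around to 2; the first test may not notice that, but 2 < lba does.)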
5824 */ 5825 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5826 || ((lba + num_blocks) < lba)) { 5827 ctl_set_lba_out_of_range(ctsio); 5828 ctl_done((union ctl_io *)ctsio); 5829 return (CTL_RETVAL_COMPLETE); 5830 } 5831 5832 /* Zero number of blocks means "to the last logical block" */ 5833 if (num_blocks == 0) { 5834 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5835 ctl_set_invalid_field(ctsio, 5836 /*sks_valid*/ 0, 5837 /*command*/ 1, 5838 /*field*/ 0, 5839 /*bit_valid*/ 0, 5840 /*bit*/ 0); 5841 ctl_done((union ctl_io *)ctsio); 5842 return (CTL_RETVAL_COMPLETE); 5843 } 5844 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5845 } 5846 5847 len = lun->be_lun->blocksize; 5848 5849 /* 5850 * If we've got a kernel request that hasn't been malloced yet, 5851 * malloc it and tell the caller the data buffer is here. 5852 */ 5853 if ((byte2 & SWS_NDOB) == 0 && 5854 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5855 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5856 ctsio->kern_data_len = len; 5857 ctsio->kern_total_len = len; 5858 ctsio->kern_data_resid = 0; 5859 ctsio->kern_rel_offset = 0; 5860 ctsio->kern_sg_entries = 0; 5861 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5862 ctsio->be_move_done = ctl_config_move_done; 5863 ctl_datamove((union ctl_io *)ctsio); 5864 5865 return (CTL_RETVAL_COMPLETE); 5866 } 5867 5868 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5869 lbalen->lba = lba; 5870 lbalen->len = num_blocks; 5871 lbalen->flags = byte2; 5872 retval = lun->backend->config_write((union ctl_io *)ctsio); 5873 5874 return (retval); 5875 } 5876 5877 int 5878 ctl_unmap(struct ctl_scsiio *ctsio) 5879 { 5880 struct ctl_lun *lun; 5881 struct scsi_unmap *cdb; 5882 struct ctl_ptr_len_flags *ptrlen; 5883 struct scsi_unmap_header *hdr; 5884 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5885 uint64_t lba; 5886 uint32_t num_blocks; 5887 int len, retval; 5888 uint8_t byte2; 5889 5890 retval = CTL_RETVAL_COMPLETE; 5891 5892 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5893 5894 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5895 cdb = (struct scsi_unmap *)ctsio->cdb; 5896 5897 len = scsi_2btoul(cdb->length); 5898 byte2 = cdb->byte2; 5899 5900 /* 5901 * If we've got a kernel request that hasn't been malloced yet, 5902 * malloc it and tell the caller the data buffer is here.
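 * The first pass through here allocates the buffer and starts ctl_datamove() to pull in the UNMAP parameter list; once the initiator's data has been moved in, ctl_unmap() runs again with CTL_FLAG_ALLOCATED set and falls through to validate the descriptors below.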
5903 */ 5904 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5905 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5906 ctsio->kern_data_len = len; 5907 ctsio->kern_total_len = len; 5908 ctsio->kern_data_resid = 0; 5909 ctsio->kern_rel_offset = 0; 5910 ctsio->kern_sg_entries = 0; 5911 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5912 ctsio->be_move_done = ctl_config_move_done; 5913 ctl_datamove((union ctl_io *)ctsio); 5914 5915 return (CTL_RETVAL_COMPLETE); 5916 } 5917 5918 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5919 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5920 if (len < sizeof (*hdr) || 5921 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5922 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5923 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5924 ctl_set_invalid_field(ctsio, 5925 /*sks_valid*/ 0, 5926 /*command*/ 0, 5927 /*field*/ 0, 5928 /*bit_valid*/ 0, 5929 /*bit*/ 0); 5930 goto done; 5931 } 5932 len = scsi_2btoul(hdr->desc_length); 5933 buf = (struct scsi_unmap_desc *)(hdr + 1); 5934 end = buf + len / sizeof(*buf); 5935 5936 endnz = buf; 5937 for (range = buf; range < end; range++) { 5938 lba = scsi_8btou64(range->lba); 5939 num_blocks = scsi_4btoul(range->length); 5940 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5941 || ((lba + num_blocks) < lba)) { 5942 ctl_set_lba_out_of_range(ctsio); 5943 ctl_done((union ctl_io *)ctsio); 5944 return (CTL_RETVAL_COMPLETE); 5945 } 5946 if (num_blocks != 0) 5947 endnz = range + 1; 5948 } 5949 5950 /* 5951 * Block backend cannot handle zero last range. 5952 * Filter it out and return if there is nothing left. 5953 */ 5954 len = (uint8_t *)endnz - (uint8_t *)buf; 5955 if (len == 0) { 5956 ctl_set_success(ctsio); 5957 goto done; 5958 } 5959 5960 mtx_lock(&lun->lun_lock); 5961 ptrlen = (struct ctl_ptr_len_flags *) 5962 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5963 ptrlen->ptr = (void *)buf; 5964 ptrlen->len = len; 5965 ptrlen->flags = byte2; 5966 ctl_check_blocked(lun); 5967 mtx_unlock(&lun->lun_lock); 5968 5969 retval = lun->backend->config_write((union ctl_io *)ctsio); 5970 return (retval); 5971 5972 done: 5973 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5974 free(ctsio->kern_data_ptr, M_CTL); 5975 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5976 } 5977 ctl_done((union ctl_io *)ctsio); 5978 return (CTL_RETVAL_COMPLETE); 5979 } 5980 5981 /* 5982 * Note that this function currently doesn't actually do anything inside 5983 * CTL to enforce things if the DQue bit is turned on. 5984 * 5985 * Also note that this function can't be used in the default case, because 5986 * the DQue bit isn't set in the changeable mask for the control mode page 5987 * anyway. This is just here as an example for how to implement a page 5988 * handler, and a placeholder in case we want to allow the user to turn 5989 * tagged queueing on and off. 5990 * 5991 * The D_SENSE bit handling is functional, however, and will turn 5992 * descriptor sense on and off for a given LUN.
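 * Any change made here is applied to both the current and saved copies of the page, and ctl_est_ua_all() is used to post a mode-parameters-changed unit attention to the other initiators.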
5993 */ 5994 int 5995 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5996 struct ctl_page_index *page_index, uint8_t *page_ptr) 5997 { 5998 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5999 struct ctl_lun *lun; 6000 int set_ua; 6001 uint32_t initidx; 6002 6003 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6004 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6005 set_ua = 0; 6006 6007 user_cp = (struct scsi_control_page *)page_ptr; 6008 current_cp = (struct scsi_control_page *) 6009 (page_index->page_data + (page_index->page_len * 6010 CTL_PAGE_CURRENT)); 6011 saved_cp = (struct scsi_control_page *) 6012 (page_index->page_data + (page_index->page_len * 6013 CTL_PAGE_SAVED)); 6014 6015 mtx_lock(&lun->lun_lock); 6016 if (((current_cp->rlec & SCP_DSENSE) == 0) 6017 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 6018 /* 6019 * Descriptor sense is currently turned off and the user 6020 * wants to turn it on. 6021 */ 6022 current_cp->rlec |= SCP_DSENSE; 6023 saved_cp->rlec |= SCP_DSENSE; 6024 lun->flags |= CTL_LUN_SENSE_DESC; 6025 set_ua = 1; 6026 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 6027 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 6028 /* 6029 * Descriptor sense is currently turned on, and the user 6030 * wants to turn it off. 6031 */ 6032 current_cp->rlec &= ~SCP_DSENSE; 6033 saved_cp->rlec &= ~SCP_DSENSE; 6034 lun->flags &= ~CTL_LUN_SENSE_DESC; 6035 set_ua = 1; 6036 } 6037 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 6038 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 6039 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6040 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6041 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6042 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6043 set_ua = 1; 6044 } 6045 if ((current_cp->eca_and_aen & SCP_SWP) != 6046 (user_cp->eca_and_aen & SCP_SWP)) { 6047 current_cp->eca_and_aen &= ~SCP_SWP; 6048 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6049 saved_cp->eca_and_aen &= ~SCP_SWP; 6050 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6051 set_ua = 1; 6052 } 6053 if (set_ua != 0) 6054 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6055 mtx_unlock(&lun->lun_lock); 6056 if (set_ua) { 6057 ctl_isc_announce_mode(lun, 6058 ctl_get_initindex(&ctsio->io_hdr.nexus), 6059 page_index->page_code, page_index->subpage); 6060 } 6061 return (0); 6062 } 6063 6064 int 6065 ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 6066 struct ctl_page_index *page_index, uint8_t *page_ptr) 6067 { 6068 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 6069 struct ctl_lun *lun; 6070 int set_ua; 6071 uint32_t initidx; 6072 6073 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6074 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6075 set_ua = 0; 6076 6077 user_cp = (struct scsi_caching_page *)page_ptr; 6078 current_cp = (struct scsi_caching_page *) 6079 (page_index->page_data + (page_index->page_len * 6080 CTL_PAGE_CURRENT)); 6081 saved_cp = (struct scsi_caching_page *) 6082 (page_index->page_data + (page_index->page_len * 6083 CTL_PAGE_SAVED)); 6084 6085 mtx_lock(&lun->lun_lock); 6086 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 6087 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 6088 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6089 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6090 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6091 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6092 set_ua = 1; 6093 } 6094 if 
(set_ua != 0) 6095 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6096 mtx_unlock(&lun->lun_lock); 6097 if (set_ua) { 6098 ctl_isc_announce_mode(lun, 6099 ctl_get_initindex(&ctsio->io_hdr.nexus), 6100 page_index->page_code, page_index->subpage); 6101 } 6102 return (0); 6103 } 6104 6105 int 6106 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6107 struct ctl_page_index *page_index, 6108 uint8_t *page_ptr) 6109 { 6110 uint8_t *c; 6111 int i; 6112 6113 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6114 ctl_time_io_secs = 6115 (c[0] << 8) | 6116 (c[1] << 0) | 6117 0; 6118 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6119 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6120 printf("page data:"); 6121 for (i=0; i<8; i++) 6122 printf(" %.2x",page_ptr[i]); 6123 printf("\n"); 6124 return (0); 6125 } 6126 6127 int 6128 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6129 struct ctl_page_index *page_index, 6130 int pc) 6131 { 6132 struct copan_debugconf_subpage *page; 6133 6134 page = (struct copan_debugconf_subpage *)page_index->page_data + 6135 (page_index->page_len * pc); 6136 6137 switch (pc) { 6138 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6139 case SMS_PAGE_CTRL_DEFAULT >> 6: 6140 case SMS_PAGE_CTRL_SAVED >> 6: 6141 /* 6142 * We don't update the changable or default bits for this page. 6143 */ 6144 break; 6145 case SMS_PAGE_CTRL_CURRENT >> 6: 6146 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6147 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6148 break; 6149 default: 6150 #ifdef NEEDTOPORT 6151 EPRINT(0, "Invalid PC %d!!", pc); 6152 #endif /* NEEDTOPORT */ 6153 break; 6154 } 6155 return (0); 6156 } 6157 6158 6159 static int 6160 ctl_do_mode_select(union ctl_io *io) 6161 { 6162 struct scsi_mode_page_header *page_header; 6163 struct ctl_page_index *page_index; 6164 struct ctl_scsiio *ctsio; 6165 int control_dev, page_len; 6166 int page_len_offset, page_len_size; 6167 union ctl_modepage_info *modepage_info; 6168 struct ctl_lun *lun; 6169 int *len_left, *len_used; 6170 int retval, i; 6171 6172 ctsio = &io->scsiio; 6173 page_index = NULL; 6174 page_len = 0; 6175 retval = CTL_RETVAL_COMPLETE; 6176 6177 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6178 6179 if (lun->be_lun->lun_type != T_DIRECT) 6180 control_dev = 1; 6181 else 6182 control_dev = 0; 6183 6184 modepage_info = (union ctl_modepage_info *) 6185 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6186 len_left = &modepage_info->header.len_left; 6187 len_used = &modepage_info->header.len_used; 6188 6189 do_next_page: 6190 6191 page_header = (struct scsi_mode_page_header *) 6192 (ctsio->kern_data_ptr + *len_used); 6193 6194 if (*len_left == 0) { 6195 free(ctsio->kern_data_ptr, M_CTL); 6196 ctl_set_success(ctsio); 6197 ctl_done((union ctl_io *)ctsio); 6198 return (CTL_RETVAL_COMPLETE); 6199 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6200 6201 free(ctsio->kern_data_ptr, M_CTL); 6202 ctl_set_param_len_error(ctsio); 6203 ctl_done((union ctl_io *)ctsio); 6204 return (CTL_RETVAL_COMPLETE); 6205 6206 } else if ((page_header->page_code & SMPH_SPF) 6207 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6208 6209 free(ctsio->kern_data_ptr, M_CTL); 6210 ctl_set_param_len_error(ctsio); 6211 ctl_done((union ctl_io *)ctsio); 6212 return (CTL_RETVAL_COMPLETE); 6213 } 6214 6215 6216 /* 6217 * XXX KDM should we do something with the block descriptor? 
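 * (ctl_mode_select() already counted the mode header and any block descriptor into len_used, so the loop below starts at the first mode page in the parameter list.)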
6218 */ 6219 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6220 6221 if ((control_dev != 0) 6222 && (lun->mode_pages.index[i].page_flags & 6223 CTL_PAGE_FLAG_DISK_ONLY)) 6224 continue; 6225 6226 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6227 (page_header->page_code & SMPH_PC_MASK)) 6228 continue; 6229 6230 /* 6231 * If neither page has a subpage code, then we've got a 6232 * match. 6233 */ 6234 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6235 && ((page_header->page_code & SMPH_SPF) == 0)) { 6236 page_index = &lun->mode_pages.index[i]; 6237 page_len = page_header->page_length; 6238 break; 6239 } 6240 6241 /* 6242 * If both pages have subpages, then the subpage numbers 6243 * have to match. 6244 */ 6245 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6246 && (page_header->page_code & SMPH_SPF)) { 6247 struct scsi_mode_page_header_sp *sph; 6248 6249 sph = (struct scsi_mode_page_header_sp *)page_header; 6250 6251 if (lun->mode_pages.index[i].subpage == 6252 sph->subpage) { 6253 page_index = &lun->mode_pages.index[i]; 6254 page_len = scsi_2btoul(sph->page_length); 6255 break; 6256 } 6257 } 6258 } 6259 6260 /* 6261 * If we couldn't find the page, or if we don't have a mode select 6262 * handler for it, send back an error to the user. 6263 */ 6264 if ((page_index == NULL) 6265 || (page_index->select_handler == NULL)) { 6266 ctl_set_invalid_field(ctsio, 6267 /*sks_valid*/ 1, 6268 /*command*/ 0, 6269 /*field*/ *len_used, 6270 /*bit_valid*/ 0, 6271 /*bit*/ 0); 6272 free(ctsio->kern_data_ptr, M_CTL); 6273 ctl_done((union ctl_io *)ctsio); 6274 return (CTL_RETVAL_COMPLETE); 6275 } 6276 6277 if (page_index->page_code & SMPH_SPF) { 6278 page_len_offset = 2; 6279 page_len_size = 2; 6280 } else { 6281 page_len_size = 1; 6282 page_len_offset = 1; 6283 } 6284 6285 /* 6286 * If the length the initiator gives us isn't the one we specify in 6287 * the mode page header, or if they didn't specify enough data in 6288 * the CDB to avoid truncating this page, kick out the request. 6289 */ 6290 if ((page_len != (page_index->page_len - page_len_offset - 6291 page_len_size)) 6292 || (*len_left < page_index->page_len)) { 6293 6294 6295 ctl_set_invalid_field(ctsio, 6296 /*sks_valid*/ 1, 6297 /*command*/ 0, 6298 /*field*/ *len_used + page_len_offset, 6299 /*bit_valid*/ 0, 6300 /*bit*/ 0); 6301 free(ctsio->kern_data_ptr, M_CTL); 6302 ctl_done((union ctl_io *)ctsio); 6303 return (CTL_RETVAL_COMPLETE); 6304 } 6305 6306 /* 6307 * Run through the mode page, checking to make sure that the bits 6308 * the user changed are actually legal for him to change. 6309 */ 6310 for (i = 0; i < page_index->page_len; i++) { 6311 uint8_t *user_byte, *change_mask, *current_byte; 6312 int bad_bit; 6313 int j; 6314 6315 user_byte = (uint8_t *)page_header + i; 6316 change_mask = page_index->page_data + 6317 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6318 current_byte = page_index->page_data + 6319 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6320 6321 /* 6322 * Check to see whether the user set any bits in this byte 6323 * that he is not allowed to set. 6324 */ 6325 if ((*user_byte & ~(*change_mask)) == 6326 (*current_byte & ~(*change_mask))) 6327 continue; 6328 6329 /* 6330 * Go through bit by bit to determine which one is illegal. 
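 * For example, if the changeable mask for this byte is 0x04, the user supplied 0x06 and the current value is 0x00, then bit 1 differs outside the mask and is reported as the offending bit.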
6331 */ 6332 bad_bit = 0; 6333 for (j = 7; j >= 0; j--) { 6334 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6335 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6336 bad_bit = j; 6337 break; 6338 } 6339 } 6340 ctl_set_invalid_field(ctsio, 6341 /*sks_valid*/ 1, 6342 /*command*/ 0, 6343 /*field*/ *len_used + i, 6344 /*bit_valid*/ 1, 6345 /*bit*/ bad_bit); 6346 free(ctsio->kern_data_ptr, M_CTL); 6347 ctl_done((union ctl_io *)ctsio); 6348 return (CTL_RETVAL_COMPLETE); 6349 } 6350 6351 /* 6352 * Decrement these before we call the page handler, since we may 6353 * end up getting called back one way or another before the handler 6354 * returns to this context. 6355 */ 6356 *len_left -= page_index->page_len; 6357 *len_used += page_index->page_len; 6358 6359 retval = page_index->select_handler(ctsio, page_index, 6360 (uint8_t *)page_header); 6361 6362 /* 6363 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6364 * wait until this queued command completes to finish processing 6365 * the mode page. If it returns anything other than 6366 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6367 * already set the sense information, freed the data pointer, and 6368 * completed the io for us. 6369 */ 6370 if (retval != CTL_RETVAL_COMPLETE) 6371 goto bailout_no_done; 6372 6373 /* 6374 * If the initiator sent us more than one page, parse the next one. 6375 */ 6376 if (*len_left > 0) 6377 goto do_next_page; 6378 6379 ctl_set_success(ctsio); 6380 free(ctsio->kern_data_ptr, M_CTL); 6381 ctl_done((union ctl_io *)ctsio); 6382 6383 bailout_no_done: 6384 6385 return (CTL_RETVAL_COMPLETE); 6386 6387 } 6388 6389 int 6390 ctl_mode_select(struct ctl_scsiio *ctsio) 6391 { 6392 int param_len, pf, sp; 6393 int header_size, bd_len; 6394 int len_left, len_used; 6395 struct ctl_page_index *page_index; 6396 struct ctl_lun *lun; 6397 int control_dev, page_len; 6398 union ctl_modepage_info *modepage_info; 6399 int retval; 6400 6401 pf = 0; 6402 sp = 0; 6403 page_len = 0; 6404 len_used = 0; 6405 len_left = 0; 6406 retval = 0; 6407 bd_len = 0; 6408 page_index = NULL; 6409 6410 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6411 6412 if (lun->be_lun->lun_type != T_DIRECT) 6413 control_dev = 1; 6414 else 6415 control_dev = 0; 6416 6417 switch (ctsio->cdb[0]) { 6418 case MODE_SELECT_6: { 6419 struct scsi_mode_select_6 *cdb; 6420 6421 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6422 6423 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6424 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6425 6426 param_len = cdb->length; 6427 header_size = sizeof(struct scsi_mode_header_6); 6428 break; 6429 } 6430 case MODE_SELECT_10: { 6431 struct scsi_mode_select_10 *cdb; 6432 6433 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6434 6435 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6436 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6437 6438 param_len = scsi_2btoul(cdb->length); 6439 header_size = sizeof(struct scsi_mode_header_10); 6440 break; 6441 } 6442 default: 6443 ctl_set_invalid_opcode(ctsio); 6444 ctl_done((union ctl_io *)ctsio); 6445 return (CTL_RETVAL_COMPLETE); 6446 break; /* NOTREACHED */ 6447 } 6448 6449 /* 6450 * From SPC-3: 6451 * "A parameter list length of zero indicates that the Data-Out Buffer 6452 * shall be empty. This condition shall not be considered as an error."
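 * So a parameter list length of zero simply completes with GOOD status below, without any data transfer.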
6453 */ 6454 if (param_len == 0) { 6455 ctl_set_success(ctsio); 6456 ctl_done((union ctl_io *)ctsio); 6457 return (CTL_RETVAL_COMPLETE); 6458 } 6459 6460 /* 6461 * Since we'll hit this the first time through, prior to 6462 * allocation, we don't need to free a data buffer here. 6463 */ 6464 if (param_len < header_size) { 6465 ctl_set_param_len_error(ctsio); 6466 ctl_done((union ctl_io *)ctsio); 6467 return (CTL_RETVAL_COMPLETE); 6468 } 6469 6470 /* 6471 * Allocate the data buffer and grab the user's data. In theory, 6472 * we shouldn't have to sanity check the parameter list length here 6473 * because the maximum size is 64K. We should be able to malloc 6474 * that much without too many problems. 6475 */ 6476 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6477 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6478 ctsio->kern_data_len = param_len; 6479 ctsio->kern_total_len = param_len; 6480 ctsio->kern_data_resid = 0; 6481 ctsio->kern_rel_offset = 0; 6482 ctsio->kern_sg_entries = 0; 6483 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6484 ctsio->be_move_done = ctl_config_move_done; 6485 ctl_datamove((union ctl_io *)ctsio); 6486 6487 return (CTL_RETVAL_COMPLETE); 6488 } 6489 6490 switch (ctsio->cdb[0]) { 6491 case MODE_SELECT_6: { 6492 struct scsi_mode_header_6 *mh6; 6493 6494 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6495 bd_len = mh6->blk_desc_len; 6496 break; 6497 } 6498 case MODE_SELECT_10: { 6499 struct scsi_mode_header_10 *mh10; 6500 6501 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6502 bd_len = scsi_2btoul(mh10->blk_desc_len); 6503 break; 6504 } 6505 default: 6506 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6507 break; 6508 } 6509 6510 if (param_len < (header_size + bd_len)) { 6511 free(ctsio->kern_data_ptr, M_CTL); 6512 ctl_set_param_len_error(ctsio); 6513 ctl_done((union ctl_io *)ctsio); 6514 return (CTL_RETVAL_COMPLETE); 6515 } 6516 6517 /* 6518 * Set the IO_CONT flag, so that if this I/O gets passed to 6519 * ctl_config_write_done(), it'll get passed back to 6520 * ctl_do_mode_select() for further processing, or completion if 6521 * we're all done. 
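 * This also lets a page handler that returns CTL_RETVAL_QUEUED resume the walk through the parameter list in ctl_do_mode_select() once its queued work completes.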
6522 */ 6523 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6524 ctsio->io_cont = ctl_do_mode_select; 6525 6526 modepage_info = (union ctl_modepage_info *) 6527 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6528 6529 memset(modepage_info, 0, sizeof(*modepage_info)); 6530 6531 len_left = param_len - header_size - bd_len; 6532 len_used = header_size + bd_len; 6533 6534 modepage_info->header.len_left = len_left; 6535 modepage_info->header.len_used = len_used; 6536 6537 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6538 } 6539 6540 int 6541 ctl_mode_sense(struct ctl_scsiio *ctsio) 6542 { 6543 struct ctl_lun *lun; 6544 int pc, page_code, dbd, llba, subpage; 6545 int alloc_len, page_len, header_len, total_len; 6546 struct scsi_mode_block_descr *block_desc; 6547 struct ctl_page_index *page_index; 6548 int control_dev; 6549 6550 dbd = 0; 6551 llba = 0; 6552 block_desc = NULL; 6553 page_index = NULL; 6554 6555 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6556 6557 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6558 6559 if (lun->be_lun->lun_type != T_DIRECT) 6560 control_dev = 1; 6561 else 6562 control_dev = 0; 6563 6564 switch (ctsio->cdb[0]) { 6565 case MODE_SENSE_6: { 6566 struct scsi_mode_sense_6 *cdb; 6567 6568 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6569 6570 header_len = sizeof(struct scsi_mode_hdr_6); 6571 if (cdb->byte2 & SMS_DBD) 6572 dbd = 1; 6573 else 6574 header_len += sizeof(struct scsi_mode_block_descr); 6575 6576 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6577 page_code = cdb->page & SMS_PAGE_CODE; 6578 subpage = cdb->subpage; 6579 alloc_len = cdb->length; 6580 break; 6581 } 6582 case MODE_SENSE_10: { 6583 struct scsi_mode_sense_10 *cdb; 6584 6585 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6586 6587 header_len = sizeof(struct scsi_mode_hdr_10); 6588 6589 if (cdb->byte2 & SMS_DBD) 6590 dbd = 1; 6591 else 6592 header_len += sizeof(struct scsi_mode_block_descr); 6593 if (cdb->byte2 & SMS10_LLBAA) 6594 llba = 1; 6595 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6596 page_code = cdb->page & SMS_PAGE_CODE; 6597 subpage = cdb->subpage; 6598 alloc_len = scsi_2btoul(cdb->length); 6599 break; 6600 } 6601 default: 6602 ctl_set_invalid_opcode(ctsio); 6603 ctl_done((union ctl_io *)ctsio); 6604 return (CTL_RETVAL_COMPLETE); 6605 break; /* NOTREACHED */ 6606 } 6607 6608 /* 6609 * We have to make a first pass through to calculate the size of 6610 * the pages that match the user's query. Then we allocate enough 6611 * memory to hold it, and actually copy the data into the buffer. 6612 */ 6613 switch (page_code) { 6614 case SMS_ALL_PAGES_PAGE: { 6615 int i; 6616 6617 page_len = 0; 6618 6619 /* 6620 * At the moment, values other than 0 and 0xff here are 6621 * reserved according to SPC-3. 6622 */ 6623 if ((subpage != SMS_SUBPAGE_PAGE_0) 6624 && (subpage != SMS_SUBPAGE_ALL)) { 6625 ctl_set_invalid_field(ctsio, 6626 /*sks_valid*/ 1, 6627 /*command*/ 1, 6628 /*field*/ 3, 6629 /*bit_valid*/ 0, 6630 /*bit*/ 0); 6631 ctl_done((union ctl_io *)ctsio); 6632 return (CTL_RETVAL_COMPLETE); 6633 } 6634 6635 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6636 if ((control_dev != 0) 6637 && (lun->mode_pages.index[i].page_flags & 6638 CTL_PAGE_FLAG_DISK_ONLY)) 6639 continue; 6640 6641 /* 6642 * We don't use this subpage if the user didn't 6643 * request all subpages. 
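 * (A subpage value of 0 requests only the pages without subpages, while 0xff requests every page and subpage; anything else was rejected above.)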
6644 */ 6645 if ((lun->mode_pages.index[i].subpage != 0) 6646 && (subpage == SMS_SUBPAGE_PAGE_0)) 6647 continue; 6648 6649 #if 0 6650 printf("found page %#x len %d\n", 6651 lun->mode_pages.index[i].page_code & 6652 SMPH_PC_MASK, 6653 lun->mode_pages.index[i].page_len); 6654 #endif 6655 page_len += lun->mode_pages.index[i].page_len; 6656 } 6657 break; 6658 } 6659 default: { 6660 int i; 6661 6662 page_len = 0; 6663 6664 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6665 /* Look for the right page code */ 6666 if ((lun->mode_pages.index[i].page_code & 6667 SMPH_PC_MASK) != page_code) 6668 continue; 6669 6670 /* Look for the right subpage or the subpage wildcard*/ 6671 if ((lun->mode_pages.index[i].subpage != subpage) 6672 && (subpage != SMS_SUBPAGE_ALL)) 6673 continue; 6674 6675 /* Make sure the page is supported for this dev type */ 6676 if ((control_dev != 0) 6677 && (lun->mode_pages.index[i].page_flags & 6678 CTL_PAGE_FLAG_DISK_ONLY)) 6679 continue; 6680 6681 #if 0 6682 printf("found page %#x len %d\n", 6683 lun->mode_pages.index[i].page_code & 6684 SMPH_PC_MASK, 6685 lun->mode_pages.index[i].page_len); 6686 #endif 6687 6688 page_len += lun->mode_pages.index[i].page_len; 6689 } 6690 6691 if (page_len == 0) { 6692 ctl_set_invalid_field(ctsio, 6693 /*sks_valid*/ 1, 6694 /*command*/ 1, 6695 /*field*/ 2, 6696 /*bit_valid*/ 1, 6697 /*bit*/ 5); 6698 ctl_done((union ctl_io *)ctsio); 6699 return (CTL_RETVAL_COMPLETE); 6700 } 6701 break; 6702 } 6703 } 6704 6705 total_len = header_len + page_len; 6706 #if 0 6707 printf("header_len = %d, page_len = %d, total_len = %d\n", 6708 header_len, page_len, total_len); 6709 #endif 6710 6711 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6712 ctsio->kern_sg_entries = 0; 6713 ctsio->kern_data_resid = 0; 6714 ctsio->kern_rel_offset = 0; 6715 if (total_len < alloc_len) { 6716 ctsio->residual = alloc_len - total_len; 6717 ctsio->kern_data_len = total_len; 6718 ctsio->kern_total_len = total_len; 6719 } else { 6720 ctsio->residual = 0; 6721 ctsio->kern_data_len = alloc_len; 6722 ctsio->kern_total_len = alloc_len; 6723 } 6724 6725 switch (ctsio->cdb[0]) { 6726 case MODE_SENSE_6: { 6727 struct scsi_mode_hdr_6 *header; 6728 6729 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6730 6731 header->datalen = MIN(total_len - 1, 254); 6732 if (control_dev == 0) { 6733 header->dev_specific = 0x10; /* DPOFUA */ 6734 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6735 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6736 .eca_and_aen & SCP_SWP) != 0) 6737 header->dev_specific |= 0x80; /* WP */ 6738 } 6739 if (dbd) 6740 header->block_descr_len = 0; 6741 else 6742 header->block_descr_len = 6743 sizeof(struct scsi_mode_block_descr); 6744 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6745 break; 6746 } 6747 case MODE_SENSE_10: { 6748 struct scsi_mode_hdr_10 *header; 6749 int datalen; 6750 6751 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6752 6753 datalen = MIN(total_len - 2, 65533); 6754 scsi_ulto2b(datalen, header->datalen); 6755 if (control_dev == 0) { 6756 header->dev_specific = 0x10; /* DPOFUA */ 6757 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6758 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6759 .eca_and_aen & SCP_SWP) != 0) 6760 header->dev_specific |= 0x80; /* WP */ 6761 } 6762 if (dbd) 6763 scsi_ulto2b(0, header->block_descr_len); 6764 else 6765 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6766 header->block_descr_len); 6767 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6768 break; 6769 } 
6770 default: 6771 panic("invalid CDB type %#x", ctsio->cdb[0]); 6772 break; /* NOTREACHED */ 6773 } 6774 6775 /* 6776 * If we've got a disk, use its blocksize in the block 6777 * descriptor. Otherwise, just set it to 0. 6778 */ 6779 if (dbd == 0) { 6780 if (control_dev == 0) 6781 scsi_ulto3b(lun->be_lun->blocksize, 6782 block_desc->block_len); 6783 else 6784 scsi_ulto3b(0, block_desc->block_len); 6785 } 6786 6787 switch (page_code) { 6788 case SMS_ALL_PAGES_PAGE: { 6789 int i, data_used; 6790 6791 data_used = header_len; 6792 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6793 struct ctl_page_index *page_index; 6794 6795 page_index = &lun->mode_pages.index[i]; 6796 6797 if ((control_dev != 0) 6798 && (page_index->page_flags & 6799 CTL_PAGE_FLAG_DISK_ONLY)) 6800 continue; 6801 6802 /* 6803 * We don't use this subpage if the user didn't 6804 * request all subpages. We already checked (above) 6805 * to make sure the user only specified a subpage 6806 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6807 */ 6808 if ((page_index->subpage != 0) 6809 && (subpage == SMS_SUBPAGE_PAGE_0)) 6810 continue; 6811 6812 /* 6813 * Call the handler, if it exists, to update the 6814 * page to the latest values. 6815 */ 6816 if (page_index->sense_handler != NULL) 6817 page_index->sense_handler(ctsio, page_index,pc); 6818 6819 memcpy(ctsio->kern_data_ptr + data_used, 6820 page_index->page_data + 6821 (page_index->page_len * pc), 6822 page_index->page_len); 6823 data_used += page_index->page_len; 6824 } 6825 break; 6826 } 6827 default: { 6828 int i, data_used; 6829 6830 data_used = header_len; 6831 6832 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6833 struct ctl_page_index *page_index; 6834 6835 page_index = &lun->mode_pages.index[i]; 6836 6837 /* Look for the right page code */ 6838 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6839 continue; 6840 6841 /* Look for the right subpage or the subpage wildcard*/ 6842 if ((page_index->subpage != subpage) 6843 && (subpage != SMS_SUBPAGE_ALL)) 6844 continue; 6845 6846 /* Make sure the page is supported for this dev type */ 6847 if ((control_dev != 0) 6848 && (page_index->page_flags & 6849 CTL_PAGE_FLAG_DISK_ONLY)) 6850 continue; 6851 6852 /* 6853 * Call the handler, if it exists, to update the 6854 * page to the latest values. 
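 * (For example, ctl_debugconf_sp_sense_handler() above refreshes the current ctl_time_io_secs value before the page data is copied out.)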
6855 */ 6856 if (page_index->sense_handler != NULL) 6857 page_index->sense_handler(ctsio, page_index,pc); 6858 6859 memcpy(ctsio->kern_data_ptr + data_used, 6860 page_index->page_data + 6861 (page_index->page_len * pc), 6862 page_index->page_len); 6863 data_used += page_index->page_len; 6864 } 6865 break; 6866 } 6867 } 6868 6869 ctl_set_success(ctsio); 6870 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6871 ctsio->be_move_done = ctl_config_move_done; 6872 ctl_datamove((union ctl_io *)ctsio); 6873 return (CTL_RETVAL_COMPLETE); 6874 } 6875 6876 int 6877 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6878 struct ctl_page_index *page_index, 6879 int pc) 6880 { 6881 struct ctl_lun *lun; 6882 struct scsi_log_param_header *phdr; 6883 uint8_t *data; 6884 uint64_t val; 6885 6886 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6887 data = page_index->page_data; 6888 6889 if (lun->backend->lun_attr != NULL && 6890 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6891 != UINT64_MAX) { 6892 phdr = (struct scsi_log_param_header *)data; 6893 scsi_ulto2b(0x0001, phdr->param_code); 6894 phdr->param_control = SLP_LBIN | SLP_LP; 6895 phdr->param_len = 8; 6896 data = (uint8_t *)(phdr + 1); 6897 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6898 data[4] = 0x02; /* per-pool */ 6899 data += phdr->param_len; 6900 } 6901 6902 if (lun->backend->lun_attr != NULL && 6903 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6904 != UINT64_MAX) { 6905 phdr = (struct scsi_log_param_header *)data; 6906 scsi_ulto2b(0x0002, phdr->param_code); 6907 phdr->param_control = SLP_LBIN | SLP_LP; 6908 phdr->param_len = 8; 6909 data = (uint8_t *)(phdr + 1); 6910 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6911 data[4] = 0x01; /* per-LUN */ 6912 data += phdr->param_len; 6913 } 6914 6915 if (lun->backend->lun_attr != NULL && 6916 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6917 != UINT64_MAX) { 6918 phdr = (struct scsi_log_param_header *)data; 6919 scsi_ulto2b(0x00f1, phdr->param_code); 6920 phdr->param_control = SLP_LBIN | SLP_LP; 6921 phdr->param_len = 8; 6922 data = (uint8_t *)(phdr + 1); 6923 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6924 data[4] = 0x02; /* per-pool */ 6925 data += phdr->param_len; 6926 } 6927 6928 if (lun->backend->lun_attr != NULL && 6929 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6930 != UINT64_MAX) { 6931 phdr = (struct scsi_log_param_header *)data; 6932 scsi_ulto2b(0x00f2, phdr->param_code); 6933 phdr->param_control = SLP_LBIN | SLP_LP; 6934 phdr->param_len = 8; 6935 data = (uint8_t *)(phdr + 1); 6936 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6937 data[4] = 0x02; /* per-pool */ 6938 data += phdr->param_len; 6939 } 6940 6941 page_index->page_len = data - page_index->page_data; 6942 return (0); 6943 } 6944 6945 int 6946 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6947 struct ctl_page_index *page_index, 6948 int pc) 6949 { 6950 struct ctl_lun *lun; 6951 struct stat_page *data; 6952 uint64_t rn, wn, rb, wb; 6953 struct bintime rt, wt; 6954 int i; 6955 6956 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6957 data = (struct stat_page *)page_index->page_data; 6958 6959 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6960 data->sap.hdr.param_control = SLP_LBIN; 6961 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6962 sizeof(struct scsi_log_param_header); 6963 rn = wn = rb = wb = 0; 6964 bintime_clear(&rt); 6965 bintime_clear(&wt); 6966 for (i = 0; i < 
CTL_MAX_PORTS; i++) { 6967 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6968 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6969 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6970 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6971 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6972 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]); 6973 } 6974 scsi_u64to8b(rn, data->sap.read_num); 6975 scsi_u64to8b(wn, data->sap.write_num); 6976 if (lun->stats.blocksize > 0) { 6977 scsi_u64to8b(wb / lun->stats.blocksize, 6978 data->sap.recvieved_lba); 6979 scsi_u64to8b(rb / lun->stats.blocksize, 6980 data->sap.transmitted_lba); 6981 } 6982 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6983 data->sap.read_int); 6984 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6985 data->sap.write_int); 6986 scsi_u64to8b(0, data->sap.weighted_num); 6987 scsi_u64to8b(0, data->sap.weighted_int); 6988 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6989 data->it.hdr.param_control = SLP_LBIN; 6990 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6991 sizeof(struct scsi_log_param_header); 6992 #ifdef CTL_TIME_IO 6993 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6994 #endif 6995 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6996 data->ti.hdr.param_control = SLP_LBIN; 6997 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6998 sizeof(struct scsi_log_param_header); 6999 scsi_ulto4b(3, data->ti.exponent); 7000 scsi_ulto4b(1, data->ti.integer); 7001 7002 page_index->page_len = sizeof(*data); 7003 return (0); 7004 } 7005 7006 int 7007 ctl_log_sense(struct ctl_scsiio *ctsio) 7008 { 7009 struct ctl_lun *lun; 7010 int i, pc, page_code, subpage; 7011 int alloc_len, total_len; 7012 struct ctl_page_index *page_index; 7013 struct scsi_log_sense *cdb; 7014 struct scsi_log_header *header; 7015 7016 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 7017 7018 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7019 cdb = (struct scsi_log_sense *)ctsio->cdb; 7020 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 7021 page_code = cdb->page & SLS_PAGE_CODE; 7022 subpage = cdb->subpage; 7023 alloc_len = scsi_2btoul(cdb->length); 7024 7025 page_index = NULL; 7026 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 7027 page_index = &lun->log_pages.index[i]; 7028 7029 /* Look for the right page code */ 7030 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 7031 continue; 7032 7033 /* Look for the right subpage or the subpage wildcard*/ 7034 if (page_index->subpage != subpage) 7035 continue; 7036 7037 break; 7038 } 7039 if (i >= CTL_NUM_LOG_PAGES) { 7040 ctl_set_invalid_field(ctsio, 7041 /*sks_valid*/ 1, 7042 /*command*/ 1, 7043 /*field*/ 2, 7044 /*bit_valid*/ 0, 7045 /*bit*/ 0); 7046 ctl_done((union ctl_io *)ctsio); 7047 return (CTL_RETVAL_COMPLETE); 7048 } 7049 7050 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 7051 7052 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7053 ctsio->kern_sg_entries = 0; 7054 ctsio->kern_data_resid = 0; 7055 ctsio->kern_rel_offset = 0; 7056 if (total_len < alloc_len) { 7057 ctsio->residual = alloc_len - total_len; 7058 ctsio->kern_data_len = total_len; 7059 ctsio->kern_total_len = total_len; 7060 } else { 7061 ctsio->residual = 0; 7062 ctsio->kern_data_len = alloc_len; 7063 ctsio->kern_total_len = alloc_len; 7064 } 7065 7066 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 7067 header->page = page_index->page_code; 7068 if
(page_index->subpage) { 7069 header->page |= SL_SPF; 7070 header->subpage = page_index->subpage; 7071 } 7072 scsi_ulto2b(page_index->page_len, header->datalen); 7073 7074 /* 7075 * Call the handler, if it exists, to update the 7076 * page to the latest values. 7077 */ 7078 if (page_index->sense_handler != NULL) 7079 page_index->sense_handler(ctsio, page_index, pc); 7080 7081 memcpy(header + 1, page_index->page_data, page_index->page_len); 7082 7083 ctl_set_success(ctsio); 7084 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7085 ctsio->be_move_done = ctl_config_move_done; 7086 ctl_datamove((union ctl_io *)ctsio); 7087 return (CTL_RETVAL_COMPLETE); 7088 } 7089 7090 int 7091 ctl_read_capacity(struct ctl_scsiio *ctsio) 7092 { 7093 struct scsi_read_capacity *cdb; 7094 struct scsi_read_capacity_data *data; 7095 struct ctl_lun *lun; 7096 uint32_t lba; 7097 7098 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7099 7100 cdb = (struct scsi_read_capacity *)ctsio->cdb; 7101 7102 lba = scsi_4btoul(cdb->addr); 7103 if (((cdb->pmi & SRC_PMI) == 0) 7104 && (lba != 0)) { 7105 ctl_set_invalid_field(/*ctsio*/ ctsio, 7106 /*sks_valid*/ 1, 7107 /*command*/ 1, 7108 /*field*/ 2, 7109 /*bit_valid*/ 0, 7110 /*bit*/ 0); 7111 ctl_done((union ctl_io *)ctsio); 7112 return (CTL_RETVAL_COMPLETE); 7113 } 7114 7115 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7116 7117 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7118 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7119 ctsio->residual = 0; 7120 ctsio->kern_data_len = sizeof(*data); 7121 ctsio->kern_total_len = sizeof(*data); 7122 ctsio->kern_data_resid = 0; 7123 ctsio->kern_rel_offset = 0; 7124 ctsio->kern_sg_entries = 0; 7125 7126 /* 7127 * If the maximum LBA is greater than 0xfffffffe, the user must 7128 * issue a SERVICE ACTION IN (16) command, with the read capacity 7129 * service action set. 7130 */ 7131 if (lun->be_lun->maxlba > 0xfffffffe) 7132 scsi_ulto4b(0xffffffff, data->addr); 7133 else 7134 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7135 7136 /* 7137 * XXX KDM this may not be 512 bytes...
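 * (The length reported below is lun->be_lun->blocksize, so it tracks whatever block size the backend actually configured rather than assuming 512.)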
7138 */ 7139 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7140 7141 ctl_set_success(ctsio); 7142 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7143 ctsio->be_move_done = ctl_config_move_done; 7144 ctl_datamove((union ctl_io *)ctsio); 7145 return (CTL_RETVAL_COMPLETE); 7146 } 7147 7148 int 7149 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7150 { 7151 struct scsi_read_capacity_16 *cdb; 7152 struct scsi_read_capacity_data_long *data; 7153 struct ctl_lun *lun; 7154 uint64_t lba; 7155 uint32_t alloc_len; 7156 7157 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7158 7159 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7160 7161 alloc_len = scsi_4btoul(cdb->alloc_len); 7162 lba = scsi_8btou64(cdb->addr); 7163 7164 if ((cdb->reladr & SRC16_PMI) 7165 && (lba != 0)) { 7166 ctl_set_invalid_field(/*ctsio*/ ctsio, 7167 /*sks_valid*/ 1, 7168 /*command*/ 1, 7169 /*field*/ 2, 7170 /*bit_valid*/ 0, 7171 /*bit*/ 0); 7172 ctl_done((union ctl_io *)ctsio); 7173 return (CTL_RETVAL_COMPLETE); 7174 } 7175 7176 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7177 7178 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7179 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7180 7181 if (sizeof(*data) < alloc_len) { 7182 ctsio->residual = alloc_len - sizeof(*data); 7183 ctsio->kern_data_len = sizeof(*data); 7184 ctsio->kern_total_len = sizeof(*data); 7185 } else { 7186 ctsio->residual = 0; 7187 ctsio->kern_data_len = alloc_len; 7188 ctsio->kern_total_len = alloc_len; 7189 } 7190 ctsio->kern_data_resid = 0; 7191 ctsio->kern_rel_offset = 0; 7192 ctsio->kern_sg_entries = 0; 7193 7194 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7195 /* XXX KDM this may not be 512 bytes... */ 7196 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7197 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7198 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7199 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7200 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7201 7202 ctl_set_success(ctsio); 7203 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7204 ctsio->be_move_done = ctl_config_move_done; 7205 ctl_datamove((union ctl_io *)ctsio); 7206 return (CTL_RETVAL_COMPLETE); 7207 } 7208 7209 int 7210 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7211 { 7212 struct scsi_get_lba_status *cdb; 7213 struct scsi_get_lba_status_data *data; 7214 struct ctl_lun *lun; 7215 struct ctl_lba_len_flags *lbalen; 7216 uint64_t lba; 7217 uint32_t alloc_len, total_len; 7218 int retval; 7219 7220 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7221 7222 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7223 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7224 lba = scsi_8btou64(cdb->addr); 7225 alloc_len = scsi_4btoul(cdb->alloc_len); 7226 7227 if (lba > lun->be_lun->maxlba) { 7228 ctl_set_lba_out_of_range(ctsio); 7229 ctl_done((union ctl_io *)ctsio); 7230 return (CTL_RETVAL_COMPLETE); 7231 } 7232 7233 total_len = sizeof(*data) + sizeof(data->descr[0]); 7234 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7235 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7236 7237 if (total_len < alloc_len) { 7238 ctsio->residual = alloc_len - total_len; 7239 ctsio->kern_data_len = total_len; 7240 ctsio->kern_total_len = total_len; 7241 } else { 7242 ctsio->residual = 0; 7243 ctsio->kern_data_len = alloc_len; 7244 ctsio->kern_total_len = alloc_len; 7245 } 7246 ctsio->kern_data_resid = 0; 7247 ctsio->kern_rel_offset = 0; 7248 
ctsio->kern_sg_entries = 0; 7249 7250 /* Fill dummy data in case backend can't tell anything. */ 7251 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7252 scsi_u64to8b(lba, data->descr[0].addr); 7253 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7254 data->descr[0].length); 7255 data->descr[0].status = 0; /* Mapped or unknown. */ 7256 7257 ctl_set_success(ctsio); 7258 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7259 ctsio->be_move_done = ctl_config_move_done; 7260 7261 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7262 lbalen->lba = lba; 7263 lbalen->len = total_len; 7264 lbalen->flags = 0; 7265 retval = lun->backend->config_read((union ctl_io *)ctsio); 7266 return (CTL_RETVAL_COMPLETE); 7267 } 7268 7269 int 7270 ctl_read_defect(struct ctl_scsiio *ctsio) 7271 { 7272 struct scsi_read_defect_data_10 *ccb10; 7273 struct scsi_read_defect_data_12 *ccb12; 7274 struct scsi_read_defect_data_hdr_10 *data10; 7275 struct scsi_read_defect_data_hdr_12 *data12; 7276 uint32_t alloc_len, data_len; 7277 uint8_t format; 7278 7279 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7280 7281 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7282 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7283 format = ccb10->format; 7284 alloc_len = scsi_2btoul(ccb10->alloc_length); 7285 data_len = sizeof(*data10); 7286 } else { 7287 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7288 format = ccb12->format; 7289 alloc_len = scsi_4btoul(ccb12->alloc_length); 7290 data_len = sizeof(*data12); 7291 } 7292 if (alloc_len == 0) { 7293 ctl_set_success(ctsio); 7294 ctl_done((union ctl_io *)ctsio); 7295 return (CTL_RETVAL_COMPLETE); 7296 } 7297 7298 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7299 if (data_len < alloc_len) { 7300 ctsio->residual = alloc_len - data_len; 7301 ctsio->kern_data_len = data_len; 7302 ctsio->kern_total_len = data_len; 7303 } else { 7304 ctsio->residual = 0; 7305 ctsio->kern_data_len = alloc_len; 7306 ctsio->kern_total_len = alloc_len; 7307 } 7308 ctsio->kern_data_resid = 0; 7309 ctsio->kern_rel_offset = 0; 7310 ctsio->kern_sg_entries = 0; 7311 7312 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7313 data10 = (struct scsi_read_defect_data_hdr_10 *) 7314 ctsio->kern_data_ptr; 7315 data10->format = format; 7316 scsi_ulto2b(0, data10->length); 7317 } else { 7318 data12 = (struct scsi_read_defect_data_hdr_12 *) 7319 ctsio->kern_data_ptr; 7320 data12->format = format; 7321 scsi_ulto2b(0, data12->generation); 7322 scsi_ulto4b(0, data12->length); 7323 } 7324 7325 ctl_set_success(ctsio); 7326 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7327 ctsio->be_move_done = ctl_config_move_done; 7328 ctl_datamove((union ctl_io *)ctsio); 7329 return (CTL_RETVAL_COMPLETE); 7330 } 7331 7332 int 7333 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7334 { 7335 struct scsi_maintenance_in *cdb; 7336 int retval; 7337 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os; 7338 int num_target_port_groups, num_target_ports; 7339 struct ctl_lun *lun; 7340 struct ctl_softc *softc; 7341 struct ctl_port *port; 7342 struct scsi_target_group_data *rtg_ptr; 7343 struct scsi_target_group_data_extended *rtg_ext_ptr; 7344 struct scsi_target_port_group_descriptor *tpg_desc; 7345 7346 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7347 7348 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7349 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7350 softc = lun->ctl_softc; 7351 7352 retval = CTL_RETVAL_COMPLETE; 7353 7354 switch (cdb->byte2 & 
STG_PDF_MASK) { 7355 case STG_PDF_LENGTH: 7356 ext = 0; 7357 break; 7358 case STG_PDF_EXTENDED: 7359 ext = 1; 7360 break; 7361 default: 7362 ctl_set_invalid_field(/*ctsio*/ ctsio, 7363 /*sks_valid*/ 1, 7364 /*command*/ 1, 7365 /*field*/ 2, 7366 /*bit_valid*/ 1, 7367 /*bit*/ 5); 7368 ctl_done((union ctl_io *)ctsio); 7369 return(retval); 7370 } 7371 7372 if (softc->is_single) 7373 num_target_port_groups = 1; 7374 else 7375 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7376 num_target_ports = 0; 7377 mtx_lock(&softc->ctl_lock); 7378 STAILQ_FOREACH(port, &softc->port_list, links) { 7379 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7380 continue; 7381 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7382 continue; 7383 num_target_ports++; 7384 } 7385 mtx_unlock(&softc->ctl_lock); 7386 7387 if (ext) 7388 total_len = sizeof(struct scsi_target_group_data_extended); 7389 else 7390 total_len = sizeof(struct scsi_target_group_data); 7391 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7392 num_target_port_groups + 7393 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7394 7395 alloc_len = scsi_4btoul(cdb->length); 7396 7397 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7398 7399 ctsio->kern_sg_entries = 0; 7400 7401 if (total_len < alloc_len) { 7402 ctsio->residual = alloc_len - total_len; 7403 ctsio->kern_data_len = total_len; 7404 ctsio->kern_total_len = total_len; 7405 } else { 7406 ctsio->residual = 0; 7407 ctsio->kern_data_len = alloc_len; 7408 ctsio->kern_total_len = alloc_len; 7409 } 7410 ctsio->kern_data_resid = 0; 7411 ctsio->kern_rel_offset = 0; 7412 7413 if (ext) { 7414 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7415 ctsio->kern_data_ptr; 7416 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7417 rtg_ext_ptr->format_type = 0x10; 7418 rtg_ext_ptr->implicit_transition_time = 0; 7419 tpg_desc = &rtg_ext_ptr->groups[0]; 7420 } else { 7421 rtg_ptr = (struct scsi_target_group_data *) 7422 ctsio->kern_data_ptr; 7423 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7424 tpg_desc = &rtg_ptr->groups[0]; 7425 } 7426 7427 mtx_lock(&softc->ctl_lock); 7428 pg = softc->port_min / softc->port_cnt; 7429 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7430 gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7431 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7432 gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7433 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7434 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 7435 else 7436 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7437 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7438 os = gs; 7439 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7440 } else 7441 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7442 for (g = 0; g < num_target_port_groups; g++) { 7443 tpg_desc->pref_state = (g == pg) ? gs : os; 7444 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7445 TPG_U_SUP | TPG_T_SUP; 7446 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7447 tpg_desc->status = TPG_IMPLICIT; 7448 pc = 0; 7449 STAILQ_FOREACH(port, &softc->port_list, links) { 7450 if (port->targ_port < g * softc->port_cnt || 7451 port->targ_port >= (g + 1) * softc->port_cnt) 7452 continue; 7453 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7454 continue; 7455 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7456 continue; 7457 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
7458 relative_target_port_identifier); 7459 pc++; 7460 } 7461 tpg_desc->target_port_count = pc; 7462 tpg_desc = (struct scsi_target_port_group_descriptor *) 7463 &tpg_desc->descriptors[pc]; 7464 } 7465 mtx_unlock(&softc->ctl_lock); 7466 7467 ctl_set_success(ctsio); 7468 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7469 ctsio->be_move_done = ctl_config_move_done; 7470 ctl_datamove((union ctl_io *)ctsio); 7471 return(retval); 7472 } 7473 7474 int 7475 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7476 { 7477 struct ctl_lun *lun; 7478 struct scsi_report_supported_opcodes *cdb; 7479 const struct ctl_cmd_entry *entry, *sentry; 7480 struct scsi_report_supported_opcodes_all *all; 7481 struct scsi_report_supported_opcodes_descr *descr; 7482 struct scsi_report_supported_opcodes_one *one; 7483 int retval; 7484 int alloc_len, total_len; 7485 int opcode, service_action, i, j, num; 7486 7487 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7488 7489 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7490 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7491 7492 retval = CTL_RETVAL_COMPLETE; 7493 7494 opcode = cdb->requested_opcode; 7495 service_action = scsi_2btoul(cdb->requested_service_action); 7496 switch (cdb->options & RSO_OPTIONS_MASK) { 7497 case RSO_OPTIONS_ALL: 7498 num = 0; 7499 for (i = 0; i < 256; i++) { 7500 entry = &ctl_cmd_table[i]; 7501 if (entry->flags & CTL_CMD_FLAG_SA5) { 7502 for (j = 0; j < 32; j++) { 7503 sentry = &((const struct ctl_cmd_entry *) 7504 entry->execute)[j]; 7505 if (ctl_cmd_applicable( 7506 lun->be_lun->lun_type, sentry)) 7507 num++; 7508 } 7509 } else { 7510 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7511 entry)) 7512 num++; 7513 } 7514 } 7515 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7516 num * sizeof(struct scsi_report_supported_opcodes_descr); 7517 break; 7518 case RSO_OPTIONS_OC: 7519 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7520 ctl_set_invalid_field(/*ctsio*/ ctsio, 7521 /*sks_valid*/ 1, 7522 /*command*/ 1, 7523 /*field*/ 2, 7524 /*bit_valid*/ 1, 7525 /*bit*/ 2); 7526 ctl_done((union ctl_io *)ctsio); 7527 return (CTL_RETVAL_COMPLETE); 7528 } 7529 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7530 break; 7531 case RSO_OPTIONS_OC_SA: 7532 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7533 service_action >= 32) { 7534 ctl_set_invalid_field(/*ctsio*/ ctsio, 7535 /*sks_valid*/ 1, 7536 /*command*/ 1, 7537 /*field*/ 2, 7538 /*bit_valid*/ 1, 7539 /*bit*/ 2); 7540 ctl_done((union ctl_io *)ctsio); 7541 return (CTL_RETVAL_COMPLETE); 7542 } 7543 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7544 break; 7545 default: 7546 ctl_set_invalid_field(/*ctsio*/ ctsio, 7547 /*sks_valid*/ 1, 7548 /*command*/ 1, 7549 /*field*/ 2, 7550 /*bit_valid*/ 1, 7551 /*bit*/ 2); 7552 ctl_done((union ctl_io *)ctsio); 7553 return (CTL_RETVAL_COMPLETE); 7554 } 7555 7556 alloc_len = scsi_4btoul(cdb->length); 7557 7558 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7559 7560 ctsio->kern_sg_entries = 0; 7561 7562 if (total_len < alloc_len) { 7563 ctsio->residual = alloc_len - total_len; 7564 ctsio->kern_data_len = total_len; 7565 ctsio->kern_total_len = total_len; 7566 } else { 7567 ctsio->residual = 0; 7568 ctsio->kern_data_len = alloc_len; 7569 ctsio->kern_total_len = alloc_len; 7570 } 7571 ctsio->kern_data_resid = 0; 7572 ctsio->kern_rel_offset = 0; 7573 7574 switch (cdb->options & RSO_OPTIONS_MASK) { 7575 case RSO_OPTIONS_ALL: 7576 all = (struct 
scsi_report_supported_opcodes_all *) 7577 ctsio->kern_data_ptr; 7578 num = 0; 7579 for (i = 0; i < 256; i++) { 7580 entry = &ctl_cmd_table[i]; 7581 if (entry->flags & CTL_CMD_FLAG_SA5) { 7582 for (j = 0; j < 32; j++) { 7583 sentry = &((const struct ctl_cmd_entry *) 7584 entry->execute)[j]; 7585 if (!ctl_cmd_applicable( 7586 lun->be_lun->lun_type, sentry)) 7587 continue; 7588 descr = &all->descr[num++]; 7589 descr->opcode = i; 7590 scsi_ulto2b(j, descr->service_action); 7591 descr->flags = RSO_SERVACTV; 7592 scsi_ulto2b(sentry->length, 7593 descr->cdb_length); 7594 } 7595 } else { 7596 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7597 entry)) 7598 continue; 7599 descr = &all->descr[num++]; 7600 descr->opcode = i; 7601 scsi_ulto2b(0, descr->service_action); 7602 descr->flags = 0; 7603 scsi_ulto2b(entry->length, descr->cdb_length); 7604 } 7605 } 7606 scsi_ulto4b( 7607 num * sizeof(struct scsi_report_supported_opcodes_descr), 7608 all->length); 7609 break; 7610 case RSO_OPTIONS_OC: 7611 one = (struct scsi_report_supported_opcodes_one *) 7612 ctsio->kern_data_ptr; 7613 entry = &ctl_cmd_table[opcode]; 7614 goto fill_one; 7615 case RSO_OPTIONS_OC_SA: 7616 one = (struct scsi_report_supported_opcodes_one *) 7617 ctsio->kern_data_ptr; 7618 entry = &ctl_cmd_table[opcode]; 7619 entry = &((const struct ctl_cmd_entry *) 7620 entry->execute)[service_action]; 7621 fill_one: 7622 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7623 one->support = 3; 7624 scsi_ulto2b(entry->length, one->cdb_length); 7625 one->cdb_usage[0] = opcode; 7626 memcpy(&one->cdb_usage[1], entry->usage, 7627 entry->length - 1); 7628 } else 7629 one->support = 1; 7630 break; 7631 } 7632 7633 ctl_set_success(ctsio); 7634 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7635 ctsio->be_move_done = ctl_config_move_done; 7636 ctl_datamove((union ctl_io *)ctsio); 7637 return(retval); 7638 } 7639 7640 int 7641 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7642 { 7643 struct scsi_report_supported_tmf *cdb; 7644 struct scsi_report_supported_tmf_data *data; 7645 int retval; 7646 int alloc_len, total_len; 7647 7648 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7649 7650 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7651 7652 retval = CTL_RETVAL_COMPLETE; 7653 7654 total_len = sizeof(struct scsi_report_supported_tmf_data); 7655 alloc_len = scsi_4btoul(cdb->length); 7656 7657 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7658 7659 ctsio->kern_sg_entries = 0; 7660 7661 if (total_len < alloc_len) { 7662 ctsio->residual = alloc_len - total_len; 7663 ctsio->kern_data_len = total_len; 7664 ctsio->kern_total_len = total_len; 7665 } else { 7666 ctsio->residual = 0; 7667 ctsio->kern_data_len = alloc_len; 7668 ctsio->kern_total_len = alloc_len; 7669 } 7670 ctsio->kern_data_resid = 0; 7671 ctsio->kern_rel_offset = 0; 7672 7673 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7674 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7675 RST_TRS; 7676 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7677 7678 ctl_set_success(ctsio); 7679 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7680 ctsio->be_move_done = ctl_config_move_done; 7681 ctl_datamove((union ctl_io *)ctsio); 7682 return (retval); 7683 } 7684 7685 int 7686 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7687 { 7688 struct scsi_report_timestamp *cdb; 7689 struct scsi_report_timestamp_data *data; 7690 struct timeval tv; 7691 int64_t timestamp; 7692 int retval; 7693 int alloc_len, total_len; 7694 7695 
CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7696 7697 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7698 7699 retval = CTL_RETVAL_COMPLETE; 7700 7701 total_len = sizeof(struct scsi_report_timestamp_data); 7702 alloc_len = scsi_4btoul(cdb->length); 7703 7704 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7705 7706 ctsio->kern_sg_entries = 0; 7707 7708 if (total_len < alloc_len) { 7709 ctsio->residual = alloc_len - total_len; 7710 ctsio->kern_data_len = total_len; 7711 ctsio->kern_total_len = total_len; 7712 } else { 7713 ctsio->residual = 0; 7714 ctsio->kern_data_len = alloc_len; 7715 ctsio->kern_total_len = alloc_len; 7716 } 7717 ctsio->kern_data_resid = 0; 7718 ctsio->kern_rel_offset = 0; 7719 7720 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7721 scsi_ulto2b(sizeof(*data) - 2, data->length); 7722 data->origin = RTS_ORIG_OUTSIDE; 7723 getmicrotime(&tv); 7724 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7725 scsi_ulto4b(timestamp >> 16, data->timestamp); 7726 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7727 7728 ctl_set_success(ctsio); 7729 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7730 ctsio->be_move_done = ctl_config_move_done; 7731 ctl_datamove((union ctl_io *)ctsio); 7732 return (retval); 7733 } 7734 7735 int 7736 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7737 { 7738 struct scsi_per_res_in *cdb; 7739 int alloc_len, total_len = 0; 7740 /* struct scsi_per_res_in_rsrv in_data; */ 7741 struct ctl_lun *lun; 7742 struct ctl_softc *softc; 7743 uint64_t key; 7744 7745 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7746 7747 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7748 7749 alloc_len = scsi_2btoul(cdb->length); 7750 7751 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7752 softc = lun->ctl_softc; 7753 7754 retry: 7755 mtx_lock(&lun->lun_lock); 7756 switch (cdb->action) { 7757 case SPRI_RK: /* read keys */ 7758 total_len = sizeof(struct scsi_per_res_in_keys) + 7759 lun->pr_key_count * 7760 sizeof(struct scsi_per_res_key); 7761 break; 7762 case SPRI_RR: /* read reservation */ 7763 if (lun->flags & CTL_LUN_PR_RESERVED) 7764 total_len = sizeof(struct scsi_per_res_in_rsrv); 7765 else 7766 total_len = sizeof(struct scsi_per_res_in_header); 7767 break; 7768 case SPRI_RC: /* report capabilities */ 7769 total_len = sizeof(struct scsi_per_res_cap); 7770 break; 7771 case SPRI_RS: /* read full status */ 7772 total_len = sizeof(struct scsi_per_res_in_header) + 7773 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7774 lun->pr_key_count; 7775 break; 7776 default: 7777 panic("Invalid PR type %x", cdb->action); 7778 } 7779 mtx_unlock(&lun->lun_lock); 7780 7781 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7782 7783 if (total_len < alloc_len) { 7784 ctsio->residual = alloc_len - total_len; 7785 ctsio->kern_data_len = total_len; 7786 ctsio->kern_total_len = total_len; 7787 } else { 7788 ctsio->residual = 0; 7789 ctsio->kern_data_len = alloc_len; 7790 ctsio->kern_total_len = alloc_len; 7791 } 7792 7793 ctsio->kern_data_resid = 0; 7794 ctsio->kern_rel_offset = 0; 7795 ctsio->kern_sg_entries = 0; 7796 7797 mtx_lock(&lun->lun_lock); 7798 switch (cdb->action) { 7799 case SPRI_RK: { // read keys 7800 struct scsi_per_res_in_keys *res_keys; 7801 int i, key_count; 7802 7803 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7804 7805 /* 7806 * We had to drop the lock to allocate our buffer, which 7807 * leaves time for someone to come in with another 7808 * persistent 
reservation. (That is unlikely, though, 7809 * since this should be the only persistent reservation 7810 * command active right now.) 7811 */ 7812 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7813 (lun->pr_key_count * 7814 sizeof(struct scsi_per_res_key)))){ 7815 mtx_unlock(&lun->lun_lock); 7816 free(ctsio->kern_data_ptr, M_CTL); 7817 printf("%s: reservation length changed, retrying\n", 7818 __func__); 7819 goto retry; 7820 } 7821 7822 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7823 7824 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7825 lun->pr_key_count, res_keys->header.length); 7826 7827 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7828 if ((key = ctl_get_prkey(lun, i)) == 0) 7829 continue; 7830 7831 /* 7832 * We used lun->pr_key_count to calculate the 7833 * size to allocate. If it turns out the number of 7834 * initiators with the registered flag set is 7835 * larger than that (i.e. they haven't been kept in 7836 * sync), we've got a problem. 7837 */ 7838 if (key_count >= lun->pr_key_count) { 7839 #ifdef NEEDTOPORT 7840 csevent_log(CSC_CTL | CSC_SHELF_SW | 7841 CTL_PR_ERROR, 7842 csevent_LogType_Fault, 7843 csevent_AlertLevel_Yellow, 7844 csevent_FRU_ShelfController, 7845 csevent_FRU_Firmware, 7846 csevent_FRU_Unknown, 7847 "registered keys %d >= key " 7848 "count %d", key_count, 7849 lun->pr_key_count); 7850 #endif 7851 key_count++; 7852 continue; 7853 } 7854 scsi_u64to8b(key, res_keys->keys[key_count].key); 7855 key_count++; 7856 } 7857 break; 7858 } 7859 case SPRI_RR: { // read reservation 7860 struct scsi_per_res_in_rsrv *res; 7861 int tmp_len, header_only; 7862 7863 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7864 7865 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7866 7867 if (lun->flags & CTL_LUN_PR_RESERVED) 7868 { 7869 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7870 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7871 res->header.length); 7872 header_only = 0; 7873 } else { 7874 tmp_len = sizeof(struct scsi_per_res_in_header); 7875 scsi_ulto4b(0, res->header.length); 7876 header_only = 1; 7877 } 7878 7879 /* 7880 * We had to drop the lock to allocate our buffer, which 7881 * leaves time for someone to come in with another 7882 * persistent reservation. (That is unlikely, though, 7883 * since this should be the only persistent reservation 7884 * command active right now.) 7885 */ 7886 if (tmp_len != total_len) { 7887 mtx_unlock(&lun->lun_lock); 7888 free(ctsio->kern_data_ptr, M_CTL); 7889 printf("%s: reservation status changed, retrying\n", 7890 __func__); 7891 goto retry; 7892 } 7893 7894 /* 7895 * No reservation held, so we're done. 7896 */ 7897 if (header_only != 0) 7898 break; 7899 7900 /* 7901 * If the registration is an All Registrants type, the key 7902 * is 0, since it doesn't really matter. 
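 * (SPC-4 has the RESERVATION KEY field in the READ RESERVATION data set to zero for an all-registrants reservation, so the key copy below is simply skipped in that case and the M_ZERO'd buffer is left as-is.)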
7903 */ 7904 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7905 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7906 res->data.reservation); 7907 } 7908 res->data.scopetype = lun->res_type; 7909 break; 7910 } 7911 case SPRI_RC: //report capabilities 7912 { 7913 struct scsi_per_res_cap *res_cap; 7914 uint16_t type_mask; 7915 7916 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7917 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7918 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7919 type_mask = SPRI_TM_WR_EX_AR | 7920 SPRI_TM_EX_AC_RO | 7921 SPRI_TM_WR_EX_RO | 7922 SPRI_TM_EX_AC | 7923 SPRI_TM_WR_EX | 7924 SPRI_TM_EX_AC_AR; 7925 scsi_ulto2b(type_mask, res_cap->type_mask); 7926 break; 7927 } 7928 case SPRI_RS: { // read full status 7929 struct scsi_per_res_in_full *res_status; 7930 struct scsi_per_res_in_full_desc *res_desc; 7931 struct ctl_port *port; 7932 int i, len; 7933 7934 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7935 7936 /* 7937 * We had to drop the lock to allocate our buffer, which 7938 * leaves time for someone to come in with another 7939 * persistent reservation. (That is unlikely, though, 7940 * since this should be the only persistent reservation 7941 * command active right now.) 7942 */ 7943 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7944 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7945 lun->pr_key_count)){ 7946 mtx_unlock(&lun->lun_lock); 7947 free(ctsio->kern_data_ptr, M_CTL); 7948 printf("%s: reservation length changed, retrying\n", 7949 __func__); 7950 goto retry; 7951 } 7952 7953 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7954 7955 res_desc = &res_status->desc[0]; 7956 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7957 if ((key = ctl_get_prkey(lun, i)) == 0) 7958 continue; 7959 7960 scsi_u64to8b(key, res_desc->res_key.key); 7961 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7962 (lun->pr_res_idx == i || 7963 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7964 res_desc->flags = SPRI_FULL_R_HOLDER; 7965 res_desc->scopetype = lun->res_type; 7966 } 7967 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7968 res_desc->rel_trgt_port_id); 7969 len = 0; 7970 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7971 if (port != NULL) 7972 len = ctl_create_iid(port, 7973 i % CTL_MAX_INIT_PER_PORT, 7974 res_desc->transport_id); 7975 scsi_ulto4b(len, res_desc->additional_length); 7976 res_desc = (struct scsi_per_res_in_full_desc *) 7977 &res_desc->transport_id[len]; 7978 } 7979 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7980 res_status->header.length); 7981 break; 7982 } 7983 default: 7984 /* 7985 * This is a bug, because we just checked for this above, 7986 * and should have returned an error. 7987 */ 7988 panic("Invalid PR type %x", cdb->action); 7989 break; /* NOTREACHED */ 7990 } 7991 mtx_unlock(&lun->lun_lock); 7992 7993 ctl_set_success(ctsio); 7994 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7995 ctsio->be_move_done = ctl_config_move_done; 7996 ctl_datamove((union ctl_io *)ctsio); 7997 return (CTL_RETVAL_COMPLETE); 7998 } 7999 8000 /* 8001 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 8002 * it should return. 
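 * Roughly three cases are handled below: with a zero service action reservation key, preemption is only meaningful against an all-registrants reservation (every other registration is removed and a new reservation established); when no reservation is held, or an all-registrants reservation is held, registrations matching the service action reservation key are simply removed; otherwise, a key matching the current holder moves the reservation to the preempting nexus, while a key matching other registrants only removes those registrations.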
8003 */ 8004 static int 8005 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 8006 uint64_t sa_res_key, uint8_t type, uint32_t residx, 8007 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 8008 struct scsi_per_res_out_parms* param) 8009 { 8010 union ctl_ha_msg persis_io; 8011 int i; 8012 8013 mtx_lock(&lun->lun_lock); 8014 if (sa_res_key == 0) { 8015 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8016 /* validate scope and type */ 8017 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8018 SPR_LU_SCOPE) { 8019 mtx_unlock(&lun->lun_lock); 8020 ctl_set_invalid_field(/*ctsio*/ ctsio, 8021 /*sks_valid*/ 1, 8022 /*command*/ 1, 8023 /*field*/ 2, 8024 /*bit_valid*/ 1, 8025 /*bit*/ 4); 8026 ctl_done((union ctl_io *)ctsio); 8027 return (1); 8028 } 8029 8030 if (type>8 || type==2 || type==4 || type==0) { 8031 mtx_unlock(&lun->lun_lock); 8032 ctl_set_invalid_field(/*ctsio*/ ctsio, 8033 /*sks_valid*/ 1, 8034 /*command*/ 1, 8035 /*field*/ 2, 8036 /*bit_valid*/ 1, 8037 /*bit*/ 0); 8038 ctl_done((union ctl_io *)ctsio); 8039 return (1); 8040 } 8041 8042 /* 8043 * Unregister everybody else and build UA for 8044 * them 8045 */ 8046 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8047 if (i == residx || ctl_get_prkey(lun, i) == 0) 8048 continue; 8049 8050 ctl_clr_prkey(lun, i); 8051 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8052 } 8053 lun->pr_key_count = 1; 8054 lun->res_type = type; 8055 if (lun->res_type != SPR_TYPE_WR_EX_AR 8056 && lun->res_type != SPR_TYPE_EX_AC_AR) 8057 lun->pr_res_idx = residx; 8058 lun->PRGeneration++; 8059 mtx_unlock(&lun->lun_lock); 8060 8061 /* send msg to other side */ 8062 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8063 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8064 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8065 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8066 persis_io.pr.pr_info.res_type = type; 8067 memcpy(persis_io.pr.pr_info.sa_res_key, 8068 param->serv_act_res_key, 8069 sizeof(param->serv_act_res_key)); 8070 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8071 sizeof(persis_io.pr), M_WAITOK); 8072 } else { 8073 /* not all registrants */ 8074 mtx_unlock(&lun->lun_lock); 8075 free(ctsio->kern_data_ptr, M_CTL); 8076 ctl_set_invalid_field(ctsio, 8077 /*sks_valid*/ 1, 8078 /*command*/ 0, 8079 /*field*/ 8, 8080 /*bit_valid*/ 0, 8081 /*bit*/ 0); 8082 ctl_done((union ctl_io *)ctsio); 8083 return (1); 8084 } 8085 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8086 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8087 int found = 0; 8088 8089 if (res_key == sa_res_key) { 8090 /* special case */ 8091 /* 8092 * The spec implies this is not good but doesn't 8093 * say what to do. There are two choices either 8094 * generate a res conflict or check condition 8095 * with illegal field in parameter data. Since 8096 * that is what is done when the sa_res_key is 8097 * zero I'll take that approach since this has 8098 * to do with the sa_res_key. 
8099 */ 8100 mtx_unlock(&lun->lun_lock); 8101 free(ctsio->kern_data_ptr, M_CTL); 8102 ctl_set_invalid_field(ctsio, 8103 /*sks_valid*/ 1, 8104 /*command*/ 0, 8105 /*field*/ 8, 8106 /*bit_valid*/ 0, 8107 /*bit*/ 0); 8108 ctl_done((union ctl_io *)ctsio); 8109 return (1); 8110 } 8111 8112 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8113 if (ctl_get_prkey(lun, i) != sa_res_key) 8114 continue; 8115 8116 found = 1; 8117 ctl_clr_prkey(lun, i); 8118 lun->pr_key_count--; 8119 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8120 } 8121 if (!found) { 8122 mtx_unlock(&lun->lun_lock); 8123 free(ctsio->kern_data_ptr, M_CTL); 8124 ctl_set_reservation_conflict(ctsio); 8125 ctl_done((union ctl_io *)ctsio); 8126 return (CTL_RETVAL_COMPLETE); 8127 } 8128 lun->PRGeneration++; 8129 mtx_unlock(&lun->lun_lock); 8130 8131 /* send msg to other side */ 8132 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8133 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8134 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8135 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8136 persis_io.pr.pr_info.res_type = type; 8137 memcpy(persis_io.pr.pr_info.sa_res_key, 8138 param->serv_act_res_key, 8139 sizeof(param->serv_act_res_key)); 8140 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8141 sizeof(persis_io.pr), M_WAITOK); 8142 } else { 8143 /* Reserved but not all registrants */ 8144 /* sa_res_key is res holder */ 8145 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8146 /* validate scope and type */ 8147 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8148 SPR_LU_SCOPE) { 8149 mtx_unlock(&lun->lun_lock); 8150 ctl_set_invalid_field(/*ctsio*/ ctsio, 8151 /*sks_valid*/ 1, 8152 /*command*/ 1, 8153 /*field*/ 2, 8154 /*bit_valid*/ 1, 8155 /*bit*/ 4); 8156 ctl_done((union ctl_io *)ctsio); 8157 return (1); 8158 } 8159 8160 if (type>8 || type==2 || type==4 || type==0) { 8161 mtx_unlock(&lun->lun_lock); 8162 ctl_set_invalid_field(/*ctsio*/ ctsio, 8163 /*sks_valid*/ 1, 8164 /*command*/ 1, 8165 /*field*/ 2, 8166 /*bit_valid*/ 1, 8167 /*bit*/ 0); 8168 ctl_done((union ctl_io *)ctsio); 8169 return (1); 8170 } 8171 8172 /* 8173 * Do the following: 8174 * if sa_res_key != res_key remove all 8175 * registrants w/sa_res_key and generate UA 8176 * for these registrants(Registrations 8177 * Preempted) if it wasn't an exclusive 8178 * reservation generate UA(Reservations 8179 * Preempted) for all other registered nexuses 8180 * if the type has changed. Establish the new 8181 * reservation and holder. If res_key and 8182 * sa_res_key are the same do the above 8183 * except don't unregister the res holder. 
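 * (Concretely, the loop below establishes CTL_UA_REG_PREEMPT for each nexus whose registration is removed, and CTL_UA_RES_RELEASE for nexuses that stay registered but lose a registrants-only reservation because the type changes.)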
8184 */ 8185 8186 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8187 if (i == residx || ctl_get_prkey(lun, i) == 0) 8188 continue; 8189 8190 if (sa_res_key == ctl_get_prkey(lun, i)) { 8191 ctl_clr_prkey(lun, i); 8192 lun->pr_key_count--; 8193 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8194 } else if (type != lun->res_type 8195 && (lun->res_type == SPR_TYPE_WR_EX_RO 8196 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 8197 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8198 } 8199 } 8200 lun->res_type = type; 8201 if (lun->res_type != SPR_TYPE_WR_EX_AR 8202 && lun->res_type != SPR_TYPE_EX_AC_AR) 8203 lun->pr_res_idx = residx; 8204 else 8205 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8206 lun->PRGeneration++; 8207 mtx_unlock(&lun->lun_lock); 8208 8209 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8210 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8211 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8212 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8213 persis_io.pr.pr_info.res_type = type; 8214 memcpy(persis_io.pr.pr_info.sa_res_key, 8215 param->serv_act_res_key, 8216 sizeof(param->serv_act_res_key)); 8217 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8218 sizeof(persis_io.pr), M_WAITOK); 8219 } else { 8220 /* 8221 * sa_res_key is not the res holder just 8222 * remove registrants 8223 */ 8224 int found=0; 8225 8226 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8227 if (sa_res_key != ctl_get_prkey(lun, i)) 8228 continue; 8229 8230 found = 1; 8231 ctl_clr_prkey(lun, i); 8232 lun->pr_key_count--; 8233 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8234 } 8235 8236 if (!found) { 8237 mtx_unlock(&lun->lun_lock); 8238 free(ctsio->kern_data_ptr, M_CTL); 8239 ctl_set_reservation_conflict(ctsio); 8240 ctl_done((union ctl_io *)ctsio); 8241 return (1); 8242 } 8243 lun->PRGeneration++; 8244 mtx_unlock(&lun->lun_lock); 8245 8246 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8247 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8248 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8249 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8250 persis_io.pr.pr_info.res_type = type; 8251 memcpy(persis_io.pr.pr_info.sa_res_key, 8252 param->serv_act_res_key, 8253 sizeof(param->serv_act_res_key)); 8254 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8255 sizeof(persis_io.pr), M_WAITOK); 8256 } 8257 } 8258 return (0); 8259 } 8260 8261 static void 8262 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8263 { 8264 uint64_t sa_res_key; 8265 int i; 8266 8267 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8268 8269 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8270 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8271 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8272 if (sa_res_key == 0) { 8273 /* 8274 * Unregister everybody else and build UA for 8275 * them 8276 */ 8277 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8278 if (i == msg->pr.pr_info.residx || 8279 ctl_get_prkey(lun, i) == 0) 8280 continue; 8281 8282 ctl_clr_prkey(lun, i); 8283 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8284 } 8285 8286 lun->pr_key_count = 1; 8287 lun->res_type = msg->pr.pr_info.res_type; 8288 if (lun->res_type != SPR_TYPE_WR_EX_AR 8289 && lun->res_type != SPR_TYPE_EX_AC_AR) 8290 lun->pr_res_idx = msg->pr.pr_info.residx; 8291 } else { 8292 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8293 if (sa_res_key == ctl_get_prkey(lun, i)) 8294 continue; 8295 8296 ctl_clr_prkey(lun, i); 8297 lun->pr_key_count--; 8298 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8299 } 8300 } 8301 } else { 8302 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8303 if (i == msg->pr.pr_info.residx || 8304 
ctl_get_prkey(lun, i) == 0) 8305 continue; 8306 8307 if (sa_res_key == ctl_get_prkey(lun, i)) { 8308 ctl_clr_prkey(lun, i); 8309 lun->pr_key_count--; 8310 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8311 } else if (msg->pr.pr_info.res_type != lun->res_type 8312 && (lun->res_type == SPR_TYPE_WR_EX_RO 8313 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8314 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8315 } 8316 } 8317 lun->res_type = msg->pr.pr_info.res_type; 8318 if (lun->res_type != SPR_TYPE_WR_EX_AR 8319 && lun->res_type != SPR_TYPE_EX_AC_AR) 8320 lun->pr_res_idx = msg->pr.pr_info.residx; 8321 else 8322 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8323 } 8324 lun->PRGeneration++; 8325 8326 } 8327 8328 8329 int 8330 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8331 { 8332 int retval; 8333 u_int32_t param_len; 8334 struct scsi_per_res_out *cdb; 8335 struct ctl_lun *lun; 8336 struct scsi_per_res_out_parms* param; 8337 struct ctl_softc *softc; 8338 uint32_t residx; 8339 uint64_t res_key, sa_res_key, key; 8340 uint8_t type; 8341 union ctl_ha_msg persis_io; 8342 int i; 8343 8344 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8345 8346 retval = CTL_RETVAL_COMPLETE; 8347 8348 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8349 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8350 softc = lun->ctl_softc; 8351 8352 /* 8353 * We only support whole-LUN scope. The scope & type are ignored for 8354 * register, register and ignore existing key and clear. 8355 * We sometimes ignore scope and type on preempts too!! 8356 * Verify reservation type here as well. 8357 */ 8358 type = cdb->scope_type & SPR_TYPE_MASK; 8359 if ((cdb->action == SPRO_RESERVE) 8360 || (cdb->action == SPRO_RELEASE)) { 8361 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8362 ctl_set_invalid_field(/*ctsio*/ ctsio, 8363 /*sks_valid*/ 1, 8364 /*command*/ 1, 8365 /*field*/ 2, 8366 /*bit_valid*/ 1, 8367 /*bit*/ 4); 8368 ctl_done((union ctl_io *)ctsio); 8369 return (CTL_RETVAL_COMPLETE); 8370 } 8371 8372 if (type>8 || type==2 || type==4 || type==0) { 8373 ctl_set_invalid_field(/*ctsio*/ ctsio, 8374 /*sks_valid*/ 1, 8375 /*command*/ 1, 8376 /*field*/ 2, 8377 /*bit_valid*/ 1, 8378 /*bit*/ 0); 8379 ctl_done((union ctl_io *)ctsio); 8380 return (CTL_RETVAL_COMPLETE); 8381 } 8382 } 8383 8384 param_len = scsi_4btoul(cdb->length); 8385 8386 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8387 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8388 ctsio->kern_data_len = param_len; 8389 ctsio->kern_total_len = param_len; 8390 ctsio->kern_data_resid = 0; 8391 ctsio->kern_rel_offset = 0; 8392 ctsio->kern_sg_entries = 0; 8393 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8394 ctsio->be_move_done = ctl_config_move_done; 8395 ctl_datamove((union ctl_io *)ctsio); 8396 8397 return (CTL_RETVAL_COMPLETE); 8398 } 8399 8400 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8401 8402 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8403 res_key = scsi_8btou64(param->res_key.key); 8404 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8405 8406 /* 8407 * Validate the reservation key here except for SPRO_REG_IGNO 8408 * This must be done for all other service actions 8409 */ 8410 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8411 mtx_lock(&lun->lun_lock); 8412 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8413 if (res_key != key) { 8414 /* 8415 * The current key passed in doesn't match 8416 * the one the initiator previously 8417 * registered. 
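 * SPC-4 requires this to be answered with RESERVATION CONFLICT status, which is what happens below.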
8418 */ 8419 mtx_unlock(&lun->lun_lock); 8420 free(ctsio->kern_data_ptr, M_CTL); 8421 ctl_set_reservation_conflict(ctsio); 8422 ctl_done((union ctl_io *)ctsio); 8423 return (CTL_RETVAL_COMPLETE); 8424 } 8425 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8426 /* 8427 * We are not registered 8428 */ 8429 mtx_unlock(&lun->lun_lock); 8430 free(ctsio->kern_data_ptr, M_CTL); 8431 ctl_set_reservation_conflict(ctsio); 8432 ctl_done((union ctl_io *)ctsio); 8433 return (CTL_RETVAL_COMPLETE); 8434 } else if (res_key != 0) { 8435 /* 8436 * We are not registered and trying to register but 8437 * the register key isn't zero. 8438 */ 8439 mtx_unlock(&lun->lun_lock); 8440 free(ctsio->kern_data_ptr, M_CTL); 8441 ctl_set_reservation_conflict(ctsio); 8442 ctl_done((union ctl_io *)ctsio); 8443 return (CTL_RETVAL_COMPLETE); 8444 } 8445 mtx_unlock(&lun->lun_lock); 8446 } 8447 8448 switch (cdb->action & SPRO_ACTION_MASK) { 8449 case SPRO_REGISTER: 8450 case SPRO_REG_IGNO: { 8451 8452 #if 0 8453 printf("Registration received\n"); 8454 #endif 8455 8456 /* 8457 * We don't support any of these options, as we report in 8458 * the read capabilities request (see 8459 * ctl_persistent_reserve_in(), above). 8460 */ 8461 if ((param->flags & SPR_SPEC_I_PT) 8462 || (param->flags & SPR_ALL_TG_PT) 8463 || (param->flags & SPR_APTPL)) { 8464 int bit_ptr; 8465 8466 if (param->flags & SPR_APTPL) 8467 bit_ptr = 0; 8468 else if (param->flags & SPR_ALL_TG_PT) 8469 bit_ptr = 2; 8470 else /* SPR_SPEC_I_PT */ 8471 bit_ptr = 3; 8472 8473 free(ctsio->kern_data_ptr, M_CTL); 8474 ctl_set_invalid_field(ctsio, 8475 /*sks_valid*/ 1, 8476 /*command*/ 0, 8477 /*field*/ 20, 8478 /*bit_valid*/ 1, 8479 /*bit*/ bit_ptr); 8480 ctl_done((union ctl_io *)ctsio); 8481 return (CTL_RETVAL_COMPLETE); 8482 } 8483 8484 mtx_lock(&lun->lun_lock); 8485 8486 /* 8487 * The initiator wants to clear the 8488 * key/unregister. 8489 */ 8490 if (sa_res_key == 0) { 8491 if ((res_key == 0 8492 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8493 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8494 && ctl_get_prkey(lun, residx) == 0)) { 8495 mtx_unlock(&lun->lun_lock); 8496 goto done; 8497 } 8498 8499 ctl_clr_prkey(lun, residx); 8500 lun->pr_key_count--; 8501 8502 if (residx == lun->pr_res_idx) { 8503 lun->flags &= ~CTL_LUN_PR_RESERVED; 8504 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8505 8506 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8507 || lun->res_type == SPR_TYPE_EX_AC_RO) 8508 && lun->pr_key_count) { 8509 /* 8510 * If the reservation is a registrants 8511 * only type we need to generate a UA 8512 * for other registered inits. 
The 8513 * sense code should be RESERVATIONS 8514 * RELEASED 8515 */ 8516 8517 for (i = softc->init_min; i < softc->init_max; i++){ 8518 if (ctl_get_prkey(lun, i) == 0) 8519 continue; 8520 ctl_est_ua(lun, i, 8521 CTL_UA_RES_RELEASE); 8522 } 8523 } 8524 lun->res_type = 0; 8525 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8526 if (lun->pr_key_count==0) { 8527 lun->flags &= ~CTL_LUN_PR_RESERVED; 8528 lun->res_type = 0; 8529 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8530 } 8531 } 8532 lun->PRGeneration++; 8533 mtx_unlock(&lun->lun_lock); 8534 8535 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8536 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8537 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8538 persis_io.pr.pr_info.residx = residx; 8539 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8540 sizeof(persis_io.pr), M_WAITOK); 8541 } else /* sa_res_key != 0 */ { 8542 8543 /* 8544 * If we aren't registered currently then increment 8545 * the key count and set the registered flag. 8546 */ 8547 ctl_alloc_prkey(lun, residx); 8548 if (ctl_get_prkey(lun, residx) == 0) 8549 lun->pr_key_count++; 8550 ctl_set_prkey(lun, residx, sa_res_key); 8551 lun->PRGeneration++; 8552 mtx_unlock(&lun->lun_lock); 8553 8554 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8555 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8556 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8557 persis_io.pr.pr_info.residx = residx; 8558 memcpy(persis_io.pr.pr_info.sa_res_key, 8559 param->serv_act_res_key, 8560 sizeof(param->serv_act_res_key)); 8561 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8562 sizeof(persis_io.pr), M_WAITOK); 8563 } 8564 8565 break; 8566 } 8567 case SPRO_RESERVE: 8568 #if 0 8569 printf("Reserve executed type %d\n", type); 8570 #endif 8571 mtx_lock(&lun->lun_lock); 8572 if (lun->flags & CTL_LUN_PR_RESERVED) { 8573 /* 8574 * if this isn't the reservation holder and it's 8575 * not a "all registrants" type or if the type is 8576 * different then we have a conflict 8577 */ 8578 if ((lun->pr_res_idx != residx 8579 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8580 || lun->res_type != type) { 8581 mtx_unlock(&lun->lun_lock); 8582 free(ctsio->kern_data_ptr, M_CTL); 8583 ctl_set_reservation_conflict(ctsio); 8584 ctl_done((union ctl_io *)ctsio); 8585 return (CTL_RETVAL_COMPLETE); 8586 } 8587 mtx_unlock(&lun->lun_lock); 8588 } else /* create a reservation */ { 8589 /* 8590 * If it's not an "all registrants" type record 8591 * reservation holder 8592 */ 8593 if (type != SPR_TYPE_WR_EX_AR 8594 && type != SPR_TYPE_EX_AC_AR) 8595 lun->pr_res_idx = residx; /* Res holder */ 8596 else 8597 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8598 8599 lun->flags |= CTL_LUN_PR_RESERVED; 8600 lun->res_type = type; 8601 8602 mtx_unlock(&lun->lun_lock); 8603 8604 /* send msg to other side */ 8605 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8606 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8607 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8608 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8609 persis_io.pr.pr_info.res_type = type; 8610 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8611 sizeof(persis_io.pr), M_WAITOK); 8612 } 8613 break; 8614 8615 case SPRO_RELEASE: 8616 mtx_lock(&lun->lun_lock); 8617 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8618 /* No reservation exists return good status */ 8619 mtx_unlock(&lun->lun_lock); 8620 goto done; 8621 } 8622 /* 8623 * Is this nexus a reservation holder? 
8624 */ 8625 if (lun->pr_res_idx != residx 8626 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8627 /* 8628 * not a res holder; return good status but 8629 * do nothing 8630 */ 8631 mtx_unlock(&lun->lun_lock); 8632 goto done; 8633 } 8634 8635 if (lun->res_type != type) { 8636 mtx_unlock(&lun->lun_lock); 8637 free(ctsio->kern_data_ptr, M_CTL); 8638 ctl_set_illegal_pr_release(ctsio); 8639 ctl_done((union ctl_io *)ctsio); 8640 return (CTL_RETVAL_COMPLETE); 8641 } 8642 8643 /* okay to release */ 8644 lun->flags &= ~CTL_LUN_PR_RESERVED; 8645 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8646 lun->res_type = 0; 8647 8648 /* 8649 * if this isn't an exclusive access 8650 * res, generate UA for all other 8651 * registrants. 8652 */ 8653 if (type != SPR_TYPE_EX_AC 8654 && type != SPR_TYPE_WR_EX) { 8655 for (i = softc->init_min; i < softc->init_max; i++) { 8656 if (i == residx || ctl_get_prkey(lun, i) == 0) 8657 continue; 8658 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8659 } 8660 } 8661 mtx_unlock(&lun->lun_lock); 8662 8663 /* Send msg to other side */ 8664 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8665 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8666 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8667 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8668 sizeof(persis_io.pr), M_WAITOK); 8669 break; 8670 8671 case SPRO_CLEAR: 8672 /* send msg to other side */ 8673 8674 mtx_lock(&lun->lun_lock); 8675 lun->flags &= ~CTL_LUN_PR_RESERVED; 8676 lun->res_type = 0; 8677 lun->pr_key_count = 0; 8678 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8679 8680 ctl_clr_prkey(lun, residx); 8681 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8682 if (ctl_get_prkey(lun, i) != 0) { 8683 ctl_clr_prkey(lun, i); 8684 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8685 } 8686 lun->PRGeneration++; 8687 mtx_unlock(&lun->lun_lock); 8688 8689 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8690 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8691 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8692 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8693 sizeof(persis_io.pr), M_WAITOK); 8694 break; 8695 8696 case SPRO_PREEMPT: 8697 case SPRO_PRE_ABO: { 8698 int nretval; 8699 8700 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8701 residx, ctsio, cdb, param); 8702 if (nretval != 0) 8703 return (CTL_RETVAL_COMPLETE); 8704 break; 8705 } 8706 default: 8707 panic("Invalid PR type %x", cdb->action); 8708 } 8709 8710 done: 8711 free(ctsio->kern_data_ptr, M_CTL); 8712 ctl_set_success(ctsio); 8713 ctl_done((union ctl_io *)ctsio); 8714 8715 return (retval); 8716 } 8717 8718 /* 8719 * This routine is for handling a message from the other SC pertaining to 8720 * persistent reserve out. All the error checking will have been done, 8721 * so only the action itself needs to be performed here to keep the two 8722 * in sync.
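 * (The peer SC generates these CTL_MSG_PERS_ACTION messages from its ctl_persistent_reserve_out() and ctl_pro_preempt(), as seen above, via ctl_ha_msg_send().)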
8723 */ 8724 static void 8725 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8726 { 8727 struct ctl_lun *lun; 8728 struct ctl_softc *softc; 8729 int i; 8730 uint32_t residx, targ_lun; 8731 8732 softc = control_softc; 8733 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8734 mtx_lock(&softc->ctl_lock); 8735 if ((targ_lun >= CTL_MAX_LUNS) || 8736 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 8737 mtx_unlock(&softc->ctl_lock); 8738 return; 8739 } 8740 mtx_lock(&lun->lun_lock); 8741 mtx_unlock(&softc->ctl_lock); 8742 if (lun->flags & CTL_LUN_DISABLED) { 8743 mtx_unlock(&lun->lun_lock); 8744 return; 8745 } 8746 residx = ctl_get_initindex(&msg->hdr.nexus); 8747 switch(msg->pr.pr_info.action) { 8748 case CTL_PR_REG_KEY: 8749 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8750 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8751 lun->pr_key_count++; 8752 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8753 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8754 lun->PRGeneration++; 8755 break; 8756 8757 case CTL_PR_UNREG_KEY: 8758 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8759 lun->pr_key_count--; 8760 8761 /* XXX Need to see if the reservation has been released */ 8762 /* if so do we need to generate UA? */ 8763 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8764 lun->flags &= ~CTL_LUN_PR_RESERVED; 8765 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8766 8767 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8768 || lun->res_type == SPR_TYPE_EX_AC_RO) 8769 && lun->pr_key_count) { 8770 /* 8771 * If the reservation is a registrants 8772 * only type we need to generate a UA 8773 * for other registered inits. The 8774 * sense code should be RESERVATIONS 8775 * RELEASED 8776 */ 8777 8778 for (i = softc->init_min; i < softc->init_max; i++) { 8779 if (ctl_get_prkey(lun, i) == 0) 8780 continue; 8781 8782 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8783 } 8784 } 8785 lun->res_type = 0; 8786 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8787 if (lun->pr_key_count==0) { 8788 lun->flags &= ~CTL_LUN_PR_RESERVED; 8789 lun->res_type = 0; 8790 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8791 } 8792 } 8793 lun->PRGeneration++; 8794 break; 8795 8796 case CTL_PR_RESERVE: 8797 lun->flags |= CTL_LUN_PR_RESERVED; 8798 lun->res_type = msg->pr.pr_info.res_type; 8799 lun->pr_res_idx = msg->pr.pr_info.residx; 8800 8801 break; 8802 8803 case CTL_PR_RELEASE: 8804 /* 8805 * if this isn't an exclusive access res generate UA for all 8806 * other registrants. 
8807 */ 8808 if (lun->res_type != SPR_TYPE_EX_AC 8809 && lun->res_type != SPR_TYPE_WR_EX) { 8810 for (i = softc->init_min; i < softc->init_max; i++) { 8811 if (i == residx || ctl_get_prkey(lun, i) == 0) 8812 continue; 8813 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8814 } 8815 } 8816 lun->flags &= ~CTL_LUN_PR_RESERVED; 8817 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8818 lun->res_type = 0; 8819 break; 8820 8821 case CTL_PR_PREEMPT: 8822 ctl_pro_preempt_other(lun, msg); 8823 break; 8824 case CTL_PR_CLEAR: 8825 lun->flags &= ~CTL_LUN_PR_RESERVED; 8826 lun->res_type = 0; 8827 lun->pr_key_count = 0; 8828 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8829 8830 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8831 if (ctl_get_prkey(lun, i) == 0) 8832 continue; 8833 ctl_clr_prkey(lun, i); 8834 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8835 } 8836 lun->PRGeneration++; 8837 break; 8838 } 8839 8840 mtx_unlock(&lun->lun_lock); 8841 } 8842 8843 int 8844 ctl_read_write(struct ctl_scsiio *ctsio) 8845 { 8846 struct ctl_lun *lun; 8847 struct ctl_lba_len_flags *lbalen; 8848 uint64_t lba; 8849 uint32_t num_blocks; 8850 int flags, retval; 8851 int isread; 8852 8853 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8854 8855 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8856 8857 flags = 0; 8858 retval = CTL_RETVAL_COMPLETE; 8859 8860 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8861 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8862 switch (ctsio->cdb[0]) { 8863 case READ_6: 8864 case WRITE_6: { 8865 struct scsi_rw_6 *cdb; 8866 8867 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8868 8869 lba = scsi_3btoul(cdb->addr); 8870 /* only 5 bits are valid in the most significant address byte */ 8871 lba &= 0x1fffff; 8872 num_blocks = cdb->length; 8873 /* 8874 * This is correct according to SBC-2.
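 * The READ(6)/WRITE(6) transfer length is a single byte, so 0x01 through 0xff request 1 to 255 blocks and 0x00 requests 256; the wider CDBs handled below instead treat a transfer length of zero as a successful no-op (see the num_blocks == 0 check after the switch).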
8875 */ 8876 if (num_blocks == 0) 8877 num_blocks = 256; 8878 break; 8879 } 8880 case READ_10: 8881 case WRITE_10: { 8882 struct scsi_rw_10 *cdb; 8883 8884 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8885 if (cdb->byte2 & SRW10_FUA) 8886 flags |= CTL_LLF_FUA; 8887 if (cdb->byte2 & SRW10_DPO) 8888 flags |= CTL_LLF_DPO; 8889 lba = scsi_4btoul(cdb->addr); 8890 num_blocks = scsi_2btoul(cdb->length); 8891 break; 8892 } 8893 case WRITE_VERIFY_10: { 8894 struct scsi_write_verify_10 *cdb; 8895 8896 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8897 flags |= CTL_LLF_FUA; 8898 if (cdb->byte2 & SWV_DPO) 8899 flags |= CTL_LLF_DPO; 8900 lba = scsi_4btoul(cdb->addr); 8901 num_blocks = scsi_2btoul(cdb->length); 8902 break; 8903 } 8904 case READ_12: 8905 case WRITE_12: { 8906 struct scsi_rw_12 *cdb; 8907 8908 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8909 if (cdb->byte2 & SRW12_FUA) 8910 flags |= CTL_LLF_FUA; 8911 if (cdb->byte2 & SRW12_DPO) 8912 flags |= CTL_LLF_DPO; 8913 lba = scsi_4btoul(cdb->addr); 8914 num_blocks = scsi_4btoul(cdb->length); 8915 break; 8916 } 8917 case WRITE_VERIFY_12: { 8918 struct scsi_write_verify_12 *cdb; 8919 8920 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8921 flags |= CTL_LLF_FUA; 8922 if (cdb->byte2 & SWV_DPO) 8923 flags |= CTL_LLF_DPO; 8924 lba = scsi_4btoul(cdb->addr); 8925 num_blocks = scsi_4btoul(cdb->length); 8926 break; 8927 } 8928 case READ_16: 8929 case WRITE_16: { 8930 struct scsi_rw_16 *cdb; 8931 8932 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8933 if (cdb->byte2 & SRW12_FUA) 8934 flags |= CTL_LLF_FUA; 8935 if (cdb->byte2 & SRW12_DPO) 8936 flags |= CTL_LLF_DPO; 8937 lba = scsi_8btou64(cdb->addr); 8938 num_blocks = scsi_4btoul(cdb->length); 8939 break; 8940 } 8941 case WRITE_ATOMIC_16: { 8942 struct scsi_write_atomic_16 *cdb; 8943 8944 if (lun->be_lun->atomicblock == 0) { 8945 ctl_set_invalid_opcode(ctsio); 8946 ctl_done((union ctl_io *)ctsio); 8947 return (CTL_RETVAL_COMPLETE); 8948 } 8949 8950 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8951 if (cdb->byte2 & SRW12_FUA) 8952 flags |= CTL_LLF_FUA; 8953 if (cdb->byte2 & SRW12_DPO) 8954 flags |= CTL_LLF_DPO; 8955 lba = scsi_8btou64(cdb->addr); 8956 num_blocks = scsi_2btoul(cdb->length); 8957 if (num_blocks > lun->be_lun->atomicblock) { 8958 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8959 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8960 /*bit*/ 0); 8961 ctl_done((union ctl_io *)ctsio); 8962 return (CTL_RETVAL_COMPLETE); 8963 } 8964 break; 8965 } 8966 case WRITE_VERIFY_16: { 8967 struct scsi_write_verify_16 *cdb; 8968 8969 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8970 flags |= CTL_LLF_FUA; 8971 if (cdb->byte2 & SWV_DPO) 8972 flags |= CTL_LLF_DPO; 8973 lba = scsi_8btou64(cdb->addr); 8974 num_blocks = scsi_4btoul(cdb->length); 8975 break; 8976 } 8977 default: 8978 /* 8979 * We got a command we don't support. This shouldn't 8980 * happen, commands should be filtered out above us. 8981 */ 8982 ctl_set_invalid_opcode(ctsio); 8983 ctl_done((union ctl_io *)ctsio); 8984 8985 return (CTL_RETVAL_COMPLETE); 8986 break; /* NOTREACHED */ 8987 } 8988 8989 /* 8990 * The first check is to make sure we're in bounds, the second 8991 * check is to catch wrap-around problems. If the lba + num blocks 8992 * is less than the lba, then we've wrapped around and the block 8993 * range is invalid anyway. 
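 * For example, with num_blocks = 0x20 a 64-bit lba of 0xfffffffffffffff0 makes lba + num_blocks wrap to 0x10, which would pass the first comparison on most LUNs but is caught by the second.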
8994 */ 8995 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8996 || ((lba + num_blocks) < lba)) { 8997 ctl_set_lba_out_of_range(ctsio); 8998 ctl_done((union ctl_io *)ctsio); 8999 return (CTL_RETVAL_COMPLETE); 9000 } 9001 9002 /* 9003 * According to SBC-3, a transfer length of 0 is not an error. 9004 * Note that this cannot happen with WRITE(6) or READ(6), since 0 9005 * translates to 256 blocks for those commands. 9006 */ 9007 if (num_blocks == 0) { 9008 ctl_set_success(ctsio); 9009 ctl_done((union ctl_io *)ctsio); 9010 return (CTL_RETVAL_COMPLETE); 9011 } 9012 9013 /* Set FUA and/or DPO if caches are disabled. */ 9014 if (isread) { 9015 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9016 SCP_RCD) != 0) 9017 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 9018 } else { 9019 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9020 SCP_WCE) == 0) 9021 flags |= CTL_LLF_FUA; 9022 } 9023 9024 lbalen = (struct ctl_lba_len_flags *) 9025 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9026 lbalen->lba = lba; 9027 lbalen->len = num_blocks; 9028 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 9029 9030 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9031 ctsio->kern_rel_offset = 0; 9032 9033 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 9034 9035 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9036 9037 return (retval); 9038 } 9039 9040 static int 9041 ctl_cnw_cont(union ctl_io *io) 9042 { 9043 struct ctl_scsiio *ctsio; 9044 struct ctl_lun *lun; 9045 struct ctl_lba_len_flags *lbalen; 9046 int retval; 9047 9048 ctsio = &io->scsiio; 9049 ctsio->io_hdr.status = CTL_STATUS_NONE; 9050 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 9051 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9052 lbalen = (struct ctl_lba_len_flags *) 9053 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9054 lbalen->flags &= ~CTL_LLF_COMPARE; 9055 lbalen->flags |= CTL_LLF_WRITE; 9056 9057 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 9058 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9059 return (retval); 9060 } 9061 9062 int 9063 ctl_cnw(struct ctl_scsiio *ctsio) 9064 { 9065 struct ctl_lun *lun; 9066 struct ctl_lba_len_flags *lbalen; 9067 uint64_t lba; 9068 uint32_t num_blocks; 9069 int flags, retval; 9070 9071 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9072 9073 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 9074 9075 flags = 0; 9076 retval = CTL_RETVAL_COMPLETE; 9077 9078 switch (ctsio->cdb[0]) { 9079 case COMPARE_AND_WRITE: { 9080 struct scsi_compare_and_write *cdb; 9081 9082 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 9083 if (cdb->byte2 & SRW10_FUA) 9084 flags |= CTL_LLF_FUA; 9085 if (cdb->byte2 & SRW10_DPO) 9086 flags |= CTL_LLF_DPO; 9087 lba = scsi_8btou64(cdb->addr); 9088 num_blocks = cdb->length; 9089 break; 9090 } 9091 default: 9092 /* 9093 * We got a command we don't support. This shouldn't 9094 * happen, commands should be filtered out above us. 9095 */ 9096 ctl_set_invalid_opcode(ctsio); 9097 ctl_done((union ctl_io *)ctsio); 9098 9099 return (CTL_RETVAL_COMPLETE); 9100 break; /* NOTREACHED */ 9101 } 9102 9103 /* 9104 * The first check is to make sure we're in bounds, the second 9105 * check is to catch wrap-around problems. If the lba + num blocks 9106 * is less than the lba, then we've wrapped around and the block 9107 * range is invalid anyway. 
9108 */ 9109 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9110 || ((lba + num_blocks) < lba)) { 9111 ctl_set_lba_out_of_range(ctsio); 9112 ctl_done((union ctl_io *)ctsio); 9113 return (CTL_RETVAL_COMPLETE); 9114 } 9115 9116 /* 9117 * According to SBC-3, a transfer length of 0 is not an error. 9118 */ 9119 if (num_blocks == 0) { 9120 ctl_set_success(ctsio); 9121 ctl_done((union ctl_io *)ctsio); 9122 return (CTL_RETVAL_COMPLETE); 9123 } 9124 9125 /* Set FUA if write cache is disabled. */ 9126 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9127 SCP_WCE) == 0) 9128 flags |= CTL_LLF_FUA; 9129 9130 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 9131 ctsio->kern_rel_offset = 0; 9132 9133 /* 9134 * Set the IO_CONT flag, so that if this I/O gets passed to 9135 * ctl_data_submit_done(), it'll get passed back to 9136 * ctl_ctl_cnw_cont() for further processing. 9137 */ 9138 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 9139 ctsio->io_cont = ctl_cnw_cont; 9140 9141 lbalen = (struct ctl_lba_len_flags *) 9142 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9143 lbalen->lba = lba; 9144 lbalen->len = num_blocks; 9145 lbalen->flags = CTL_LLF_COMPARE | flags; 9146 9147 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 9148 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9149 return (retval); 9150 } 9151 9152 int 9153 ctl_verify(struct ctl_scsiio *ctsio) 9154 { 9155 struct ctl_lun *lun; 9156 struct ctl_lba_len_flags *lbalen; 9157 uint64_t lba; 9158 uint32_t num_blocks; 9159 int bytchk, flags; 9160 int retval; 9161 9162 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9163 9164 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9165 9166 bytchk = 0; 9167 flags = CTL_LLF_FUA; 9168 retval = CTL_RETVAL_COMPLETE; 9169 9170 switch (ctsio->cdb[0]) { 9171 case VERIFY_10: { 9172 struct scsi_verify_10 *cdb; 9173 9174 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9175 if (cdb->byte2 & SVFY_BYTCHK) 9176 bytchk = 1; 9177 if (cdb->byte2 & SVFY_DPO) 9178 flags |= CTL_LLF_DPO; 9179 lba = scsi_4btoul(cdb->addr); 9180 num_blocks = scsi_2btoul(cdb->length); 9181 break; 9182 } 9183 case VERIFY_12: { 9184 struct scsi_verify_12 *cdb; 9185 9186 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9187 if (cdb->byte2 & SVFY_BYTCHK) 9188 bytchk = 1; 9189 if (cdb->byte2 & SVFY_DPO) 9190 flags |= CTL_LLF_DPO; 9191 lba = scsi_4btoul(cdb->addr); 9192 num_blocks = scsi_4btoul(cdb->length); 9193 break; 9194 } 9195 case VERIFY_16: { 9196 struct scsi_rw_16 *cdb; 9197 9198 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9199 if (cdb->byte2 & SVFY_BYTCHK) 9200 bytchk = 1; 9201 if (cdb->byte2 & SVFY_DPO) 9202 flags |= CTL_LLF_DPO; 9203 lba = scsi_8btou64(cdb->addr); 9204 num_blocks = scsi_4btoul(cdb->length); 9205 break; 9206 } 9207 default: 9208 /* 9209 * We got a command we don't support. This shouldn't 9210 * happen, commands should be filtered out above us. 9211 */ 9212 ctl_set_invalid_opcode(ctsio); 9213 ctl_done((union ctl_io *)ctsio); 9214 return (CTL_RETVAL_COMPLETE); 9215 } 9216 9217 /* 9218 * The first check is to make sure we're in bounds, the second 9219 * check is to catch wrap-around problems. If the lba + num blocks 9220 * is less than the lba, then we've wrapped around and the block 9221 * range is invalid anyway. 
9222 */ 9223 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9224 || ((lba + num_blocks) < lba)) { 9225 ctl_set_lba_out_of_range(ctsio); 9226 ctl_done((union ctl_io *)ctsio); 9227 return (CTL_RETVAL_COMPLETE); 9228 } 9229 9230 /* 9231 * According to SBC-3, a transfer length of 0 is not an error. 9232 */ 9233 if (num_blocks == 0) { 9234 ctl_set_success(ctsio); 9235 ctl_done((union ctl_io *)ctsio); 9236 return (CTL_RETVAL_COMPLETE); 9237 } 9238 9239 lbalen = (struct ctl_lba_len_flags *) 9240 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9241 lbalen->lba = lba; 9242 lbalen->len = num_blocks; 9243 if (bytchk) { 9244 lbalen->flags = CTL_LLF_COMPARE | flags; 9245 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9246 } else { 9247 lbalen->flags = CTL_LLF_VERIFY | flags; 9248 ctsio->kern_total_len = 0; 9249 } 9250 ctsio->kern_rel_offset = 0; 9251 9252 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9253 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9254 return (retval); 9255 } 9256 9257 int 9258 ctl_report_luns(struct ctl_scsiio *ctsio) 9259 { 9260 struct ctl_softc *softc = control_softc; 9261 struct scsi_report_luns *cdb; 9262 struct scsi_report_luns_data *lun_data; 9263 struct ctl_lun *lun, *request_lun; 9264 struct ctl_port *port; 9265 int num_luns, retval; 9266 uint32_t alloc_len, lun_datalen; 9267 int num_filled; 9268 uint32_t initidx, targ_lun_id, lun_id; 9269 9270 retval = CTL_RETVAL_COMPLETE; 9271 cdb = (struct scsi_report_luns *)ctsio->cdb; 9272 port = ctl_io_port(&ctsio->io_hdr); 9273 9274 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9275 9276 mtx_lock(&softc->ctl_lock); 9277 num_luns = 0; 9278 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 9279 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 9280 num_luns++; 9281 } 9282 mtx_unlock(&softc->ctl_lock); 9283 9284 switch (cdb->select_report) { 9285 case RPL_REPORT_DEFAULT: 9286 case RPL_REPORT_ALL: 9287 case RPL_REPORT_NONSUBSID: 9288 break; 9289 case RPL_REPORT_WELLKNOWN: 9290 case RPL_REPORT_ADMIN: 9291 case RPL_REPORT_CONGLOM: 9292 num_luns = 0; 9293 break; 9294 default: 9295 ctl_set_invalid_field(ctsio, 9296 /*sks_valid*/ 1, 9297 /*command*/ 1, 9298 /*field*/ 2, 9299 /*bit_valid*/ 0, 9300 /*bit*/ 0); 9301 ctl_done((union ctl_io *)ctsio); 9302 return (retval); 9303 break; /* NOTREACHED */ 9304 } 9305 9306 alloc_len = scsi_4btoul(cdb->length); 9307 /* 9308 * The initiator has to allocate at least 16 bytes for this request, 9309 * so he can at least get the header and the first LUN. Otherwise 9310 * we reject the request (per SPC-3 rev 14, section 6.21). 
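 * (That is 8 bytes of list header plus one 8-byte LUN entry.)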
9311 */ 9312 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9313 sizeof(struct scsi_report_luns_lundata))) { 9314 ctl_set_invalid_field(ctsio, 9315 /*sks_valid*/ 1, 9316 /*command*/ 1, 9317 /*field*/ 6, 9318 /*bit_valid*/ 0, 9319 /*bit*/ 0); 9320 ctl_done((union ctl_io *)ctsio); 9321 return (retval); 9322 } 9323 9324 request_lun = (struct ctl_lun *) 9325 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9326 9327 lun_datalen = sizeof(*lun_data) + 9328 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9329 9330 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9331 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9332 ctsio->kern_sg_entries = 0; 9333 9334 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9335 9336 mtx_lock(&softc->ctl_lock); 9337 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9338 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9339 if (lun_id >= CTL_MAX_LUNS) 9340 continue; 9341 lun = softc->ctl_luns[lun_id]; 9342 if (lun == NULL) 9343 continue; 9344 9345 if (targ_lun_id <= 0xff) { 9346 /* 9347 * Peripheral addressing method, bus number 0. 9348 */ 9349 lun_data->luns[num_filled].lundata[0] = 9350 RPL_LUNDATA_ATYP_PERIPH; 9351 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9352 num_filled++; 9353 } else if (targ_lun_id <= 0x3fff) { 9354 /* 9355 * Flat addressing method. 9356 */ 9357 lun_data->luns[num_filled].lundata[0] = 9358 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9359 lun_data->luns[num_filled].lundata[1] = 9360 (targ_lun_id & 0xff); 9361 num_filled++; 9362 } else if (targ_lun_id <= 0xffffff) { 9363 /* 9364 * Extended flat addressing method. 9365 */ 9366 lun_data->luns[num_filled].lundata[0] = 9367 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9368 scsi_ulto3b(targ_lun_id, 9369 &lun_data->luns[num_filled].lundata[1]); 9370 num_filled++; 9371 } else { 9372 printf("ctl_report_luns: bogus LUN number %jd, " 9373 "skipping\n", (intmax_t)targ_lun_id); 9374 } 9375 /* 9376 * According to SPC-3, rev 14 section 6.21: 9377 * 9378 * "The execution of a REPORT LUNS command to any valid and 9379 * installed logical unit shall clear the REPORTED LUNS DATA 9380 * HAS CHANGED unit attention condition for all logical 9381 * units of that target with respect to the requesting 9382 * initiator. A valid and installed logical unit is one 9383 * having a PERIPHERAL QUALIFIER of 000b in the standard 9384 * INQUIRY data (see 6.4.2)." 9385 * 9386 * If request_lun is NULL, the LUN this report luns command 9387 * was issued to is either disabled or doesn't exist. In that 9388 * case, we shouldn't clear any pending lun change unit 9389 * attention. 9390 */ 9391 if (request_lun != NULL) { 9392 mtx_lock(&lun->lun_lock); 9393 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9394 mtx_unlock(&lun->lun_lock); 9395 } 9396 } 9397 mtx_unlock(&softc->ctl_lock); 9398 9399 /* 9400 * It's quite possible that we've returned fewer LUNs than we allocated 9401 * space for. Trim it. 
9402 */ 9403 lun_datalen = sizeof(*lun_data) + 9404 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9405 9406 if (lun_datalen < alloc_len) { 9407 ctsio->residual = alloc_len - lun_datalen; 9408 ctsio->kern_data_len = lun_datalen; 9409 ctsio->kern_total_len = lun_datalen; 9410 } else { 9411 ctsio->residual = 0; 9412 ctsio->kern_data_len = alloc_len; 9413 ctsio->kern_total_len = alloc_len; 9414 } 9415 ctsio->kern_data_resid = 0; 9416 ctsio->kern_rel_offset = 0; 9417 ctsio->kern_sg_entries = 0; 9418 9419 /* 9420 * We set this to the actual data length, regardless of how much 9421 * space we actually have to return results. If the user looks at 9422 * this value, he'll know whether or not he allocated enough space 9423 * and reissue the command if necessary. We don't support well 9424 * known logical units, so if the user asks for that, return none. 9425 */ 9426 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9427 9428 /* 9429 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9430 * this request. 9431 */ 9432 ctl_set_success(ctsio); 9433 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9434 ctsio->be_move_done = ctl_config_move_done; 9435 ctl_datamove((union ctl_io *)ctsio); 9436 return (retval); 9437 } 9438 9439 int 9440 ctl_request_sense(struct ctl_scsiio *ctsio) 9441 { 9442 struct scsi_request_sense *cdb; 9443 struct scsi_sense_data *sense_ptr; 9444 struct ctl_softc *ctl_softc; 9445 struct ctl_lun *lun; 9446 uint32_t initidx; 9447 int have_error; 9448 scsi_sense_data_type sense_format; 9449 ctl_ua_type ua_type; 9450 9451 cdb = (struct scsi_request_sense *)ctsio->cdb; 9452 9453 ctl_softc = control_softc; 9454 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9455 9456 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9457 9458 /* 9459 * Determine which sense format the user wants. 9460 */ 9461 if (cdb->byte2 & SRS_DESC) 9462 sense_format = SSD_TYPE_DESC; 9463 else 9464 sense_format = SSD_TYPE_FIXED; 9465 9466 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9467 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9468 ctsio->kern_sg_entries = 0; 9469 9470 /* 9471 * struct scsi_sense_data, which is currently set to 256 bytes, is 9472 * larger than the largest allowed value for the length field in the 9473 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9474 */ 9475 ctsio->residual = 0; 9476 ctsio->kern_data_len = cdb->length; 9477 ctsio->kern_total_len = cdb->length; 9478 9479 ctsio->kern_data_resid = 0; 9480 ctsio->kern_rel_offset = 0; 9481 ctsio->kern_sg_entries = 0; 9482 9483 /* 9484 * If we don't have a LUN, we don't have any pending sense. 9485 */ 9486 if (lun == NULL) 9487 goto no_sense; 9488 9489 have_error = 0; 9490 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9491 /* 9492 * Check for pending sense, and then for pending unit attentions. 9493 * Pending sense gets returned first, then pending unit attentions. 9494 */ 9495 mtx_lock(&lun->lun_lock); 9496 #ifdef CTL_WITH_CA 9497 if (ctl_is_set(lun->have_ca, initidx)) { 9498 scsi_sense_data_type stored_format; 9499 9500 /* 9501 * Check to see which sense format was used for the stored 9502 * sense data. 9503 */ 9504 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9505 9506 /* 9507 * If the user requested a different sense format than the 9508 * one we stored, then we need to convert it to the other 9509 * format. If we're going from descriptor to fixed format 9510 * sense data, we may lose things in translation, depending 9511 * on what options were used. 
9512 * 9513 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9514 * for some reason we'll just copy it out as-is. 9515 */ 9516 if ((stored_format == SSD_TYPE_FIXED) 9517 && (sense_format == SSD_TYPE_DESC)) 9518 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9519 &lun->pending_sense[initidx], 9520 (struct scsi_sense_data_desc *)sense_ptr); 9521 else if ((stored_format == SSD_TYPE_DESC) 9522 && (sense_format == SSD_TYPE_FIXED)) 9523 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9524 &lun->pending_sense[initidx], 9525 (struct scsi_sense_data_fixed *)sense_ptr); 9526 else 9527 memcpy(sense_ptr, &lun->pending_sense[initidx], 9528 MIN(sizeof(*sense_ptr), 9529 sizeof(lun->pending_sense[initidx]))); 9530 9531 ctl_clear_mask(lun->have_ca, initidx); 9532 have_error = 1; 9533 } else 9534 #endif 9535 { 9536 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9537 if (ua_type != CTL_UA_NONE) 9538 have_error = 1; 9539 if (ua_type == CTL_UA_LUN_CHANGE) { 9540 mtx_unlock(&lun->lun_lock); 9541 mtx_lock(&ctl_softc->ctl_lock); 9542 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type); 9543 mtx_unlock(&ctl_softc->ctl_lock); 9544 mtx_lock(&lun->lun_lock); 9545 } 9546 9547 } 9548 mtx_unlock(&lun->lun_lock); 9549 9550 /* 9551 * We already have a pending error, return it. 9552 */ 9553 if (have_error != 0) { 9554 /* 9555 * We report the SCSI status as OK, since the status of the 9556 * request sense command itself is OK. 9557 * We report 0 for the sense length, because we aren't doing 9558 * autosense in this case. We're reporting sense as 9559 * parameter data. 9560 */ 9561 ctl_set_success(ctsio); 9562 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9563 ctsio->be_move_done = ctl_config_move_done; 9564 ctl_datamove((union ctl_io *)ctsio); 9565 return (CTL_RETVAL_COMPLETE); 9566 } 9567 9568 no_sense: 9569 9570 /* 9571 * No sense information to report, so we report that everything is 9572 * okay. 9573 */ 9574 ctl_set_sense_data(sense_ptr, 9575 lun, 9576 sense_format, 9577 /*current_error*/ 1, 9578 /*sense_key*/ SSD_KEY_NO_SENSE, 9579 /*asc*/ 0x00, 9580 /*ascq*/ 0x00, 9581 SSD_ELEM_NONE); 9582 9583 /* 9584 * We report 0 for the sense length, because we aren't doing 9585 * autosense in this case. We're reporting sense as parameter data. 9586 */ 9587 ctl_set_success(ctsio); 9588 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9589 ctsio->be_move_done = ctl_config_move_done; 9590 ctl_datamove((union ctl_io *)ctsio); 9591 return (CTL_RETVAL_COMPLETE); 9592 } 9593 9594 int 9595 ctl_tur(struct ctl_scsiio *ctsio) 9596 { 9597 9598 CTL_DEBUG_PRINT(("ctl_tur\n")); 9599 9600 ctl_set_success(ctsio); 9601 ctl_done((union ctl_io *)ctsio); 9602 9603 return (CTL_RETVAL_COMPLETE); 9604 } 9605 9606 /* 9607 * SCSI VPD page 0x00, the Supported VPD Pages page. 
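 * The page codes listed below are kept in ascending order, as SPC-4 requires for this page.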
9608 */ 9609 static int 9610 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9611 { 9612 struct scsi_vpd_supported_pages *pages; 9613 int sup_page_size; 9614 struct ctl_lun *lun; 9615 int p; 9616 9617 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9618 9619 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9620 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9621 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9622 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9623 ctsio->kern_sg_entries = 0; 9624 9625 if (sup_page_size < alloc_len) { 9626 ctsio->residual = alloc_len - sup_page_size; 9627 ctsio->kern_data_len = sup_page_size; 9628 ctsio->kern_total_len = sup_page_size; 9629 } else { 9630 ctsio->residual = 0; 9631 ctsio->kern_data_len = alloc_len; 9632 ctsio->kern_total_len = alloc_len; 9633 } 9634 ctsio->kern_data_resid = 0; 9635 ctsio->kern_rel_offset = 0; 9636 ctsio->kern_sg_entries = 0; 9637 9638 /* 9639 * The control device is always connected. The disk device, on the 9640 * other hand, may not be online all the time. Need to change this 9641 * to figure out whether the disk device is actually online or not. 9642 */ 9643 if (lun != NULL) 9644 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9645 lun->be_lun->lun_type; 9646 else 9647 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9648 9649 p = 0; 9650 /* Supported VPD pages */ 9651 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9652 /* Serial Number */ 9653 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9654 /* Device Identification */ 9655 pages->page_list[p++] = SVPD_DEVICE_ID; 9656 /* Extended INQUIRY Data */ 9657 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9658 /* Mode Page Policy */ 9659 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9660 /* SCSI Ports */ 9661 pages->page_list[p++] = SVPD_SCSI_PORTS; 9662 /* Third-party Copy */ 9663 pages->page_list[p++] = SVPD_SCSI_TPC; 9664 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9665 /* Block limits */ 9666 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9667 /* Block Device Characteristics */ 9668 pages->page_list[p++] = SVPD_BDC; 9669 /* Logical Block Provisioning */ 9670 pages->page_list[p++] = SVPD_LBP; 9671 } 9672 pages->length = p; 9673 9674 ctl_set_success(ctsio); 9675 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9676 ctsio->be_move_done = ctl_config_move_done; 9677 ctl_datamove((union ctl_io *)ctsio); 9678 return (CTL_RETVAL_COMPLETE); 9679 } 9680 9681 /* 9682 * SCSI VPD page 0x80, the Unit Serial Number page. 9683 */ 9684 static int 9685 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9686 { 9687 struct scsi_vpd_unit_serial_number *sn_ptr; 9688 struct ctl_lun *lun; 9689 int data_len; 9690 9691 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9692 9693 data_len = 4 + CTL_SN_LEN; 9694 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9695 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9696 if (data_len < alloc_len) { 9697 ctsio->residual = alloc_len - data_len; 9698 ctsio->kern_data_len = data_len; 9699 ctsio->kern_total_len = data_len; 9700 } else { 9701 ctsio->residual = 0; 9702 ctsio->kern_data_len = alloc_len; 9703 ctsio->kern_total_len = alloc_len; 9704 } 9705 ctsio->kern_data_resid = 0; 9706 ctsio->kern_rel_offset = 0; 9707 ctsio->kern_sg_entries = 0; 9708 9709 /* 9710 * The control device is always connected. The disk device, on the 9711 * other hand, may not be online all the time. 
Need to change this 9712 * to figure out whether the disk device is actually online or not. 9713 */ 9714 if (lun != NULL) 9715 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9716 lun->be_lun->lun_type; 9717 else 9718 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9719 9720 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9721 sn_ptr->length = CTL_SN_LEN; 9722 /* 9723 * If we don't have a LUN, we just leave the serial number as 9724 * all spaces. 9725 */ 9726 if (lun != NULL) { 9727 strncpy((char *)sn_ptr->serial_num, 9728 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9729 } else 9730 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9731 9732 ctl_set_success(ctsio); 9733 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9734 ctsio->be_move_done = ctl_config_move_done; 9735 ctl_datamove((union ctl_io *)ctsio); 9736 return (CTL_RETVAL_COMPLETE); 9737 } 9738 9739 9740 /* 9741 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9742 */ 9743 static int 9744 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9745 { 9746 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9747 struct ctl_lun *lun; 9748 int data_len; 9749 9750 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9751 9752 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9753 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9754 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9755 ctsio->kern_sg_entries = 0; 9756 9757 if (data_len < alloc_len) { 9758 ctsio->residual = alloc_len - data_len; 9759 ctsio->kern_data_len = data_len; 9760 ctsio->kern_total_len = data_len; 9761 } else { 9762 ctsio->residual = 0; 9763 ctsio->kern_data_len = alloc_len; 9764 ctsio->kern_total_len = alloc_len; 9765 } 9766 ctsio->kern_data_resid = 0; 9767 ctsio->kern_rel_offset = 0; 9768 ctsio->kern_sg_entries = 0; 9769 9770 /* 9771 * The control device is always connected. The disk device, on the 9772 * other hand, may not be online all the time. 9773 */ 9774 if (lun != NULL) 9775 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9776 lun->be_lun->lun_type; 9777 else 9778 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9779 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9780 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9781 /* 9782 * We support head of queue, ordered and simple tags. 9783 */ 9784 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9785 /* 9786 * Volatile cache supported. 9787 */ 9788 eid_ptr->flags3 = SVPD_EID_V_SUP; 9789 9790 /* 9791 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9792 * attention for a particular IT nexus on all LUNs once we report 9793 * it to that nexus once. This bit is required as of SPC-4. 9794 */ 9795 eid_ptr->flags4 = SVPD_EID_LUICLT; 9796 9797 /* 9798 * XXX KDM in order to correctly answer this, we would need 9799 * information from the SIM to determine how much sense data it 9800 * can send. So this would really be a path inquiry field, most 9801 * likely. This can be set to a maximum of 252 according to SPC-4, 9802 * but the hardware may or may not be able to support that much. 9803 * 0 just means that the maximum sense data length is not reported. 
9804 */ 9805 eid_ptr->max_sense_length = 0; 9806 9807 ctl_set_success(ctsio); 9808 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9809 ctsio->be_move_done = ctl_config_move_done; 9810 ctl_datamove((union ctl_io *)ctsio); 9811 return (CTL_RETVAL_COMPLETE); 9812 } 9813 9814 static int 9815 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9816 { 9817 struct scsi_vpd_mode_page_policy *mpp_ptr; 9818 struct ctl_lun *lun; 9819 int data_len; 9820 9821 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9822 9823 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9824 sizeof(struct scsi_vpd_mode_page_policy_descr); 9825 9826 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9827 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9828 ctsio->kern_sg_entries = 0; 9829 9830 if (data_len < alloc_len) { 9831 ctsio->residual = alloc_len - data_len; 9832 ctsio->kern_data_len = data_len; 9833 ctsio->kern_total_len = data_len; 9834 } else { 9835 ctsio->residual = 0; 9836 ctsio->kern_data_len = alloc_len; 9837 ctsio->kern_total_len = alloc_len; 9838 } 9839 ctsio->kern_data_resid = 0; 9840 ctsio->kern_rel_offset = 0; 9841 ctsio->kern_sg_entries = 0; 9842 9843 /* 9844 * The control device is always connected. The disk device, on the 9845 * other hand, may not be online all the time. 9846 */ 9847 if (lun != NULL) 9848 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9849 lun->be_lun->lun_type; 9850 else 9851 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9852 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9853 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9854 mpp_ptr->descr[0].page_code = 0x3f; 9855 mpp_ptr->descr[0].subpage_code = 0xff; 9856 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9857 9858 ctl_set_success(ctsio); 9859 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9860 ctsio->be_move_done = ctl_config_move_done; 9861 ctl_datamove((union ctl_io *)ctsio); 9862 return (CTL_RETVAL_COMPLETE); 9863 } 9864 9865 /* 9866 * SCSI VPD page 0x83, the Device Identification page. 
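 * The descriptor list built here consists of any identifier registered for
 * the LUN itself, any port and target identifiers registered by the
 * frontend, plus binary Relative Target Port and Target Port Group
 * descriptors generated below.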
9867 */ 9868 static int 9869 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9870 { 9871 struct scsi_vpd_device_id *devid_ptr; 9872 struct scsi_vpd_id_descriptor *desc; 9873 struct ctl_softc *softc; 9874 struct ctl_lun *lun; 9875 struct ctl_port *port; 9876 int data_len; 9877 uint8_t proto; 9878 9879 softc = control_softc; 9880 9881 port = ctl_io_port(&ctsio->io_hdr); 9882 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9883 9884 data_len = sizeof(struct scsi_vpd_device_id) + 9885 sizeof(struct scsi_vpd_id_descriptor) + 9886 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9887 sizeof(struct scsi_vpd_id_descriptor) + 9888 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9889 if (lun && lun->lun_devid) 9890 data_len += lun->lun_devid->len; 9891 if (port && port->port_devid) 9892 data_len += port->port_devid->len; 9893 if (port && port->target_devid) 9894 data_len += port->target_devid->len; 9895 9896 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9897 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9898 ctsio->kern_sg_entries = 0; 9899 9900 if (data_len < alloc_len) { 9901 ctsio->residual = alloc_len - data_len; 9902 ctsio->kern_data_len = data_len; 9903 ctsio->kern_total_len = data_len; 9904 } else { 9905 ctsio->residual = 0; 9906 ctsio->kern_data_len = alloc_len; 9907 ctsio->kern_total_len = alloc_len; 9908 } 9909 ctsio->kern_data_resid = 0; 9910 ctsio->kern_rel_offset = 0; 9911 ctsio->kern_sg_entries = 0; 9912 9913 /* 9914 * The control device is always connected. The disk device, on the 9915 * other hand, may not be online all the time. 9916 */ 9917 if (lun != NULL) 9918 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9919 lun->be_lun->lun_type; 9920 else 9921 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9922 devid_ptr->page_code = SVPD_DEVICE_ID; 9923 scsi_ulto2b(data_len - 4, devid_ptr->length); 9924 9925 if (port && port->port_type == CTL_PORT_FC) 9926 proto = SCSI_PROTO_FC << 4; 9927 else if (port && port->port_type == CTL_PORT_ISCSI) 9928 proto = SCSI_PROTO_ISCSI << 4; 9929 else 9930 proto = SCSI_PROTO_SPI << 4; 9931 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9932 9933 /* 9934 * We're using a LUN association here. i.e., this device ID is a 9935 * per-LUN identifier. 9936 */ 9937 if (lun && lun->lun_devid) { 9938 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9939 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9940 lun->lun_devid->len); 9941 } 9942 9943 /* 9944 * This is for the WWPN which is a port association. 
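 * (In this page an ASSOCIATION value of 0h names the logical unit, 1h the
 * target port and 2h the SCSI target device; the port_devid registered by
 * the frontend is expected to use the target port association.)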
9945 */ 9946 if (port && port->port_devid) { 9947 memcpy(desc, port->port_devid->data, port->port_devid->len); 9948 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9949 port->port_devid->len); 9950 } 9951 9952 /* 9953 * This is for the Relative Target Port(type 4h) identifier 9954 */ 9955 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9956 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9957 SVPD_ID_TYPE_RELTARG; 9958 desc->length = 4; 9959 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9960 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9961 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9962 9963 /* 9964 * This is for the Target Port Group(type 5h) identifier 9965 */ 9966 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9967 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9968 SVPD_ID_TYPE_TPORTGRP; 9969 desc->length = 4; 9970 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1, 9971 &desc->identifier[2]); 9972 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9973 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9974 9975 /* 9976 * This is for the Target identifier 9977 */ 9978 if (port && port->target_devid) { 9979 memcpy(desc, port->target_devid->data, port->target_devid->len); 9980 } 9981 9982 ctl_set_success(ctsio); 9983 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9984 ctsio->be_move_done = ctl_config_move_done; 9985 ctl_datamove((union ctl_io *)ctsio); 9986 return (CTL_RETVAL_COMPLETE); 9987 } 9988 9989 static int 9990 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9991 { 9992 struct ctl_softc *softc = control_softc; 9993 struct scsi_vpd_scsi_ports *sp; 9994 struct scsi_vpd_port_designation *pd; 9995 struct scsi_vpd_port_designation_cont *pdc; 9996 struct ctl_lun *lun; 9997 struct ctl_port *port; 9998 int data_len, num_target_ports, iid_len, id_len; 9999 10000 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10001 10002 num_target_ports = 0; 10003 iid_len = 0; 10004 id_len = 0; 10005 mtx_lock(&softc->ctl_lock); 10006 STAILQ_FOREACH(port, &softc->port_list, links) { 10007 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10008 continue; 10009 if (lun != NULL && 10010 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 10011 continue; 10012 num_target_ports++; 10013 if (port->init_devid) 10014 iid_len += port->init_devid->len; 10015 if (port->port_devid) 10016 id_len += port->port_devid->len; 10017 } 10018 mtx_unlock(&softc->ctl_lock); 10019 10020 data_len = sizeof(struct scsi_vpd_scsi_ports) + 10021 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 10022 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 10023 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10024 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 10025 ctsio->kern_sg_entries = 0; 10026 10027 if (data_len < alloc_len) { 10028 ctsio->residual = alloc_len - data_len; 10029 ctsio->kern_data_len = data_len; 10030 ctsio->kern_total_len = data_len; 10031 } else { 10032 ctsio->residual = 0; 10033 ctsio->kern_data_len = alloc_len; 10034 ctsio->kern_total_len = alloc_len; 10035 } 10036 ctsio->kern_data_resid = 0; 10037 ctsio->kern_rel_offset = 0; 10038 ctsio->kern_sg_entries = 0; 10039 10040 /* 10041 * The control device is always connected. The disk device, on the 10042 * other hand, may not be online all the time. Need to change this 10043 * to figure out whether the disk device is actually online or not. 
10044 */ 10045 if (lun != NULL) 10046 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 10047 lun->be_lun->lun_type; 10048 else 10049 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10050 10051 sp->page_code = SVPD_SCSI_PORTS; 10052 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 10053 sp->page_length); 10054 pd = &sp->design[0]; 10055 10056 mtx_lock(&softc->ctl_lock); 10057 STAILQ_FOREACH(port, &softc->port_list, links) { 10058 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10059 continue; 10060 if (lun != NULL && 10061 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 10062 continue; 10063 scsi_ulto2b(port->targ_port, pd->relative_port_id); 10064 if (port->init_devid) { 10065 iid_len = port->init_devid->len; 10066 memcpy(pd->initiator_transportid, 10067 port->init_devid->data, port->init_devid->len); 10068 } else 10069 iid_len = 0; 10070 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 10071 pdc = (struct scsi_vpd_port_designation_cont *) 10072 (&pd->initiator_transportid[iid_len]); 10073 if (port->port_devid) { 10074 id_len = port->port_devid->len; 10075 memcpy(pdc->target_port_descriptors, 10076 port->port_devid->data, port->port_devid->len); 10077 } else 10078 id_len = 0; 10079 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 10080 pd = (struct scsi_vpd_port_designation *) 10081 ((uint8_t *)pdc->target_port_descriptors + id_len); 10082 } 10083 mtx_unlock(&softc->ctl_lock); 10084 10085 ctl_set_success(ctsio); 10086 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10087 ctsio->be_move_done = ctl_config_move_done; 10088 ctl_datamove((union ctl_io *)ctsio); 10089 return (CTL_RETVAL_COMPLETE); 10090 } 10091 10092 static int 10093 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 10094 { 10095 struct scsi_vpd_block_limits *bl_ptr; 10096 struct ctl_lun *lun; 10097 int bs; 10098 10099 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10100 10101 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 10102 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 10103 ctsio->kern_sg_entries = 0; 10104 10105 if (sizeof(*bl_ptr) < alloc_len) { 10106 ctsio->residual = alloc_len - sizeof(*bl_ptr); 10107 ctsio->kern_data_len = sizeof(*bl_ptr); 10108 ctsio->kern_total_len = sizeof(*bl_ptr); 10109 } else { 10110 ctsio->residual = 0; 10111 ctsio->kern_data_len = alloc_len; 10112 ctsio->kern_total_len = alloc_len; 10113 } 10114 ctsio->kern_data_resid = 0; 10115 ctsio->kern_rel_offset = 0; 10116 ctsio->kern_sg_entries = 0; 10117 10118 /* 10119 * The control device is always connected. The disk device, on the 10120 * other hand, may not be online all the time. Need to change this 10121 * to figure out whether the disk device is actually online or not. 
10122 */ 10123 if (lun != NULL) 10124 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10125 lun->be_lun->lun_type; 10126 else 10127 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10128 10129 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 10130 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 10131 bl_ptr->max_cmp_write_len = 0xff; 10132 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 10133 if (lun != NULL) { 10134 bs = lun->be_lun->blocksize; 10135 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 10136 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10137 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 10138 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 10139 if (lun->be_lun->ublockexp != 0) { 10140 scsi_ulto4b((1 << lun->be_lun->ublockexp), 10141 bl_ptr->opt_unmap_grain); 10142 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 10143 bl_ptr->unmap_grain_align); 10144 } 10145 } 10146 scsi_ulto4b(lun->be_lun->atomicblock, 10147 bl_ptr->max_atomic_transfer_length); 10148 scsi_ulto4b(0, bl_ptr->atomic_alignment); 10149 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 10150 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 10151 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 10152 } 10153 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 10154 10155 ctl_set_success(ctsio); 10156 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10157 ctsio->be_move_done = ctl_config_move_done; 10158 ctl_datamove((union ctl_io *)ctsio); 10159 return (CTL_RETVAL_COMPLETE); 10160 } 10161 10162 static int 10163 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10164 { 10165 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10166 struct ctl_lun *lun; 10167 const char *value; 10168 u_int i; 10169 10170 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10171 10172 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10173 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10174 ctsio->kern_sg_entries = 0; 10175 10176 if (sizeof(*bdc_ptr) < alloc_len) { 10177 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 10178 ctsio->kern_data_len = sizeof(*bdc_ptr); 10179 ctsio->kern_total_len = sizeof(*bdc_ptr); 10180 } else { 10181 ctsio->residual = 0; 10182 ctsio->kern_data_len = alloc_len; 10183 ctsio->kern_total_len = alloc_len; 10184 } 10185 ctsio->kern_data_resid = 0; 10186 ctsio->kern_rel_offset = 0; 10187 ctsio->kern_sg_entries = 0; 10188 10189 /* 10190 * The control device is always connected. The disk device, on the 10191 * other hand, may not be online all the time. Need to change this 10192 * to figure out whether the disk device is actually online or not. 
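 * The rotation rate and form factor reported below come from the backend
 * LUN's "rpm" and "formfactor" options when those are set; otherwise we
 * report CTL_DEFAULT_ROTATION_RATE and a form factor of zero (not reported).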
10193 */ 10194 if (lun != NULL) 10195 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10196 lun->be_lun->lun_type; 10197 else 10198 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10199 bdc_ptr->page_code = SVPD_BDC; 10200 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10201 if (lun != NULL && 10202 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 10203 i = strtol(value, NULL, 0); 10204 else 10205 i = CTL_DEFAULT_ROTATION_RATE; 10206 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10207 if (lun != NULL && 10208 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 10209 i = strtol(value, NULL, 0); 10210 else 10211 i = 0; 10212 bdc_ptr->wab_wac_ff = (i & 0x0f); 10213 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10214 10215 ctl_set_success(ctsio); 10216 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10217 ctsio->be_move_done = ctl_config_move_done; 10218 ctl_datamove((union ctl_io *)ctsio); 10219 return (CTL_RETVAL_COMPLETE); 10220 } 10221 10222 static int 10223 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10224 { 10225 struct scsi_vpd_logical_block_prov *lbp_ptr; 10226 struct ctl_lun *lun; 10227 10228 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10229 10230 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10231 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10232 ctsio->kern_sg_entries = 0; 10233 10234 if (sizeof(*lbp_ptr) < alloc_len) { 10235 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10236 ctsio->kern_data_len = sizeof(*lbp_ptr); 10237 ctsio->kern_total_len = sizeof(*lbp_ptr); 10238 } else { 10239 ctsio->residual = 0; 10240 ctsio->kern_data_len = alloc_len; 10241 ctsio->kern_total_len = alloc_len; 10242 } 10243 ctsio->kern_data_resid = 0; 10244 ctsio->kern_rel_offset = 0; 10245 ctsio->kern_sg_entries = 0; 10246 10247 /* 10248 * The control device is always connected. The disk device, on the 10249 * other hand, may not be online all the time. Need to change this 10250 * to figure out whether the disk device is actually online or not. 10251 */ 10252 if (lun != NULL) 10253 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10254 lun->be_lun->lun_type; 10255 else 10256 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10257 10258 lbp_ptr->page_code = SVPD_LBP; 10259 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10260 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10261 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10262 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10263 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10264 lbp_ptr->prov_type = SVPD_LBP_THIN; 10265 } 10266 10267 ctl_set_success(ctsio); 10268 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10269 ctsio->be_move_done = ctl_config_move_done; 10270 ctl_datamove((union ctl_io *)ctsio); 10271 return (CTL_RETVAL_COMPLETE); 10272 } 10273 10274 /* 10275 * INQUIRY with the EVPD bit set. 
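 * Dispatch on the PAGE CODE byte of the CDB. Unsupported pages, and the
 * block device pages when the LUN is not direct access, are rejected with
 * INVALID FIELD IN CDB pointing at CDB byte 2.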
10276 */ 10277 static int 10278 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10279 { 10280 struct ctl_lun *lun; 10281 struct scsi_inquiry *cdb; 10282 int alloc_len, retval; 10283 10284 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10285 cdb = (struct scsi_inquiry *)ctsio->cdb; 10286 alloc_len = scsi_2btoul(cdb->length); 10287 10288 switch (cdb->page_code) { 10289 case SVPD_SUPPORTED_PAGES: 10290 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10291 break; 10292 case SVPD_UNIT_SERIAL_NUMBER: 10293 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10294 break; 10295 case SVPD_DEVICE_ID: 10296 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10297 break; 10298 case SVPD_EXTENDED_INQUIRY_DATA: 10299 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10300 break; 10301 case SVPD_MODE_PAGE_POLICY: 10302 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10303 break; 10304 case SVPD_SCSI_PORTS: 10305 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10306 break; 10307 case SVPD_SCSI_TPC: 10308 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10309 break; 10310 case SVPD_BLOCK_LIMITS: 10311 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10312 goto err; 10313 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10314 break; 10315 case SVPD_BDC: 10316 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10317 goto err; 10318 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10319 break; 10320 case SVPD_LBP: 10321 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10322 goto err; 10323 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10324 break; 10325 default: 10326 err: 10327 ctl_set_invalid_field(ctsio, 10328 /*sks_valid*/ 1, 10329 /*command*/ 1, 10330 /*field*/ 2, 10331 /*bit_valid*/ 0, 10332 /*bit*/ 0); 10333 ctl_done((union ctl_io *)ctsio); 10334 retval = CTL_RETVAL_COMPLETE; 10335 break; 10336 } 10337 10338 return (retval); 10339 } 10340 10341 /* 10342 * Standard INQUIRY data. 10343 */ 10344 static int 10345 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10346 { 10347 struct scsi_inquiry_data *inq_ptr; 10348 struct scsi_inquiry *cdb; 10349 struct ctl_softc *softc; 10350 struct ctl_port *port; 10351 struct ctl_lun *lun; 10352 char *val; 10353 uint32_t alloc_len, data_len; 10354 ctl_port_type port_type; 10355 10356 softc = control_softc; 10357 10358 /* 10359 * Figure out whether we're talking to a Fibre Channel port or not. 10360 * We treat the ioctl front end, and any SCSI adapters, as packetized 10361 * SCSI front ends. 10362 */ 10363 port = ctl_io_port(&ctsio->io_hdr); 10364 if (port != NULL) 10365 port_type = port->port_type; 10366 else 10367 port_type = CTL_PORT_SCSI; 10368 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10369 port_type = CTL_PORT_SCSI; 10370 10371 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10372 cdb = (struct scsi_inquiry *)ctsio->cdb; 10373 alloc_len = scsi_2btoul(cdb->length); 10374 10375 /* 10376 * We malloc the full inquiry data size here and fill it 10377 * in. If the user only asks for less, we'll give him 10378 * that much. 
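 * For example, a traditional 36-byte allocation length gets just the first
 * 36 bytes of the data back with no residual, while an allocation length
 * larger than data_len gets the whole structure with the difference reported
 * as residual.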
10379 */ 10380 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10381 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10382 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10383 ctsio->kern_sg_entries = 0; 10384 ctsio->kern_data_resid = 0; 10385 ctsio->kern_rel_offset = 0; 10386 10387 if (data_len < alloc_len) { 10388 ctsio->residual = alloc_len - data_len; 10389 ctsio->kern_data_len = data_len; 10390 ctsio->kern_total_len = data_len; 10391 } else { 10392 ctsio->residual = 0; 10393 ctsio->kern_data_len = alloc_len; 10394 ctsio->kern_total_len = alloc_len; 10395 } 10396 10397 if (lun != NULL) { 10398 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10399 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10400 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10401 lun->be_lun->lun_type; 10402 } else { 10403 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10404 lun->be_lun->lun_type; 10405 } 10406 } else 10407 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10408 10409 /* RMB in byte 2 is 0 */ 10410 inq_ptr->version = SCSI_REV_SPC4; 10411 10412 /* 10413 * According to SAM-3, even if a device only supports a single 10414 * level of LUN addressing, it should still set the HISUP bit: 10415 * 10416 * 4.9.1 Logical unit numbers overview 10417 * 10418 * All logical unit number formats described in this standard are 10419 * hierarchical in structure even when only a single level in that 10420 * hierarchy is used. The HISUP bit shall be set to one in the 10421 * standard INQUIRY data (see SPC-2) when any logical unit number 10422 * format described in this standard is used. Non-hierarchical 10423 * formats are outside the scope of this standard. 10424 * 10425 * Therefore we set the HiSup bit here. 10426 * 10427 * The response format is 2, per SPC-3. 10428 */ 10429 inq_ptr->response_format = SID_HiSup | 2; 10430 10431 inq_ptr->additional_length = data_len - 10432 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10433 CTL_DEBUG_PRINT(("additional_length = %d\n", 10434 inq_ptr->additional_length)); 10435 10436 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10437 /* 16 bit addressing */ 10438 if (port_type == CTL_PORT_SCSI) 10439 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10440 /* XXX set the SID_MultiP bit here if we're actually going to 10441 respond on multiple ports */ 10442 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10443 10444 /* 16 bit data bus, synchronous transfers */ 10445 if (port_type == CTL_PORT_SCSI) 10446 inq_ptr->flags = SID_WBus16 | SID_Sync; 10447 /* 10448 * XXX KDM do we want to support tagged queueing on the control 10449 * device at all? 10450 */ 10451 if ((lun == NULL) 10452 || (lun->be_lun->lun_type != T_PROCESSOR)) 10453 inq_ptr->flags |= SID_CmdQue; 10454 /* 10455 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10456 * We have 8 bytes for the vendor name, and 16 bytes for the device 10457 * name and 4 bytes for the revision.
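 * For example, with no "vendor", "product" or "revision" options set on the
 * backend LUN, a direct access LUN reports the compiled-in CTL_VENDOR and
 * CTL_DIRECT_PRODUCT strings and the "0001" revision, each padded with
 * spaces to its fixed width.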
10458 */ 10459 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10460 "vendor")) == NULL) { 10461 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10462 } else { 10463 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10464 strncpy(inq_ptr->vendor, val, 10465 min(sizeof(inq_ptr->vendor), strlen(val))); 10466 } 10467 if (lun == NULL) { 10468 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10469 sizeof(inq_ptr->product)); 10470 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10471 switch (lun->be_lun->lun_type) { 10472 case T_DIRECT: 10473 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10474 sizeof(inq_ptr->product)); 10475 break; 10476 case T_PROCESSOR: 10477 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10478 sizeof(inq_ptr->product)); 10479 break; 10480 default: 10481 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10482 sizeof(inq_ptr->product)); 10483 break; 10484 } 10485 } else { 10486 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10487 strncpy(inq_ptr->product, val, 10488 min(sizeof(inq_ptr->product), strlen(val))); 10489 } 10490 10491 /* 10492 * XXX make this a macro somewhere so it automatically gets 10493 * incremented when we make changes. 10494 */ 10495 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10496 "revision")) == NULL) { 10497 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10498 } else { 10499 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10500 strncpy(inq_ptr->revision, val, 10501 min(sizeof(inq_ptr->revision), strlen(val))); 10502 } 10503 10504 /* 10505 * For parallel SCSI, we support double transition and single 10506 * transition clocking. We also support QAS (Quick Arbitration 10507 * and Selection) and Information Unit transfers on both the 10508 * control and array devices. 
10509 */ 10510 if (port_type == CTL_PORT_SCSI) 10511 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10512 SID_SPI_IUS; 10513 10514 /* SAM-5 (no version claimed) */ 10515 scsi_ulto2b(0x00A0, inq_ptr->version1); 10516 /* SPC-4 (no version claimed) */ 10517 scsi_ulto2b(0x0460, inq_ptr->version2); 10518 if (port_type == CTL_PORT_FC) { 10519 /* FCP-2 ANSI INCITS.350:2003 */ 10520 scsi_ulto2b(0x0917, inq_ptr->version3); 10521 } else if (port_type == CTL_PORT_SCSI) { 10522 /* SPI-4 ANSI INCITS.362:200x */ 10523 scsi_ulto2b(0x0B56, inq_ptr->version3); 10524 } else if (port_type == CTL_PORT_ISCSI) { 10525 /* iSCSI (no version claimed) */ 10526 scsi_ulto2b(0x0960, inq_ptr->version3); 10527 } else if (port_type == CTL_PORT_SAS) { 10528 /* SAS (no version claimed) */ 10529 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10530 } 10531 10532 if (lun == NULL) { 10533 /* SBC-4 (no version claimed) */ 10534 scsi_ulto2b(0x0600, inq_ptr->version4); 10535 } else { 10536 switch (lun->be_lun->lun_type) { 10537 case T_DIRECT: 10538 /* SBC-4 (no version claimed) */ 10539 scsi_ulto2b(0x0600, inq_ptr->version4); 10540 break; 10541 case T_PROCESSOR: 10542 default: 10543 break; 10544 } 10545 } 10546 10547 ctl_set_success(ctsio); 10548 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10549 ctsio->be_move_done = ctl_config_move_done; 10550 ctl_datamove((union ctl_io *)ctsio); 10551 return (CTL_RETVAL_COMPLETE); 10552 } 10553 10554 int 10555 ctl_inquiry(struct ctl_scsiio *ctsio) 10556 { 10557 struct scsi_inquiry *cdb; 10558 int retval; 10559 10560 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10561 10562 cdb = (struct scsi_inquiry *)ctsio->cdb; 10563 if (cdb->byte2 & SI_EVPD) 10564 retval = ctl_inquiry_evpd(ctsio); 10565 else if (cdb->page_code == 0) 10566 retval = ctl_inquiry_std(ctsio); 10567 else { 10568 ctl_set_invalid_field(ctsio, 10569 /*sks_valid*/ 1, 10570 /*command*/ 1, 10571 /*field*/ 2, 10572 /*bit_valid*/ 0, 10573 /*bit*/ 0); 10574 ctl_done((union ctl_io *)ctsio); 10575 return (CTL_RETVAL_COMPLETE); 10576 } 10577 10578 return (retval); 10579 } 10580 10581 /* 10582 * For known CDB types, parse the LBA and length. 
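 * Returns 0 and fills in *lba and *len on success, or 1 for non-SCSI I/O
 * and for opcodes not listed below. Note that the 6-byte READ/WRITE CDBs
 * carry only a 21-bit LBA, hence the 0x1fffff mask.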
10583 */ 10584 static int 10585 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10586 { 10587 if (io->io_hdr.io_type != CTL_IO_SCSI) 10588 return (1); 10589 10590 switch (io->scsiio.cdb[0]) { 10591 case COMPARE_AND_WRITE: { 10592 struct scsi_compare_and_write *cdb; 10593 10594 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10595 10596 *lba = scsi_8btou64(cdb->addr); 10597 *len = cdb->length; 10598 break; 10599 } 10600 case READ_6: 10601 case WRITE_6: { 10602 struct scsi_rw_6 *cdb; 10603 10604 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10605 10606 *lba = scsi_3btoul(cdb->addr); 10607 /* only 5 bits are valid in the most significant address byte */ 10608 *lba &= 0x1fffff; 10609 *len = cdb->length; 10610 break; 10611 } 10612 case READ_10: 10613 case WRITE_10: { 10614 struct scsi_rw_10 *cdb; 10615 10616 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10617 10618 *lba = scsi_4btoul(cdb->addr); 10619 *len = scsi_2btoul(cdb->length); 10620 break; 10621 } 10622 case WRITE_VERIFY_10: { 10623 struct scsi_write_verify_10 *cdb; 10624 10625 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10626 10627 *lba = scsi_4btoul(cdb->addr); 10628 *len = scsi_2btoul(cdb->length); 10629 break; 10630 } 10631 case READ_12: 10632 case WRITE_12: { 10633 struct scsi_rw_12 *cdb; 10634 10635 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10636 10637 *lba = scsi_4btoul(cdb->addr); 10638 *len = scsi_4btoul(cdb->length); 10639 break; 10640 } 10641 case WRITE_VERIFY_12: { 10642 struct scsi_write_verify_12 *cdb; 10643 10644 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10645 10646 *lba = scsi_4btoul(cdb->addr); 10647 *len = scsi_4btoul(cdb->length); 10648 break; 10649 } 10650 case READ_16: 10651 case WRITE_16: { 10652 struct scsi_rw_16 *cdb; 10653 10654 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10655 10656 *lba = scsi_8btou64(cdb->addr); 10657 *len = scsi_4btoul(cdb->length); 10658 break; 10659 } 10660 case WRITE_ATOMIC_16: { 10661 struct scsi_write_atomic_16 *cdb; 10662 10663 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10664 10665 *lba = scsi_8btou64(cdb->addr); 10666 *len = scsi_2btoul(cdb->length); 10667 break; 10668 } 10669 case WRITE_VERIFY_16: { 10670 struct scsi_write_verify_16 *cdb; 10671 10672 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10673 10674 *lba = scsi_8btou64(cdb->addr); 10675 *len = scsi_4btoul(cdb->length); 10676 break; 10677 } 10678 case WRITE_SAME_10: { 10679 struct scsi_write_same_10 *cdb; 10680 10681 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10682 10683 *lba = scsi_4btoul(cdb->addr); 10684 *len = scsi_2btoul(cdb->length); 10685 break; 10686 } 10687 case WRITE_SAME_16: { 10688 struct scsi_write_same_16 *cdb; 10689 10690 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10691 10692 *lba = scsi_8btou64(cdb->addr); 10693 *len = scsi_4btoul(cdb->length); 10694 break; 10695 } 10696 case VERIFY_10: { 10697 struct scsi_verify_10 *cdb; 10698 10699 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10700 10701 *lba = scsi_4btoul(cdb->addr); 10702 *len = scsi_2btoul(cdb->length); 10703 break; 10704 } 10705 case VERIFY_12: { 10706 struct scsi_verify_12 *cdb; 10707 10708 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10709 10710 *lba = scsi_4btoul(cdb->addr); 10711 *len = scsi_4btoul(cdb->length); 10712 break; 10713 } 10714 case VERIFY_16: { 10715 struct scsi_verify_16 *cdb; 10716 10717 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10718 10719 *lba = scsi_8btou64(cdb->addr); 10720 *len = scsi_4btoul(cdb->length); 10721 break; 10722 } 10723 case UNMAP: { 10724 *lba 
= 0; 10725 *len = UINT64_MAX; 10726 break; 10727 } 10728 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10729 struct scsi_get_lba_status *cdb; 10730 10731 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10732 *lba = scsi_8btou64(cdb->addr); 10733 *len = UINT32_MAX; 10734 break; 10735 } 10736 default: 10737 return (1); 10738 break; /* NOTREACHED */ 10739 } 10740 10741 return (0); 10742 } 10743 10744 static ctl_action 10745 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10746 bool seq) 10747 { 10748 uint64_t endlba1, endlba2; 10749 10750 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10751 endlba2 = lba2 + len2 - 1; 10752 10753 if ((endlba1 < lba2) || (endlba2 < lba1)) 10754 return (CTL_ACTION_PASS); 10755 else 10756 return (CTL_ACTION_BLOCK); 10757 } 10758 10759 static int 10760 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10761 { 10762 struct ctl_ptr_len_flags *ptrlen; 10763 struct scsi_unmap_desc *buf, *end, *range; 10764 uint64_t lba; 10765 uint32_t len; 10766 10767 /* If not UNMAP -- go other way. */ 10768 if (io->io_hdr.io_type != CTL_IO_SCSI || 10769 io->scsiio.cdb[0] != UNMAP) 10770 return (CTL_ACTION_ERROR); 10771 10772 /* If UNMAP without data -- block and wait for data. */ 10773 ptrlen = (struct ctl_ptr_len_flags *) 10774 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10775 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10776 ptrlen->ptr == NULL) 10777 return (CTL_ACTION_BLOCK); 10778 10779 /* UNMAP with data -- check for collision. */ 10780 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10781 end = buf + ptrlen->len / sizeof(*buf); 10782 for (range = buf; range < end; range++) { 10783 lba = scsi_8btou64(range->lba); 10784 len = scsi_4btoul(range->length); 10785 if ((lba < lba2 + len2) && (lba + len > lba2)) 10786 return (CTL_ACTION_BLOCK); 10787 } 10788 return (CTL_ACTION_PASS); 10789 } 10790 10791 static ctl_action 10792 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10793 { 10794 uint64_t lba1, lba2; 10795 uint64_t len1, len2; 10796 int retval; 10797 10798 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10799 return (CTL_ACTION_ERROR); 10800 10801 retval = ctl_extent_check_unmap(io1, lba2, len2); 10802 if (retval != CTL_ACTION_ERROR) 10803 return (retval); 10804 10805 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10806 return (CTL_ACTION_ERROR); 10807 10808 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10809 seq = FALSE; 10810 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10811 } 10812 10813 static ctl_action 10814 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10815 { 10816 uint64_t lba1, lba2; 10817 uint64_t len1, len2; 10818 10819 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10820 return (CTL_ACTION_PASS); 10821 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10822 return (CTL_ACTION_ERROR); 10823 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10824 return (CTL_ACTION_ERROR); 10825 10826 if (lba1 + len1 == lba2) 10827 return (CTL_ACTION_BLOCK); 10828 return (CTL_ACTION_PASS); 10829 } 10830 10831 static ctl_action 10832 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10833 union ctl_io *ooa_io) 10834 { 10835 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10836 ctl_serialize_action *serialize_row; 10837 10838 /* 10839 * The initiator attempted multiple untagged commands at the same 10840 * time. Can't do that. 
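 * SAM treats this as an overlapped command condition; we return
 * CTL_ACTION_OVERLAP so the caller can fail the new command rather than
 * execute it.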
10841 */ 10842 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10843 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10844 && ((pending_io->io_hdr.nexus.targ_port == 10845 ooa_io->io_hdr.nexus.targ_port) 10846 && (pending_io->io_hdr.nexus.initid == 10847 ooa_io->io_hdr.nexus.initid)) 10848 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10849 CTL_FLAG_STATUS_SENT)) == 0)) 10850 return (CTL_ACTION_OVERLAP); 10851 10852 /* 10853 * The initiator attempted to send multiple tagged commands with 10854 * the same ID. (It's fine if different initiators have the same 10855 * tag ID.) 10856 * 10857 * Even if all of those conditions are true, we don't kill the I/O 10858 * if the command ahead of us has been aborted. We won't end up 10859 * sending it to the FETD, and it's perfectly legal to resend a 10860 * command with the same tag number as long as the previous 10861 * instance of this tag number has been aborted somehow. 10862 */ 10863 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10864 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10865 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10866 && ((pending_io->io_hdr.nexus.targ_port == 10867 ooa_io->io_hdr.nexus.targ_port) 10868 && (pending_io->io_hdr.nexus.initid == 10869 ooa_io->io_hdr.nexus.initid)) 10870 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10871 CTL_FLAG_STATUS_SENT)) == 0)) 10872 return (CTL_ACTION_OVERLAP_TAG); 10873 10874 /* 10875 * If we get a head of queue tag, SAM-3 says that we should 10876 * immediately execute it. 10877 * 10878 * What happens if this command would normally block for some other 10879 * reason? e.g. a request sense with a head of queue tag 10880 * immediately after a write. Normally that would block, but this 10881 * will result in its getting executed immediately... 10882 * 10883 * We currently return "pass" instead of "skip", so we'll end up 10884 * going through the rest of the queue to check for overlapped tags. 10885 * 10886 * XXX KDM check for other types of blockage first?? 10887 */ 10888 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10889 return (CTL_ACTION_PASS); 10890 10891 /* 10892 * Ordered tags have to block until all items ahead of them 10893 * have completed. If we get called with an ordered tag, we always 10894 * block, if something else is ahead of us in the queue. 10895 */ 10896 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10897 return (CTL_ACTION_BLOCK); 10898 10899 /* 10900 * Simple tags get blocked until all head of queue and ordered tags 10901 * ahead of them have completed. I'm lumping untagged commands in 10902 * with simple tags here. XXX KDM is that the right thing to do? 
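 * Past these tag checks we fall through to the per-opcode serialization
 * table below: the row is selected by the command already on the OOA queue
 * and the column by the pending command.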
10903 */ 10904 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10905 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10906 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10907 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10908 return (CTL_ACTION_BLOCK); 10909 10910 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10911 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10912 10913 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10914 10915 switch (serialize_row[pending_entry->seridx]) { 10916 case CTL_SER_BLOCK: 10917 return (CTL_ACTION_BLOCK); 10918 case CTL_SER_EXTENT: 10919 return (ctl_extent_check(ooa_io, pending_io, 10920 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10921 case CTL_SER_EXTENTOPT: 10922 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10923 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10924 return (ctl_extent_check(ooa_io, pending_io, 10925 (lun->be_lun && 10926 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10927 return (CTL_ACTION_PASS); 10928 case CTL_SER_EXTENTSEQ: 10929 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10930 return (ctl_extent_check_seq(ooa_io, pending_io)); 10931 return (CTL_ACTION_PASS); 10932 case CTL_SER_PASS: 10933 return (CTL_ACTION_PASS); 10934 case CTL_SER_BLOCKOPT: 10935 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10936 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10937 return (CTL_ACTION_BLOCK); 10938 return (CTL_ACTION_PASS); 10939 case CTL_SER_SKIP: 10940 return (CTL_ACTION_SKIP); 10941 default: 10942 panic("invalid serialization value %d", 10943 serialize_row[pending_entry->seridx]); 10944 } 10945 10946 return (CTL_ACTION_ERROR); 10947 } 10948 10949 /* 10950 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10951 * Assumptions: 10952 * - pending_io is generally either incoming, or on the blocked queue 10953 * - starting I/O is the I/O we want to start the check with. 10954 */ 10955 static ctl_action 10956 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10957 union ctl_io *starting_io) 10958 { 10959 union ctl_io *ooa_io; 10960 ctl_action action; 10961 10962 mtx_assert(&lun->lun_lock, MA_OWNED); 10963 10964 /* 10965 * Run back along the OOA queue, starting with the current 10966 * blocked I/O and going through every I/O before it on the 10967 * queue. If starting_io is NULL, we'll just end up returning 10968 * CTL_ACTION_PASS. 10969 */ 10970 for (ooa_io = starting_io; ooa_io != NULL; 10971 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10972 ooa_links)){ 10973 10974 /* 10975 * This routine just checks to see whether 10976 * cur_blocked is blocked by ooa_io, which is ahead 10977 * of it in the queue. It doesn't queue/dequeue 10978 * cur_blocked. 10979 */ 10980 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10981 switch (action) { 10982 case CTL_ACTION_BLOCK: 10983 case CTL_ACTION_OVERLAP: 10984 case CTL_ACTION_OVERLAP_TAG: 10985 case CTL_ACTION_SKIP: 10986 case CTL_ACTION_ERROR: 10987 return (action); 10988 break; /* NOTREACHED */ 10989 case CTL_ACTION_PASS: 10990 break; 10991 default: 10992 panic("invalid action %d", action); 10993 break; /* NOTREACHED */ 10994 } 10995 } 10996 10997 return (CTL_ACTION_PASS); 10998 } 10999 11000 /* 11001 * Assumptions: 11002 * - An I/O has just completed, and has been removed from the per-LUN OOA 11003 * queue, so some items on the blocked queue may now be unblocked. 
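 * - The caller holds the LUN lock (asserted below).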
11004 */ 11005 static int 11006 ctl_check_blocked(struct ctl_lun *lun) 11007 { 11008 struct ctl_softc *softc = lun->ctl_softc; 11009 union ctl_io *cur_blocked, *next_blocked; 11010 11011 mtx_assert(&lun->lun_lock, MA_OWNED); 11012 11013 /* 11014 * Run forward from the head of the blocked queue, checking each 11015 * entry against the I/Os prior to it on the OOA queue to see if 11016 * there is still any blockage. 11017 * 11018 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 11019 * with our removing a variable on it while it is traversing the 11020 * list. 11021 */ 11022 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11023 cur_blocked != NULL; cur_blocked = next_blocked) { 11024 union ctl_io *prev_ooa; 11025 ctl_action action; 11026 11027 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11028 blocked_links); 11029 11030 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11031 ctl_ooaq, ooa_links); 11032 11033 /* 11034 * If cur_blocked happens to be the first item in the OOA 11035 * queue now, prev_ooa will be NULL, and the action 11036 * returned will just be CTL_ACTION_PASS. 11037 */ 11038 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 11039 11040 switch (action) { 11041 case CTL_ACTION_BLOCK: 11042 /* Nothing to do here, still blocked */ 11043 break; 11044 case CTL_ACTION_OVERLAP: 11045 case CTL_ACTION_OVERLAP_TAG: 11046 /* 11047 * This shouldn't happen! In theory we've already 11048 * checked this command for overlap... 11049 */ 11050 break; 11051 case CTL_ACTION_PASS: 11052 case CTL_ACTION_SKIP: { 11053 const struct ctl_cmd_entry *entry; 11054 11055 /* 11056 * The skip case shouldn't happen, this transaction 11057 * should have never made it onto the blocked queue. 11058 */ 11059 /* 11060 * This I/O is no longer blocked, we can remove it 11061 * from the blocked queue. Since this is a TAILQ 11062 * (doubly linked list), we can do O(1) removals 11063 * from any place on the list. 11064 */ 11065 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11066 blocked_links); 11067 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11068 11069 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 11070 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 11071 /* 11072 * Need to send IO back to original side to 11073 * run 11074 */ 11075 union ctl_ha_msg msg_info; 11076 11077 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11078 msg_info.hdr.original_sc = 11079 cur_blocked->io_hdr.original_sc; 11080 msg_info.hdr.serializing_sc = cur_blocked; 11081 msg_info.hdr.msg_type = CTL_MSG_R2R; 11082 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11083 sizeof(msg_info.hdr), M_NOWAIT); 11084 break; 11085 } 11086 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11087 11088 /* 11089 * Check this I/O for LUN state changes that may 11090 * have happened while this command was blocked. 11091 * The LUN state may have been changed by a command 11092 * ahead of us in the queue, so we need to re-check 11093 * for any states that can be caused by SCSI 11094 * commands. 11095 */ 11096 if (ctl_scsiio_lun_check(lun, entry, 11097 &cur_blocked->scsiio) == 0) { 11098 cur_blocked->io_hdr.flags |= 11099 CTL_FLAG_IS_WAS_ON_RTR; 11100 ctl_enqueue_rtr(cur_blocked); 11101 } else 11102 ctl_done(cur_blocked); 11103 break; 11104 } 11105 default: 11106 /* 11107 * This probably shouldn't happen -- we shouldn't 11108 * get CTL_ACTION_ERROR, or anything else. 
11109 */ 11110 break; 11111 } 11112 } 11113 11114 return (CTL_RETVAL_COMPLETE); 11115 } 11116 11117 /* 11118 * This routine (with one exception) checks LUN flags that can be set by 11119 * commands ahead of us in the OOA queue. These flags have to be checked 11120 * when a command initially comes in, and when we pull a command off the 11121 * blocked queue and are preparing to execute it. The reason we have to 11122 * check these flags for commands on the blocked queue is that the LUN 11123 * state may have been changed by a command ahead of us while we're on the 11124 * blocked queue. 11125 * 11126 * Ordering is somewhat important with these checks, so please pay 11127 * careful attention to the placement of any new checks. 11128 */ 11129 static int 11130 ctl_scsiio_lun_check(struct ctl_lun *lun, 11131 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11132 { 11133 struct ctl_softc *softc = lun->ctl_softc; 11134 int retval; 11135 uint32_t residx; 11136 11137 retval = 0; 11138 11139 mtx_assert(&lun->lun_lock, MA_OWNED); 11140 11141 /* 11142 * If this shelf is a secondary shelf controller, we may have to 11143 * reject some commands disallowed by HA mode and link state. 11144 */ 11145 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11146 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11147 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11148 ctl_set_lun_unavail(ctsio); 11149 retval = 1; 11150 goto bailout; 11151 } 11152 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11153 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11154 ctl_set_lun_transit(ctsio); 11155 retval = 1; 11156 goto bailout; 11157 } 11158 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11159 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11160 ctl_set_lun_standby(ctsio); 11161 retval = 1; 11162 goto bailout; 11163 } 11164 11165 /* The rest of checks are only done on executing side */ 11166 if (softc->ha_mode == CTL_HA_MODE_XFER) 11167 goto bailout; 11168 } 11169 11170 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11171 if (lun->be_lun && 11172 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11173 ctl_set_hw_write_protected(ctsio); 11174 retval = 1; 11175 goto bailout; 11176 } 11177 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 11178 .eca_and_aen & SCP_SWP) != 0) { 11179 ctl_set_sense(ctsio, /*current_error*/ 1, 11180 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11181 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11182 retval = 1; 11183 goto bailout; 11184 } 11185 } 11186 11187 /* 11188 * Check for a reservation conflict. If this command isn't allowed 11189 * even on reserved LUNs, and if this initiator isn't the one who 11190 * reserved us, reject the command with a reservation conflict. 11191 */ 11192 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11193 if ((lun->flags & CTL_LUN_RESERVED) 11194 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11195 if (lun->res_idx != residx) { 11196 ctl_set_reservation_conflict(ctsio); 11197 retval = 1; 11198 goto bailout; 11199 } 11200 } 11201 11202 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11203 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11204 /* No reservation or command is allowed. */; 11205 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11206 (lun->res_type == SPR_TYPE_WR_EX || 11207 lun->res_type == SPR_TYPE_WR_EX_RO || 11208 lun->res_type == SPR_TYPE_WR_EX_AR)) { 11209 /* The command is allowed for Write Exclusive resv. 
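 * (SPC permits commands that only read the medium from initiators other
 * than the holder of a Write Exclusive reservation, which is what this
 * flag is meant to capture.)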
*/; 11210 } else { 11211 /* 11212 * if we aren't registered or it's a res holder type 11213 * reservation and this isn't the res holder then set a 11214 * conflict. 11215 */ 11216 if (ctl_get_prkey(lun, residx) == 0 11217 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 11218 ctl_set_reservation_conflict(ctsio); 11219 retval = 1; 11220 goto bailout; 11221 } 11222 } 11223 11224 if ((lun->flags & CTL_LUN_OFFLINE) 11225 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) { 11226 ctl_set_lun_not_ready(ctsio); 11227 retval = 1; 11228 goto bailout; 11229 } 11230 11231 if ((lun->flags & CTL_LUN_STOPPED) 11232 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 11233 /* "Logical unit not ready, initializing cmd. required" */ 11234 ctl_set_lun_stopped(ctsio); 11235 retval = 1; 11236 goto bailout; 11237 } 11238 11239 if ((lun->flags & CTL_LUN_INOPERABLE) 11240 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 11241 /* "Medium format corrupted" */ 11242 ctl_set_medium_format_corrupted(ctsio); 11243 retval = 1; 11244 goto bailout; 11245 } 11246 11247 bailout: 11248 return (retval); 11249 } 11250 11251 static void 11252 ctl_failover_io(union ctl_io *io, int have_lock) 11253 { 11254 ctl_set_busy(&io->scsiio); 11255 ctl_done(io); 11256 } 11257 11258 static void 11259 ctl_failover_lun(struct ctl_lun *lun) 11260 { 11261 struct ctl_softc *softc = lun->ctl_softc; 11262 struct ctl_io_hdr *io, *next_io; 11263 11264 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun)); 11265 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11266 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11267 /* We are master */ 11268 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11269 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11270 io->flags |= CTL_FLAG_ABORT; 11271 io->flags |= CTL_FLAG_FAILOVER; 11272 } else { /* This can be only due to DATAMOVE */ 11273 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11274 io->flags &= ~CTL_FLAG_DMA_INPROG; 11275 io->flags |= CTL_FLAG_IO_ACTIVE; 11276 io->port_status = 31340; 11277 ctl_enqueue_isc((union ctl_io *)io); 11278 } 11279 } 11280 /* We are slave */ 11281 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11282 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11283 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11284 io->flags |= CTL_FLAG_FAILOVER; 11285 } else { 11286 ctl_set_busy(&((union ctl_io *)io)-> 11287 scsiio); 11288 ctl_done((union ctl_io *)io); 11289 } 11290 } 11291 } 11292 } else { /* SERIALIZE modes */ 11293 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11294 next_io) { 11295 /* We are master */ 11296 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11297 TAILQ_REMOVE(&lun->blocked_queue, io, 11298 blocked_links); 11299 io->flags &= ~CTL_FLAG_BLOCKED; 11300 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11301 ctl_free_io((union ctl_io *)io); 11302 } 11303 } 11304 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11305 /* We are master */ 11306 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11307 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11308 ctl_free_io((union ctl_io *)io); 11309 } 11310 /* We are slave */ 11311 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11312 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11313 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11314 ctl_set_busy(&((union ctl_io *)io)-> 11315 scsiio); 11316 ctl_done((union ctl_io *)io); 11317 } 11318 } 11319 } 11320 ctl_check_blocked(lun); 11321 } 11322 } 11323 11324 static int 11325 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11326 { 11327 struct ctl_lun *lun; 11328 const struct ctl_cmd_entry 
*entry; 11329 uint32_t initidx, targ_lun; 11330 int retval; 11331 11332 retval = 0; 11333 11334 lun = NULL; 11335 11336 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11337 if ((targ_lun < CTL_MAX_LUNS) 11338 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11339 /* 11340 * If the LUN is invalid, pretend that it doesn't exist. 11341 * It will go away as soon as all pending I/O has been 11342 * completed. 11343 */ 11344 mtx_lock(&lun->lun_lock); 11345 if (lun->flags & CTL_LUN_DISABLED) { 11346 mtx_unlock(&lun->lun_lock); 11347 lun = NULL; 11348 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11349 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11350 } else { 11351 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11352 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11353 lun->be_lun; 11354 11355 /* 11356 * Every I/O goes into the OOA queue for a 11357 * particular LUN, and stays there until completion. 11358 */ 11359 #ifdef CTL_TIME_IO 11360 if (TAILQ_EMPTY(&lun->ooa_queue)) { 11361 lun->idle_time += getsbinuptime() - 11362 lun->last_busy; 11363 } 11364 #endif 11365 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11366 ooa_links); 11367 } 11368 } else { 11369 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11370 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11371 } 11372 11373 /* Get command entry and return error if it is unsuppotyed. */ 11374 entry = ctl_validate_command(ctsio); 11375 if (entry == NULL) { 11376 if (lun) 11377 mtx_unlock(&lun->lun_lock); 11378 return (retval); 11379 } 11380 11381 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11382 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11383 11384 /* 11385 * Check to see whether we can send this command to LUNs that don't 11386 * exist. This should pretty much only be the case for inquiry 11387 * and request sense. Further checks, below, really require having 11388 * a LUN, so we can't really check the command anymore. Just put 11389 * it on the rtr queue. 11390 */ 11391 if (lun == NULL) { 11392 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11393 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11394 ctl_enqueue_rtr((union ctl_io *)ctsio); 11395 return (retval); 11396 } 11397 11398 ctl_set_unsupported_lun(ctsio); 11399 ctl_done((union ctl_io *)ctsio); 11400 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11401 return (retval); 11402 } else { 11403 /* 11404 * Make sure we support this particular command on this LUN. 11405 * e.g., we don't support writes to the control LUN. 11406 */ 11407 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11408 mtx_unlock(&lun->lun_lock); 11409 ctl_set_invalid_opcode(ctsio); 11410 ctl_done((union ctl_io *)ctsio); 11411 return (retval); 11412 } 11413 } 11414 11415 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11416 11417 #ifdef CTL_WITH_CA 11418 /* 11419 * If we've got a request sense, it'll clear the contingent 11420 * allegiance condition. Otherwise, if we have a CA condition for 11421 * this initiator, clear it, because it sent down a command other 11422 * than request sense. 11423 */ 11424 if ((ctsio->cdb[0] != REQUEST_SENSE) 11425 && (ctl_is_set(lun->have_ca, initidx))) 11426 ctl_clear_mask(lun->have_ca, initidx); 11427 #endif 11428 11429 /* 11430 * If the command has this flag set, it handles its own unit 11431 * attention reporting, we shouldn't do anything. Otherwise we 11432 * check for any pending unit attentions, and send them back to the 11433 * initiator. 
We only do this when a command initially comes in, 11434 * not when we pull it off the blocked queue. 11435 * 11436 * According to SAM-3, section 5.3.2, the order that things get 11437 * presented back to the host is basically unit attentions caused 11438 * by some sort of reset event, busy status, reservation conflicts 11439 * or task set full, and finally any other status. 11440 * 11441 * One issue here is that some of the unit attentions we report 11442 * don't fall into the "reset" category (e.g. "reported luns data 11443 * has changed"). So reporting it here, before the reservation 11444 * check, may be technically wrong. I guess the only thing to do 11445 * would be to check for and report the reset events here, and then 11446 * check for the other unit attention types after we check for a 11447 * reservation conflict. 11448 * 11449 * XXX KDM need to fix this 11450 */ 11451 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11452 ctl_ua_type ua_type; 11453 11454 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11455 SSD_TYPE_NONE); 11456 if (ua_type != CTL_UA_NONE) { 11457 mtx_unlock(&lun->lun_lock); 11458 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11459 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11460 ctsio->sense_len = SSD_FULL_SIZE; 11461 ctl_done((union ctl_io *)ctsio); 11462 return (retval); 11463 } 11464 } 11465 11466 11467 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11468 mtx_unlock(&lun->lun_lock); 11469 ctl_done((union ctl_io *)ctsio); 11470 return (retval); 11471 } 11472 11473 /* 11474 * XXX CHD this is where we want to send IO to other side if 11475 * this LUN is secondary on this SC. We will need to make a copy 11476 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11477 * the copy we send as FROM_OTHER. 11478 * We also need to stuff the address of the original IO so we can 11479 * find it easily. Something similar will need be done on the other 11480 * side so when we are done we can find the copy. 
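 * In practice the code below does exactly that: when this LUN is
 * secondary here and primary on the peer (and the command is not flagged
 * to run locally), the I/O is marked SENT_2OTHER_SC and a
 * CTL_MSG_SERIALIZE message carrying the nexus, tag and CDB is sent to
 * the other controller; if the send fails, the command is completed with
 * BUSY status.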
11481 */ 11482 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11483 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11484 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11485 union ctl_ha_msg msg_info; 11486 int isc_retval; 11487 11488 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11489 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11490 mtx_unlock(&lun->lun_lock); 11491 11492 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11493 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11494 msg_info.hdr.serializing_sc = NULL; 11495 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11496 msg_info.scsi.tag_num = ctsio->tag_num; 11497 msg_info.scsi.tag_type = ctsio->tag_type; 11498 msg_info.scsi.cdb_len = ctsio->cdb_len; 11499 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11500 11501 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11502 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11503 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11504 ctl_set_busy(ctsio); 11505 ctl_done((union ctl_io *)ctsio); 11506 return (retval); 11507 } 11508 return (retval); 11509 } 11510 11511 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11512 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11513 ctl_ooaq, ooa_links))) { 11514 case CTL_ACTION_BLOCK: 11515 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11516 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11517 blocked_links); 11518 mtx_unlock(&lun->lun_lock); 11519 return (retval); 11520 case CTL_ACTION_PASS: 11521 case CTL_ACTION_SKIP: 11522 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11523 mtx_unlock(&lun->lun_lock); 11524 ctl_enqueue_rtr((union ctl_io *)ctsio); 11525 break; 11526 case CTL_ACTION_OVERLAP: 11527 mtx_unlock(&lun->lun_lock); 11528 ctl_set_overlapped_cmd(ctsio); 11529 ctl_done((union ctl_io *)ctsio); 11530 break; 11531 case CTL_ACTION_OVERLAP_TAG: 11532 mtx_unlock(&lun->lun_lock); 11533 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11534 ctl_done((union ctl_io *)ctsio); 11535 break; 11536 case CTL_ACTION_ERROR: 11537 default: 11538 mtx_unlock(&lun->lun_lock); 11539 ctl_set_internal_failure(ctsio, 11540 /*sks_valid*/ 0, 11541 /*retry_count*/ 0); 11542 ctl_done((union ctl_io *)ctsio); 11543 break; 11544 } 11545 return (retval); 11546 } 11547 11548 const struct ctl_cmd_entry * 11549 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11550 { 11551 const struct ctl_cmd_entry *entry; 11552 int service_action; 11553 11554 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11555 if (sa) 11556 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11557 if (entry->flags & CTL_CMD_FLAG_SA5) { 11558 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11559 entry = &((const struct ctl_cmd_entry *) 11560 entry->execute)[service_action]; 11561 } 11562 return (entry); 11563 } 11564 11565 const struct ctl_cmd_entry * 11566 ctl_validate_command(struct ctl_scsiio *ctsio) 11567 { 11568 const struct ctl_cmd_entry *entry; 11569 int i, sa; 11570 uint8_t diff; 11571 11572 entry = ctl_get_cmd_entry(ctsio, &sa); 11573 if (entry->execute == NULL) { 11574 if (sa) 11575 ctl_set_invalid_field(ctsio, 11576 /*sks_valid*/ 1, 11577 /*command*/ 1, 11578 /*field*/ 1, 11579 /*bit_valid*/ 1, 11580 /*bit*/ 4); 11581 else 11582 ctl_set_invalid_opcode(ctsio); 11583 ctl_done((union ctl_io *)ctsio); 11584 return (NULL); 11585 } 11586 KASSERT(entry->length > 0, 11587 ("Not defined length for command 0x%02x/0x%02x", 11588 ctsio->cdb[0], ctsio->cdb[1])); 11589 for (i = 1; i < entry->length; i++) { 11590 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11591 if (diff == 0) 11592 continue; 11593 
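		/*
		 * Some bit is set in this CDB byte that the command's usage
		 * mask does not allow.  Report an invalid field in the CDB,
		 * pointing at the offending byte and the highest disallowed
		 * bit.
		 */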
ctl_set_invalid_field(ctsio, 11594 /*sks_valid*/ 1, 11595 /*command*/ 1, 11596 /*field*/ i, 11597 /*bit_valid*/ 1, 11598 /*bit*/ fls(diff) - 1); 11599 ctl_done((union ctl_io *)ctsio); 11600 return (NULL); 11601 } 11602 return (entry); 11603 } 11604 11605 static int 11606 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11607 { 11608 11609 switch (lun_type) { 11610 case T_PROCESSOR: 11611 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11612 return (0); 11613 break; 11614 case T_DIRECT: 11615 if ((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) 11616 return (0); 11617 break; 11618 default: 11619 return (0); 11620 } 11621 return (1); 11622 } 11623 11624 static int 11625 ctl_scsiio(struct ctl_scsiio *ctsio) 11626 { 11627 int retval; 11628 const struct ctl_cmd_entry *entry; 11629 11630 retval = CTL_RETVAL_COMPLETE; 11631 11632 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11633 11634 entry = ctl_get_cmd_entry(ctsio, NULL); 11635 11636 /* 11637 * If this I/O has been aborted, just send it straight to 11638 * ctl_done() without executing it. 11639 */ 11640 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11641 ctl_done((union ctl_io *)ctsio); 11642 goto bailout; 11643 } 11644 11645 /* 11646 * All the checks should have been handled by ctl_scsiio_precheck(). 11647 * We should be clear now to just execute the I/O. 11648 */ 11649 retval = entry->execute(ctsio); 11650 11651 bailout: 11652 return (retval); 11653 } 11654 11655 /* 11656 * Since we only implement one target right now, a bus reset simply resets 11657 * our single target. 11658 */ 11659 static int 11660 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11661 { 11662 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11663 } 11664 11665 static int 11666 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11667 ctl_ua_type ua_type) 11668 { 11669 struct ctl_port *port; 11670 struct ctl_lun *lun; 11671 int retval; 11672 11673 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11674 union ctl_ha_msg msg_info; 11675 11676 msg_info.hdr.nexus = io->io_hdr.nexus; 11677 if (ua_type==CTL_UA_TARG_RESET) 11678 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11679 else 11680 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11681 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11682 msg_info.hdr.original_sc = NULL; 11683 msg_info.hdr.serializing_sc = NULL; 11684 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11685 sizeof(msg_info.task), M_WAITOK); 11686 } 11687 retval = 0; 11688 11689 mtx_lock(&softc->ctl_lock); 11690 port = softc->ctl_ports[io->io_hdr.nexus.targ_port]; 11691 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11692 if (port != NULL && 11693 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 11694 continue; 11695 retval += ctl_do_lun_reset(lun, io, ua_type); 11696 } 11697 mtx_unlock(&softc->ctl_lock); 11698 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11699 return (retval); 11700 } 11701 11702 /* 11703 * The LUN should always be set. The I/O is optional, and is used to 11704 * distinguish between I/Os sent by this initiator, and by other 11705 * initiators. We set unit attention for initiators other than this one. 11706 * SAM-3 is vague on this point. It does say that a unit attention should 11707 * be established for other initiators when a LUN is reset (see section 11708 * 5.7.3), but it doesn't specifically say that the unit attention should 11709 * be established for this particular initiator when a LUN is reset. 
Here 11710 * is the relevant text, from SAM-3 rev 8: 11711 * 11712 * 5.7.2 When a SCSI initiator port aborts its own tasks 11713 * 11714 * When a SCSI initiator port causes its own task(s) to be aborted, no 11715 * notification that the task(s) have been aborted shall be returned to 11716 * the SCSI initiator port other than the completion response for the 11717 * command or task management function action that caused the task(s) to 11718 * be aborted and notification(s) associated with related effects of the 11719 * action (e.g., a reset unit attention condition). 11720 * 11721 * XXX KDM for now, we're setting unit attention for all initiators. 11722 */ 11723 static int 11724 ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11725 { 11726 union ctl_io *xio; 11727 #if 0 11728 uint32_t initidx; 11729 #endif 11730 #ifdef CTL_WITH_CA 11731 int i; 11732 #endif 11733 11734 mtx_lock(&lun->lun_lock); 11735 /* 11736 * Run through the OOA queue and abort each I/O. 11737 */ 11738 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11739 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11740 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11741 } 11742 11743 /* 11744 * This version sets unit attention for every 11745 */ 11746 #if 0 11747 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11748 ctl_est_ua_all(lun, initidx, ua_type); 11749 #else 11750 ctl_est_ua_all(lun, -1, ua_type); 11751 #endif 11752 11753 /* 11754 * A reset (any kind, really) clears reservations established with 11755 * RESERVE/RELEASE. It does not clear reservations established 11756 * with PERSISTENT RESERVE OUT, but we don't support that at the 11757 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 11758 * reservations made with the RESERVE/RELEASE commands, because 11759 * those commands are obsolete in SPC-3. 11760 */ 11761 lun->flags &= ~CTL_LUN_RESERVED; 11762 11763 #ifdef CTL_WITH_CA 11764 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11765 ctl_clear_mask(lun->have_ca, i); 11766 #endif 11767 mtx_unlock(&lun->lun_lock); 11768 11769 return (0); 11770 } 11771 11772 static int 11773 ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io) 11774 { 11775 struct ctl_lun *lun; 11776 uint32_t targ_lun; 11777 int retval; 11778 11779 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11780 mtx_lock(&softc->ctl_lock); 11781 if ((targ_lun >= CTL_MAX_LUNS) || 11782 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11783 mtx_unlock(&softc->ctl_lock); 11784 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11785 return (1); 11786 } 11787 retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET); 11788 mtx_unlock(&softc->ctl_lock); 11789 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11790 11791 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11792 union ctl_ha_msg msg_info; 11793 11794 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11795 msg_info.hdr.nexus = io->io_hdr.nexus; 11796 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11797 msg_info.hdr.original_sc = NULL; 11798 msg_info.hdr.serializing_sc = NULL; 11799 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11800 sizeof(msg_info.task), M_WAITOK); 11801 } 11802 return (retval); 11803 } 11804 11805 static void 11806 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11807 int other_sc) 11808 { 11809 union ctl_io *xio; 11810 11811 mtx_assert(&lun->lun_lock, MA_OWNED); 11812 11813 /* 11814 * Run through the OOA queue and attempt to find the given I/O. 
11815 * The target port, initiator ID, tag type and tag number have to 11816 * match the values that we got from the initiator. If we have an 11817 * untagged command to abort, simply abort the first untagged command 11818 * we come to. We only allow one untagged command at a time of course. 11819 */ 11820 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11821 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11822 11823 if ((targ_port == UINT32_MAX || 11824 targ_port == xio->io_hdr.nexus.targ_port) && 11825 (init_id == UINT32_MAX || 11826 init_id == xio->io_hdr.nexus.initid)) { 11827 if (targ_port != xio->io_hdr.nexus.targ_port || 11828 init_id != xio->io_hdr.nexus.initid) 11829 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11830 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11831 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11832 union ctl_ha_msg msg_info; 11833 11834 msg_info.hdr.nexus = xio->io_hdr.nexus; 11835 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11836 msg_info.task.tag_num = xio->scsiio.tag_num; 11837 msg_info.task.tag_type = xio->scsiio.tag_type; 11838 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11839 msg_info.hdr.original_sc = NULL; 11840 msg_info.hdr.serializing_sc = NULL; 11841 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11842 sizeof(msg_info.task), M_NOWAIT); 11843 } 11844 } 11845 } 11846 } 11847 11848 static int 11849 ctl_abort_task_set(union ctl_io *io) 11850 { 11851 struct ctl_softc *softc = control_softc; 11852 struct ctl_lun *lun; 11853 uint32_t targ_lun; 11854 11855 /* 11856 * Look up the LUN. 11857 */ 11858 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11859 mtx_lock(&softc->ctl_lock); 11860 if ((targ_lun >= CTL_MAX_LUNS) || 11861 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11862 mtx_unlock(&softc->ctl_lock); 11863 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11864 return (1); 11865 } 11866 11867 mtx_lock(&lun->lun_lock); 11868 mtx_unlock(&softc->ctl_lock); 11869 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11870 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11871 io->io_hdr.nexus.initid, 11872 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11873 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11874 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11875 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11876 } 11877 mtx_unlock(&lun->lun_lock); 11878 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11879 return (0); 11880 } 11881 11882 static int 11883 ctl_i_t_nexus_reset(union ctl_io *io) 11884 { 11885 struct ctl_softc *softc = control_softc; 11886 struct ctl_lun *lun; 11887 uint32_t initidx; 11888 11889 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11890 union ctl_ha_msg msg_info; 11891 11892 msg_info.hdr.nexus = io->io_hdr.nexus; 11893 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11894 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11895 msg_info.hdr.original_sc = NULL; 11896 msg_info.hdr.serializing_sc = NULL; 11897 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11898 sizeof(msg_info.task), M_WAITOK); 11899 } 11900 11901 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11902 mtx_lock(&softc->ctl_lock); 11903 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11904 mtx_lock(&lun->lun_lock); 11905 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11906 io->io_hdr.nexus.initid, 1); 11907 #ifdef CTL_WITH_CA 11908 ctl_clear_mask(lun->have_ca, initidx); 11909 #endif 11910 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11911 lun->flags &= ~CTL_LUN_RESERVED; 11912 
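		/*
		 * Tell this initiator, on its next command, that its
		 * I_T nexus was lost.
		 */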
ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11913 mtx_unlock(&lun->lun_lock); 11914 } 11915 mtx_unlock(&softc->ctl_lock); 11916 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11917 return (0); 11918 } 11919 11920 static int 11921 ctl_abort_task(union ctl_io *io) 11922 { 11923 union ctl_io *xio; 11924 struct ctl_lun *lun; 11925 struct ctl_softc *softc; 11926 #if 0 11927 struct sbuf sb; 11928 char printbuf[128]; 11929 #endif 11930 int found; 11931 uint32_t targ_lun; 11932 11933 softc = control_softc; 11934 found = 0; 11935 11936 /* 11937 * Look up the LUN. 11938 */ 11939 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11940 mtx_lock(&softc->ctl_lock); 11941 if ((targ_lun >= CTL_MAX_LUNS) || 11942 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11943 mtx_unlock(&softc->ctl_lock); 11944 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11945 return (1); 11946 } 11947 11948 #if 0 11949 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11950 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11951 #endif 11952 11953 mtx_lock(&lun->lun_lock); 11954 mtx_unlock(&softc->ctl_lock); 11955 /* 11956 * Run through the OOA queue and attempt to find the given I/O. 11957 * The target port, initiator ID, tag type and tag number have to 11958 * match the values that we got from the initiator. If we have an 11959 * untagged command to abort, simply abort the first untagged command 11960 * we come to. We only allow one untagged command at a time of course. 11961 */ 11962 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11963 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11964 #if 0 11965 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11966 11967 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11968 lun->lun, xio->scsiio.tag_num, 11969 xio->scsiio.tag_type, 11970 (xio->io_hdr.blocked_links.tqe_prev 11971 == NULL) ? "" : " BLOCKED", 11972 (xio->io_hdr.flags & 11973 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11974 (xio->io_hdr.flags & 11975 CTL_FLAG_ABORT) ? " ABORT" : "", 11976 (xio->io_hdr.flags & 11977 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11978 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11979 sbuf_finish(&sb); 11980 printf("%s\n", sbuf_data(&sb)); 11981 #endif 11982 11983 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11984 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11985 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11986 continue; 11987 11988 /* 11989 * If the abort says that the task is untagged, the 11990 * task in the queue must be untagged. Otherwise, 11991 * we just check to see whether the tag numbers 11992 * match. This is because the QLogic firmware 11993 * doesn't pass back the tag type in an abort 11994 * request. 11995 */ 11996 #if 0 11997 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11998 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11999 || (xio->scsiio.tag_num == io->taskio.tag_num)) 12000 #endif 12001 /* 12002 * XXX KDM we've got problems with FC, because it 12003 * doesn't send down a tag type with aborts. So we 12004 * can only really go by the tag number... 12005 * This may cause problems with parallel SCSI. 12006 * Need to figure that out!! 
12007 */ 12008 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12009 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12010 found = 1; 12011 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12012 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12013 union ctl_ha_msg msg_info; 12014 12015 msg_info.hdr.nexus = io->io_hdr.nexus; 12016 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12017 msg_info.task.tag_num = io->taskio.tag_num; 12018 msg_info.task.tag_type = io->taskio.tag_type; 12019 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12020 msg_info.hdr.original_sc = NULL; 12021 msg_info.hdr.serializing_sc = NULL; 12022 #if 0 12023 printf("Sent Abort to other side\n"); 12024 #endif 12025 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12026 sizeof(msg_info.task), M_NOWAIT); 12027 } 12028 #if 0 12029 printf("ctl_abort_task: found I/O to abort\n"); 12030 #endif 12031 } 12032 } 12033 mtx_unlock(&lun->lun_lock); 12034 12035 if (found == 0) { 12036 /* 12037 * This isn't really an error. It's entirely possible for 12038 * the abort and command completion to cross on the wire. 12039 * This is more of an informative/diagnostic error. 12040 */ 12041 #if 0 12042 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12043 "%u:%u:%u tag %d type %d\n", 12044 io->io_hdr.nexus.initid, 12045 io->io_hdr.nexus.targ_port, 12046 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12047 io->taskio.tag_type); 12048 #endif 12049 } 12050 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12051 return (0); 12052 } 12053 12054 static int 12055 ctl_query_task(union ctl_io *io, int task_set) 12056 { 12057 union ctl_io *xio; 12058 struct ctl_lun *lun; 12059 struct ctl_softc *softc; 12060 int found = 0; 12061 uint32_t targ_lun; 12062 12063 softc = control_softc; 12064 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12065 mtx_lock(&softc->ctl_lock); 12066 if ((targ_lun >= CTL_MAX_LUNS) || 12067 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12068 mtx_unlock(&softc->ctl_lock); 12069 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12070 return (1); 12071 } 12072 mtx_lock(&lun->lun_lock); 12073 mtx_unlock(&softc->ctl_lock); 12074 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12075 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12076 12077 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12078 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12079 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12080 continue; 12081 12082 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12083 found = 1; 12084 break; 12085 } 12086 } 12087 mtx_unlock(&lun->lun_lock); 12088 if (found) 12089 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12090 else 12091 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12092 return (0); 12093 } 12094 12095 static int 12096 ctl_query_async_event(union ctl_io *io) 12097 { 12098 struct ctl_lun *lun; 12099 struct ctl_softc *softc; 12100 ctl_ua_type ua; 12101 uint32_t targ_lun, initidx; 12102 12103 softc = control_softc; 12104 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12105 mtx_lock(&softc->ctl_lock); 12106 if ((targ_lun >= CTL_MAX_LUNS) || 12107 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12108 mtx_unlock(&softc->ctl_lock); 12109 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12110 return (1); 12111 } 12112 mtx_lock(&lun->lun_lock); 12113 mtx_unlock(&softc->ctl_lock); 12114 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12115 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12116 mtx_unlock(&lun->lun_lock); 12117 if (ua != CTL_UA_NONE) 
12118 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12119 else 12120 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12121 return (0); 12122 } 12123 12124 static void 12125 ctl_run_task(union ctl_io *io) 12126 { 12127 struct ctl_softc *softc = control_softc; 12128 int retval = 1; 12129 12130 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12131 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12132 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12133 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12134 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12135 switch (io->taskio.task_action) { 12136 case CTL_TASK_ABORT_TASK: 12137 retval = ctl_abort_task(io); 12138 break; 12139 case CTL_TASK_ABORT_TASK_SET: 12140 case CTL_TASK_CLEAR_TASK_SET: 12141 retval = ctl_abort_task_set(io); 12142 break; 12143 case CTL_TASK_CLEAR_ACA: 12144 break; 12145 case CTL_TASK_I_T_NEXUS_RESET: 12146 retval = ctl_i_t_nexus_reset(io); 12147 break; 12148 case CTL_TASK_LUN_RESET: 12149 retval = ctl_lun_reset(softc, io); 12150 break; 12151 case CTL_TASK_TARGET_RESET: 12152 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 12153 break; 12154 case CTL_TASK_BUS_RESET: 12155 retval = ctl_bus_reset(softc, io); 12156 break; 12157 case CTL_TASK_PORT_LOGIN: 12158 break; 12159 case CTL_TASK_PORT_LOGOUT: 12160 break; 12161 case CTL_TASK_QUERY_TASK: 12162 retval = ctl_query_task(io, 0); 12163 break; 12164 case CTL_TASK_QUERY_TASK_SET: 12165 retval = ctl_query_task(io, 1); 12166 break; 12167 case CTL_TASK_QUERY_ASYNC_EVENT: 12168 retval = ctl_query_async_event(io); 12169 break; 12170 default: 12171 printf("%s: got unknown task management event %d\n", 12172 __func__, io->taskio.task_action); 12173 break; 12174 } 12175 if (retval == 0) 12176 io->io_hdr.status = CTL_SUCCESS; 12177 else 12178 io->io_hdr.status = CTL_ERROR; 12179 ctl_done(io); 12180 } 12181 12182 /* 12183 * For HA operation. Handle commands that come in from the other 12184 * controller. 12185 */ 12186 static void 12187 ctl_handle_isc(union ctl_io *io) 12188 { 12189 int free_io; 12190 struct ctl_lun *lun; 12191 struct ctl_softc *softc; 12192 uint32_t targ_lun; 12193 12194 softc = control_softc; 12195 12196 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12197 lun = softc->ctl_luns[targ_lun]; 12198 12199 switch (io->io_hdr.msg_type) { 12200 case CTL_MSG_SERIALIZE: 12201 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 12202 break; 12203 case CTL_MSG_R2R: { 12204 const struct ctl_cmd_entry *entry; 12205 12206 /* 12207 * This is only used in SER_ONLY mode. 
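		 * The other controller has signalled that this command is
		 * clear to run here; re-check the LUN state before queueing
		 * it to the RTR queue, since that state may have changed
		 * while the message was in flight.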
12208 */ 12209 free_io = 0; 12210 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12211 mtx_lock(&lun->lun_lock); 12212 if (ctl_scsiio_lun_check(lun, 12213 entry, (struct ctl_scsiio *)io) != 0) { 12214 mtx_unlock(&lun->lun_lock); 12215 ctl_done(io); 12216 break; 12217 } 12218 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12219 mtx_unlock(&lun->lun_lock); 12220 ctl_enqueue_rtr(io); 12221 break; 12222 } 12223 case CTL_MSG_FINISH_IO: 12224 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12225 free_io = 0; 12226 ctl_done(io); 12227 } else { 12228 free_io = 1; 12229 mtx_lock(&lun->lun_lock); 12230 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12231 ooa_links); 12232 ctl_check_blocked(lun); 12233 mtx_unlock(&lun->lun_lock); 12234 } 12235 break; 12236 case CTL_MSG_PERS_ACTION: 12237 ctl_hndl_per_res_out_on_other_sc( 12238 (union ctl_ha_msg *)&io->presio.pr_msg); 12239 free_io = 1; 12240 break; 12241 case CTL_MSG_BAD_JUJU: 12242 free_io = 0; 12243 ctl_done(io); 12244 break; 12245 case CTL_MSG_DATAMOVE: 12246 /* Only used in XFER mode */ 12247 free_io = 0; 12248 ctl_datamove_remote(io); 12249 break; 12250 case CTL_MSG_DATAMOVE_DONE: 12251 /* Only used in XFER mode */ 12252 free_io = 0; 12253 io->scsiio.be_move_done(io); 12254 break; 12255 case CTL_MSG_FAILOVER: 12256 mtx_lock(&lun->lun_lock); 12257 ctl_failover_lun(lun); 12258 mtx_unlock(&lun->lun_lock); 12259 free_io = 1; 12260 break; 12261 default: 12262 free_io = 1; 12263 printf("%s: Invalid message type %d\n", 12264 __func__, io->io_hdr.msg_type); 12265 break; 12266 } 12267 if (free_io) 12268 ctl_free_io(io); 12269 12270 } 12271 12272 12273 /* 12274 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12275 * there is no match. 12276 */ 12277 static ctl_lun_error_pattern 12278 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12279 { 12280 const struct ctl_cmd_entry *entry; 12281 ctl_lun_error_pattern filtered_pattern, pattern; 12282 12283 pattern = desc->error_pattern; 12284 12285 /* 12286 * XXX KDM we need more data passed into this function to match a 12287 * custom pattern, and we actually need to implement custom pattern 12288 * matching. 12289 */ 12290 if (pattern & CTL_LUN_PAT_CMD) 12291 return (CTL_LUN_PAT_CMD); 12292 12293 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12294 return (CTL_LUN_PAT_ANY); 12295 12296 entry = ctl_get_cmd_entry(ctsio, NULL); 12297 12298 filtered_pattern = entry->pattern & pattern; 12299 12300 /* 12301 * If the user requested specific flags in the pattern (e.g. 12302 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12303 * flags. 12304 * 12305 * If the user did not specify any flags, it doesn't matter whether 12306 * or not the command supports the flags. 12307 */ 12308 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12309 (pattern & ~CTL_LUN_PAT_MASK)) 12310 return (CTL_LUN_PAT_NONE); 12311 12312 /* 12313 * If the user asked for a range check, see if the requested LBA 12314 * range overlaps with this command's LBA range. 12315 */ 12316 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12317 uint64_t lba1; 12318 uint64_t len1; 12319 ctl_action action; 12320 int retval; 12321 12322 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12323 if (retval != 0) 12324 return (CTL_LUN_PAT_NONE); 12325 12326 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12327 desc->lba_range.len, FALSE); 12328 /* 12329 * A "pass" means that the LBA ranges don't overlap, so 12330 * this doesn't match the user's range criteria. 
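		 * Any other return value means the ranges overlap, in which
		 * case this command matches the user's range criteria.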
12331 */ 12332 if (action == CTL_ACTION_PASS) 12333 return (CTL_LUN_PAT_NONE); 12334 } 12335 12336 return (filtered_pattern); 12337 } 12338 12339 static void 12340 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12341 { 12342 struct ctl_error_desc *desc, *desc2; 12343 12344 mtx_assert(&lun->lun_lock, MA_OWNED); 12345 12346 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12347 ctl_lun_error_pattern pattern; 12348 /* 12349 * Check to see whether this particular command matches 12350 * the pattern in the descriptor. 12351 */ 12352 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12353 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12354 continue; 12355 12356 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12357 case CTL_LUN_INJ_ABORTED: 12358 ctl_set_aborted(&io->scsiio); 12359 break; 12360 case CTL_LUN_INJ_MEDIUM_ERR: 12361 ctl_set_medium_error(&io->scsiio, 12362 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12363 CTL_FLAG_DATA_OUT); 12364 break; 12365 case CTL_LUN_INJ_UA: 12366 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12367 * OCCURRED */ 12368 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12369 break; 12370 case CTL_LUN_INJ_CUSTOM: 12371 /* 12372 * We're assuming the user knows what he is doing. 12373 * Just copy the sense information without doing 12374 * checks. 12375 */ 12376 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12377 MIN(sizeof(desc->custom_sense), 12378 sizeof(io->scsiio.sense_data))); 12379 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12380 io->scsiio.sense_len = SSD_FULL_SIZE; 12381 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12382 break; 12383 case CTL_LUN_INJ_NONE: 12384 default: 12385 /* 12386 * If this is an error injection type we don't know 12387 * about, clear the continuous flag (if it is set) 12388 * so it will get deleted below. 
12389 */ 12390 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12391 break; 12392 } 12393 /* 12394 * By default, each error injection action is a one-shot 12395 */ 12396 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12397 continue; 12398 12399 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12400 12401 free(desc, M_CTL); 12402 } 12403 } 12404 12405 #ifdef CTL_IO_DELAY 12406 static void 12407 ctl_datamove_timer_wakeup(void *arg) 12408 { 12409 union ctl_io *io; 12410 12411 io = (union ctl_io *)arg; 12412 12413 ctl_datamove(io); 12414 } 12415 #endif /* CTL_IO_DELAY */ 12416 12417 void 12418 ctl_datamove(union ctl_io *io) 12419 { 12420 struct ctl_lun *lun; 12421 void (*fe_datamove)(union ctl_io *io); 12422 12423 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12424 12425 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12426 12427 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12428 #ifdef CTL_TIME_IO 12429 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12430 char str[256]; 12431 char path_str[64]; 12432 struct sbuf sb; 12433 12434 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12435 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12436 12437 sbuf_cat(&sb, path_str); 12438 switch (io->io_hdr.io_type) { 12439 case CTL_IO_SCSI: 12440 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12441 sbuf_printf(&sb, "\n"); 12442 sbuf_cat(&sb, path_str); 12443 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12444 io->scsiio.tag_num, io->scsiio.tag_type); 12445 break; 12446 case CTL_IO_TASK: 12447 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12448 "Tag Type: %d\n", io->taskio.task_action, 12449 io->taskio.tag_num, io->taskio.tag_type); 12450 break; 12451 default: 12452 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12453 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12454 break; 12455 } 12456 sbuf_cat(&sb, path_str); 12457 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12458 (intmax_t)time_uptime - io->io_hdr.start_time); 12459 sbuf_finish(&sb); 12460 printf("%s", sbuf_data(&sb)); 12461 } 12462 #endif /* CTL_TIME_IO */ 12463 12464 #ifdef CTL_IO_DELAY 12465 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12466 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12467 } else { 12468 if ((lun != NULL) 12469 && (lun->delay_info.datamove_delay > 0)) { 12470 12471 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12472 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12473 callout_reset(&io->io_hdr.delay_callout, 12474 lun->delay_info.datamove_delay * hz, 12475 ctl_datamove_timer_wakeup, io); 12476 if (lun->delay_info.datamove_type == 12477 CTL_DELAY_TYPE_ONESHOT) 12478 lun->delay_info.datamove_delay = 0; 12479 return; 12480 } 12481 } 12482 #endif 12483 12484 /* 12485 * This command has been aborted. Set the port status, so we fail 12486 * the data move. 12487 */ 12488 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12489 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12490 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12491 io->io_hdr.nexus.targ_port, 12492 io->io_hdr.nexus.targ_lun); 12493 io->io_hdr.port_status = 31337; 12494 /* 12495 * Note that the backend, in this case, will get the 12496 * callback in its context. In other cases it may get 12497 * called in the frontend's interrupt thread context. 12498 */ 12499 io->scsiio.be_move_done(io); 12500 return; 12501 } 12502 12503 /* Don't confuse frontend with zero length data move. 
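	 * Just report the (empty) transfer as done right away.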
*/ 12504 if (io->scsiio.kern_data_len == 0) { 12505 io->scsiio.be_move_done(io); 12506 return; 12507 } 12508 12509 /* 12510 * If we're in XFER mode and this I/O is from the other shelf 12511 * controller, we need to send the DMA to the other side to 12512 * actually transfer the data to/from the host. In serialize only 12513 * mode the transfer happens below CTL and ctl_datamove() is only 12514 * called on the machine that originally received the I/O. 12515 */ 12516 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12517 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12518 union ctl_ha_msg msg; 12519 uint32_t sg_entries_sent; 12520 int do_sg_copy; 12521 int i; 12522 12523 memset(&msg, 0, sizeof(msg)); 12524 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12525 msg.hdr.original_sc = io->io_hdr.original_sc; 12526 msg.hdr.serializing_sc = io; 12527 msg.hdr.nexus = io->io_hdr.nexus; 12528 msg.hdr.status = io->io_hdr.status; 12529 msg.dt.flags = io->io_hdr.flags; 12530 /* 12531 * We convert everything into a S/G list here. We can't 12532 * pass by reference, only by value between controllers. 12533 * So we can't pass a pointer to the S/G list, only as many 12534 * S/G entries as we can fit in here. If it's possible for 12535 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12536 * then we need to break this up into multiple transfers. 12537 */ 12538 if (io->scsiio.kern_sg_entries == 0) { 12539 msg.dt.kern_sg_entries = 1; 12540 #if 0 12541 /* 12542 * Convert to a physical address if this is a 12543 * virtual address. 12544 */ 12545 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12546 msg.dt.sg_list[0].addr = 12547 io->scsiio.kern_data_ptr; 12548 } else { 12549 /* 12550 * XXX KDM use busdma here! 12551 */ 12552 msg.dt.sg_list[0].addr = (void *) 12553 vtophys(io->scsiio.kern_data_ptr); 12554 } 12555 #else 12556 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12557 ("HA does not support BUS_ADDR")); 12558 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12559 #endif 12560 12561 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12562 do_sg_copy = 0; 12563 } else { 12564 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12565 do_sg_copy = 1; 12566 } 12567 12568 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12569 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12570 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12571 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12572 msg.dt.sg_sequence = 0; 12573 12574 /* 12575 * Loop until we've sent all of the S/G entries. On the 12576 * other end, we'll recompose these S/G entries into one 12577 * contiguous list before passing it to the 12578 */ 12579 for (sg_entries_sent = 0; sg_entries_sent < 12580 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12581 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12582 sizeof(msg.dt.sg_list[0])), 12583 msg.dt.kern_sg_entries - sg_entries_sent); 12584 12585 if (do_sg_copy != 0) { 12586 struct ctl_sg_entry *sgl; 12587 int j; 12588 12589 sgl = (struct ctl_sg_entry *) 12590 io->scsiio.kern_data_ptr; 12591 /* 12592 * If this is in cached memory, flush the cache 12593 * before we send the DMA request to the other 12594 * controller. We want to do this in either 12595 * the * read or the write case. The read 12596 * case is straightforward. In the write 12597 * case, we want to make sure nothing is 12598 * in the local cache that could overwrite 12599 * the DMAed data. 
12600 */ 12601 12602 for (i = sg_entries_sent, j = 0; 12603 i < msg.dt.cur_sg_entries; i++, j++) { 12604 #if 0 12605 if ((io->io_hdr.flags & 12606 CTL_FLAG_BUS_ADDR) == 0) { 12607 /* 12608 * XXX KDM use busdma. 12609 */ 12610 msg.dt.sg_list[j].addr =(void *) 12611 vtophys(sgl[i].addr); 12612 } else { 12613 msg.dt.sg_list[j].addr = 12614 sgl[i].addr; 12615 } 12616 #else 12617 KASSERT((io->io_hdr.flags & 12618 CTL_FLAG_BUS_ADDR) == 0, 12619 ("HA does not support BUS_ADDR")); 12620 msg.dt.sg_list[j].addr = sgl[i].addr; 12621 #endif 12622 msg.dt.sg_list[j].len = sgl[i].len; 12623 } 12624 } 12625 12626 sg_entries_sent += msg.dt.cur_sg_entries; 12627 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12628 msg.dt.sg_last = 1; 12629 else 12630 msg.dt.sg_last = 0; 12631 12632 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12633 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12634 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12635 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12636 io->io_hdr.port_status = 31341; 12637 io->scsiio.be_move_done(io); 12638 return; 12639 } 12640 12641 msg.dt.sent_sg_entries = sg_entries_sent; 12642 } 12643 12644 /* 12645 * Officially handover the request from us to peer. 12646 * If failover has just happened, then we must return error. 12647 * If failover happen just after, then it is not our problem. 12648 */ 12649 if (lun) 12650 mtx_lock(&lun->lun_lock); 12651 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12652 if (lun) 12653 mtx_unlock(&lun->lun_lock); 12654 io->io_hdr.port_status = 31342; 12655 io->scsiio.be_move_done(io); 12656 return; 12657 } 12658 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12659 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 12660 if (lun) 12661 mtx_unlock(&lun->lun_lock); 12662 } else { 12663 12664 /* 12665 * Lookup the fe_datamove() function for this particular 12666 * front end. 12667 */ 12668 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12669 12670 fe_datamove(io); 12671 } 12672 } 12673 12674 static void 12675 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12676 { 12677 union ctl_ha_msg msg; 12678 12679 memset(&msg, 0, sizeof(msg)); 12680 12681 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12682 msg.hdr.original_sc = io; 12683 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12684 msg.hdr.nexus = io->io_hdr.nexus; 12685 msg.hdr.status = io->io_hdr.status; 12686 msg.scsi.tag_num = io->scsiio.tag_num; 12687 msg.scsi.tag_type = io->scsiio.tag_type; 12688 msg.scsi.scsi_status = io->scsiio.scsi_status; 12689 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12690 io->scsiio.sense_len); 12691 msg.scsi.sense_len = io->scsiio.sense_len; 12692 msg.scsi.sense_residual = io->scsiio.sense_residual; 12693 msg.scsi.fetd_status = io->io_hdr.port_status; 12694 msg.scsi.residual = io->scsiio.residual; 12695 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12696 12697 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12698 ctl_failover_io(io, /*have_lock*/ have_lock); 12699 return; 12700 } 12701 12702 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12703 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12704 msg.scsi.sense_len, M_WAITOK); 12705 } 12706 12707 /* 12708 * The DMA to the remote side is done, now we need to tell the other side 12709 * we're done so it can continue with its data movement. 
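 * The local bounce buffers and S/G lists are freed here as well, since
 * the data, if the transfer succeeded, now lives in the other
 * controller's memory.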
12710 */ 12711 static void 12712 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12713 { 12714 union ctl_io *io; 12715 int i; 12716 12717 io = rq->context; 12718 12719 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12720 printf("%s: ISC DMA write failed with error %d", __func__, 12721 rq->ret); 12722 ctl_set_internal_failure(&io->scsiio, 12723 /*sks_valid*/ 1, 12724 /*retry_count*/ rq->ret); 12725 } 12726 12727 ctl_dt_req_free(rq); 12728 12729 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12730 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12731 free(io->io_hdr.remote_sglist, M_CTL); 12732 io->io_hdr.remote_sglist = NULL; 12733 io->io_hdr.local_sglist = NULL; 12734 12735 /* 12736 * The data is in local and remote memory, so now we need to send 12737 * status (good or back) back to the other side. 12738 */ 12739 ctl_send_datamove_done(io, /*have_lock*/ 0); 12740 } 12741 12742 /* 12743 * We've moved the data from the host/controller into local memory. Now we 12744 * need to push it over to the remote controller's memory. 12745 */ 12746 static int 12747 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12748 { 12749 int retval; 12750 12751 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12752 ctl_datamove_remote_write_cb); 12753 return (retval); 12754 } 12755 12756 static void 12757 ctl_datamove_remote_write(union ctl_io *io) 12758 { 12759 int retval; 12760 void (*fe_datamove)(union ctl_io *io); 12761 12762 /* 12763 * - Get the data from the host/HBA into local memory. 12764 * - DMA memory from the local controller to the remote controller. 12765 * - Send status back to the remote controller. 12766 */ 12767 12768 retval = ctl_datamove_remote_sgl_setup(io); 12769 if (retval != 0) 12770 return; 12771 12772 /* Switch the pointer over so the FETD knows what to do */ 12773 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12774 12775 /* 12776 * Use a custom move done callback, since we need to send completion 12777 * back to the other controller, not to the backend on this side. 12778 */ 12779 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12780 12781 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12782 fe_datamove(io); 12783 } 12784 12785 static int 12786 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12787 { 12788 #if 0 12789 char str[256]; 12790 char path_str[64]; 12791 struct sbuf sb; 12792 #endif 12793 int i; 12794 12795 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12796 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12797 free(io->io_hdr.remote_sglist, M_CTL); 12798 io->io_hdr.remote_sglist = NULL; 12799 io->io_hdr.local_sglist = NULL; 12800 12801 #if 0 12802 scsi_path_string(io, path_str, sizeof(path_str)); 12803 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12804 sbuf_cat(&sb, path_str); 12805 scsi_command_string(&io->scsiio, NULL, &sb); 12806 sbuf_printf(&sb, "\n"); 12807 sbuf_cat(&sb, path_str); 12808 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12809 io->scsiio.tag_num, io->scsiio.tag_type); 12810 sbuf_cat(&sb, path_str); 12811 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12812 io->io_hdr.flags, io->io_hdr.status); 12813 sbuf_finish(&sb); 12814 printk("%s", sbuf_data(&sb)); 12815 #endif 12816 12817 12818 /* 12819 * The read is done, now we need to send status (good or bad) back 12820 * to the other side. 
12821 */ 12822 ctl_send_datamove_done(io, /*have_lock*/ 0); 12823 12824 return (0); 12825 } 12826 12827 static void 12828 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12829 { 12830 union ctl_io *io; 12831 void (*fe_datamove)(union ctl_io *io); 12832 12833 io = rq->context; 12834 12835 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12836 printf("%s: ISC DMA read failed with error %d\n", __func__, 12837 rq->ret); 12838 ctl_set_internal_failure(&io->scsiio, 12839 /*sks_valid*/ 1, 12840 /*retry_count*/ rq->ret); 12841 } 12842 12843 ctl_dt_req_free(rq); 12844 12845 /* Switch the pointer over so the FETD knows what to do */ 12846 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12847 12848 /* 12849 * Use a custom move done callback, since we need to send completion 12850 * back to the other controller, not to the backend on this side. 12851 */ 12852 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12853 12854 /* XXX KDM add checks like the ones in ctl_datamove? */ 12855 12856 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12857 fe_datamove(io); 12858 } 12859 12860 static int 12861 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12862 { 12863 struct ctl_sg_entry *local_sglist; 12864 struct ctl_softc *softc; 12865 uint32_t len_to_go; 12866 int retval; 12867 int i; 12868 12869 retval = 0; 12870 softc = control_softc; 12871 local_sglist = io->io_hdr.local_sglist; 12872 len_to_go = io->scsiio.kern_data_len; 12873 12874 /* 12875 * The difficult thing here is that the size of the various 12876 * S/G segments may be different than the size from the 12877 * remote controller. That'll make it harder when DMAing 12878 * the data back to the other side. 12879 */ 12880 for (i = 0; len_to_go > 0; i++) { 12881 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12882 local_sglist[i].addr = 12883 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12884 12885 len_to_go -= local_sglist[i].len; 12886 } 12887 /* 12888 * Reset the number of S/G entries accordingly. The original 12889 * number of S/G entries is available in rem_sg_entries. 12890 */ 12891 io->scsiio.kern_sg_entries = i; 12892 12893 #if 0 12894 printf("%s: kern_sg_entries = %d\n", __func__, 12895 io->scsiio.kern_sg_entries); 12896 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12897 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12898 local_sglist[i].addr, local_sglist[i].len); 12899 #endif 12900 12901 return (retval); 12902 } 12903 12904 static int 12905 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12906 ctl_ha_dt_cb callback) 12907 { 12908 struct ctl_ha_dt_req *rq; 12909 struct ctl_sg_entry *remote_sglist, *local_sglist; 12910 uint32_t local_used, remote_used, total_used; 12911 int i, j, isc_ret; 12912 12913 rq = ctl_dt_req_alloc(); 12914 12915 /* 12916 * If we failed to allocate the request, and if the DMA didn't fail 12917 * anyway, set busy status. This is just a resource allocation 12918 * failure. 12919 */ 12920 if ((rq == NULL) 12921 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12922 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12923 ctl_set_busy(&io->scsiio); 12924 12925 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12926 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12927 12928 if (rq != NULL) 12929 ctl_dt_req_free(rq); 12930 12931 /* 12932 * The data move failed. We need to return status back 12933 * to the other controller. No point in trying to DMA 12934 * data to the remote controller. 
12935 */ 12936 12937 ctl_send_datamove_done(io, /*have_lock*/ 0); 12938 12939 return (1); 12940 } 12941 12942 local_sglist = io->io_hdr.local_sglist; 12943 remote_sglist = io->io_hdr.remote_sglist; 12944 local_used = 0; 12945 remote_used = 0; 12946 total_used = 0; 12947 12948 /* 12949 * Pull/push the data over the wire from/to the other controller. 12950 * This takes into account the possibility that the local and 12951 * remote sglists may not be identical in terms of the size of 12952 * the elements and the number of elements. 12953 * 12954 * One fundamental assumption here is that the length allocated for 12955 * both the local and remote sglists is identical. Otherwise, we've 12956 * essentially got a coding error of some sort. 12957 */ 12958 isc_ret = CTL_HA_STATUS_SUCCESS; 12959 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12960 uint32_t cur_len; 12961 uint8_t *tmp_ptr; 12962 12963 rq->command = command; 12964 rq->context = io; 12965 12966 /* 12967 * Both pointers should be aligned. But it is possible 12968 * that the allocation length is not. They should both 12969 * also have enough slack left over at the end, though, 12970 * to round up to the next 8 byte boundary. 12971 */ 12972 cur_len = MIN(local_sglist[i].len - local_used, 12973 remote_sglist[j].len - remote_used); 12974 rq->size = cur_len; 12975 12976 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12977 tmp_ptr += local_used; 12978 12979 #if 0 12980 /* Use physical addresses when talking to ISC hardware */ 12981 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12982 /* XXX KDM use busdma */ 12983 rq->local = vtophys(tmp_ptr); 12984 } else 12985 rq->local = tmp_ptr; 12986 #else 12987 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12988 ("HA does not support BUS_ADDR")); 12989 rq->local = tmp_ptr; 12990 #endif 12991 12992 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12993 tmp_ptr += remote_used; 12994 rq->remote = tmp_ptr; 12995 12996 rq->callback = NULL; 12997 12998 local_used += cur_len; 12999 if (local_used >= local_sglist[i].len) { 13000 i++; 13001 local_used = 0; 13002 } 13003 13004 remote_used += cur_len; 13005 if (remote_used >= remote_sglist[j].len) { 13006 j++; 13007 remote_used = 0; 13008 } 13009 total_used += cur_len; 13010 13011 if (total_used >= io->scsiio.kern_data_len) 13012 rq->callback = callback; 13013 13014 #if 0 13015 printf("%s: %s: local %p remote %p size %d\n", __func__, 13016 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 13017 rq->local, rq->remote, rq->size); 13018 #endif 13019 13020 isc_ret = ctl_dt_single(rq); 13021 if (isc_ret > CTL_HA_STATUS_SUCCESS) 13022 break; 13023 } 13024 if (isc_ret != CTL_HA_STATUS_WAIT) { 13025 rq->ret = isc_ret; 13026 callback(rq); 13027 } 13028 13029 return (0); 13030 } 13031 13032 static void 13033 ctl_datamove_remote_read(union ctl_io *io) 13034 { 13035 int retval; 13036 int i; 13037 13038 /* 13039 * This will send an error to the other controller in the case of a 13040 * failure. 13041 */ 13042 retval = ctl_datamove_remote_sgl_setup(io); 13043 if (retval != 0) 13044 return; 13045 13046 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13047 ctl_datamove_remote_read_cb); 13048 if (retval != 0) { 13049 /* 13050 * Make sure we free memory if there was an error.. The 13051 * ctl_datamove_remote_xfer() function will send the 13052 * datamove done message, or call the callback with an 13053 * error if there is a problem. 
13054 */ 13055 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13056 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13057 free(io->io_hdr.remote_sglist, M_CTL); 13058 io->io_hdr.remote_sglist = NULL; 13059 io->io_hdr.local_sglist = NULL; 13060 } 13061 } 13062 13063 /* 13064 * Process a datamove request from the other controller. This is used for 13065 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13066 * first. Once that is complete, the data gets DMAed into the remote 13067 * controller's memory. For reads, we DMA from the remote controller's 13068 * memory into our memory first, and then move it out to the FETD. 13069 */ 13070 static void 13071 ctl_datamove_remote(union ctl_io *io) 13072 { 13073 13074 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 13075 13076 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13077 ctl_failover_io(io, /*have_lock*/ 0); 13078 return; 13079 } 13080 13081 /* 13082 * Note that we look for an aborted I/O here, but don't do some of 13083 * the other checks that ctl_datamove() normally does. 13084 * We don't need to run the datamove delay code, since that should 13085 * have been done if need be on the other controller. 13086 */ 13087 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13088 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 13089 io->scsiio.tag_num, io->io_hdr.nexus.initid, 13090 io->io_hdr.nexus.targ_port, 13091 io->io_hdr.nexus.targ_lun); 13092 io->io_hdr.port_status = 31338; 13093 ctl_send_datamove_done(io, /*have_lock*/ 0); 13094 return; 13095 } 13096 13097 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 13098 ctl_datamove_remote_write(io); 13099 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 13100 ctl_datamove_remote_read(io); 13101 else { 13102 io->io_hdr.port_status = 31339; 13103 ctl_send_datamove_done(io, /*have_lock*/ 0); 13104 } 13105 } 13106 13107 static int 13108 ctl_process_done(union ctl_io *io) 13109 { 13110 struct ctl_lun *lun; 13111 struct ctl_softc *softc = control_softc; 13112 void (*fe_done)(union ctl_io *io); 13113 union ctl_ha_msg msg; 13114 uint32_t targ_port = io->io_hdr.nexus.targ_port; 13115 13116 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13117 13118 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) 13119 fe_done = softc->ctl_ports[targ_port]->fe_done; 13120 else 13121 fe_done = NULL; 13122 13123 #ifdef CTL_TIME_IO 13124 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13125 char str[256]; 13126 char path_str[64]; 13127 struct sbuf sb; 13128 13129 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13130 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13131 13132 sbuf_cat(&sb, path_str); 13133 switch (io->io_hdr.io_type) { 13134 case CTL_IO_SCSI: 13135 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13136 sbuf_printf(&sb, "\n"); 13137 sbuf_cat(&sb, path_str); 13138 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13139 io->scsiio.tag_num, io->scsiio.tag_type); 13140 break; 13141 case CTL_IO_TASK: 13142 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13143 "Tag Type: %d\n", io->taskio.task_action, 13144 io->taskio.tag_num, io->taskio.tag_type); 13145 break; 13146 default: 13147 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13148 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13149 break; 13150 } 13151 sbuf_cat(&sb, path_str); 13152 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13153 (intmax_t)time_uptime - io->io_hdr.start_time); 13154 sbuf_finish(&sb); 13155 printf("%s", sbuf_data(&sb)); 13156 } 13157 #endif /* 
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_INFO)
			ctl_io_error_print(io, NULL);
		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
			ctl_free_io(io);
		else
			fe_done(io);
		return (CTL_RETVAL_COMPLETE);
	default:
		panic("ctl_process_done: invalid io type %d\n",
		      io->io_hdr.io_type);
		break; /* NOTREACHED */
	}

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
				 io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if ((STAILQ_FIRST(&lun->error_list) != NULL) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
		struct bintime cur_bt;
#endif
		int type;

		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
			 CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.ports[targ_port].dma_time[type],
		    &io->io_hdr.dma_bt);
		lun->stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
		getbintime(&cur_bt);
		bintime_sub(&cur_bt, &io->io_hdr.start_bt);
		bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

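	/*
	 * All that is left is to report completion: optionally log the
	 * error, let the other controller know we are done with an I/O it
	 * is tracking (a bare CTL_MSG_FINISH_IO in non-XFER HA modes, or
	 * the full status and sense data in XFER mode for I/O that
	 * originated on the other side), and otherwise hand the I/O back
	 * to the frontend via fe_done().
	 */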
	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}
	if ((softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		       io->scsiio.sense_len);

		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
		ctl_free_io(io);
	} else
		fe_done(io);

	return (CTL_RETVAL_COMPLETE);
}

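/*
 * The CTL_WITH_CA code below keeps contingent allegiance (pending sense)
 * state inside CTL on a per-initiator basis, for frontends that cannot do
 * autosense themselves.
 */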
13359 */ 13360 if (ctl_is_set(lun->have_ca, initidx)) { 13361 mtx_unlock(&lun->lun_lock); 13362 goto bailout; 13363 } 13364 13365 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13366 MIN(sizeof(lun->pending_sense[initidx]), 13367 sizeof(io->scsiio.sense_data))); 13368 ctl_set_mask(lun->have_ca, initidx); 13369 mtx_unlock(&lun->lun_lock); 13370 13371 bailout: 13372 mtx_unlock(&softc->ctl_lock); 13373 13374 ctl_free_io(io); 13375 13376 return (CTL_RETVAL_COMPLETE); 13377 } 13378 #endif 13379 13380 /* 13381 * Primary command inlet from frontend ports. All SCSI and task I/O 13382 * requests must go through this function. 13383 */ 13384 int 13385 ctl_queue(union ctl_io *io) 13386 { 13387 struct ctl_port *port; 13388 13389 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13390 13391 #ifdef CTL_TIME_IO 13392 io->io_hdr.start_time = time_uptime; 13393 getbintime(&io->io_hdr.start_bt); 13394 #endif /* CTL_TIME_IO */ 13395 13396 /* Map FE-specific LUN ID into global one. */ 13397 port = ctl_io_port(&io->io_hdr); 13398 io->io_hdr.nexus.targ_mapped_lun = 13399 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13400 13401 switch (io->io_hdr.io_type) { 13402 case CTL_IO_SCSI: 13403 case CTL_IO_TASK: 13404 if (ctl_debug & CTL_DEBUG_CDB) 13405 ctl_io_print(io); 13406 ctl_enqueue_incoming(io); 13407 break; 13408 default: 13409 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13410 return (EINVAL); 13411 } 13412 13413 return (CTL_RETVAL_COMPLETE); 13414 } 13415 13416 #ifdef CTL_IO_DELAY 13417 static void 13418 ctl_done_timer_wakeup(void *arg) 13419 { 13420 union ctl_io *io; 13421 13422 io = (union ctl_io *)arg; 13423 ctl_done(io); 13424 } 13425 #endif /* CTL_IO_DELAY */ 13426 13427 void 13428 ctl_serseq_done(union ctl_io *io) 13429 { 13430 struct ctl_lun *lun; 13431 13432 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13433 if (lun->be_lun == NULL || 13434 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13435 return; 13436 mtx_lock(&lun->lun_lock); 13437 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13438 ctl_check_blocked(lun); 13439 mtx_unlock(&lun->lun_lock); 13440 } 13441 13442 void 13443 ctl_done(union ctl_io *io) 13444 { 13445 13446 /* 13447 * Enable this to catch duplicate completion issues. 13448 */ 13449 #if 0 13450 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13451 printf("%s: type %d msg %d cdb %x iptl: " 13452 "%u:%u:%u tag 0x%04x " 13453 "flag %#x status %x\n", 13454 __func__, 13455 io->io_hdr.io_type, 13456 io->io_hdr.msg_type, 13457 io->scsiio.cdb[0], 13458 io->io_hdr.nexus.initid, 13459 io->io_hdr.nexus.targ_port, 13460 io->io_hdr.nexus.targ_lun, 13461 (io->io_hdr.io_type == 13462 CTL_IO_TASK) ? 13463 io->taskio.tag_num : 13464 io->scsiio.tag_num, 13465 io->io_hdr.flags, 13466 io->io_hdr.status); 13467 } else 13468 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13469 #endif 13470 13471 /* 13472 * This is an internal copy of an I/O, and should not go through 13473 * the normal done processing logic. 
13474 */ 13475 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13476 return; 13477 13478 #ifdef CTL_IO_DELAY 13479 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13480 struct ctl_lun *lun; 13481 13482 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13483 13484 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13485 } else { 13486 struct ctl_lun *lun; 13487 13488 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13489 13490 if ((lun != NULL) 13491 && (lun->delay_info.done_delay > 0)) { 13492 13493 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13494 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13495 callout_reset(&io->io_hdr.delay_callout, 13496 lun->delay_info.done_delay * hz, 13497 ctl_done_timer_wakeup, io); 13498 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13499 lun->delay_info.done_delay = 0; 13500 return; 13501 } 13502 } 13503 #endif /* CTL_IO_DELAY */ 13504 13505 ctl_enqueue_done(io); 13506 } 13507 13508 static void 13509 ctl_work_thread(void *arg) 13510 { 13511 struct ctl_thread *thr = (struct ctl_thread *)arg; 13512 struct ctl_softc *softc = thr->ctl_softc; 13513 union ctl_io *io; 13514 int retval; 13515 13516 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13517 13518 for (;;) { 13519 retval = 0; 13520 13521 /* 13522 * We handle the queues in this order: 13523 * - ISC 13524 * - done queue (to free up resources, unblock other commands) 13525 * - RtR queue 13526 * - incoming queue 13527 * 13528 * If those queues are empty, we break out of the loop and 13529 * go to sleep. 13530 */ 13531 mtx_lock(&thr->queue_lock); 13532 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13533 if (io != NULL) { 13534 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13535 mtx_unlock(&thr->queue_lock); 13536 ctl_handle_isc(io); 13537 continue; 13538 } 13539 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13540 if (io != NULL) { 13541 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13542 /* clear any blocked commands, call fe_done */ 13543 mtx_unlock(&thr->queue_lock); 13544 retval = ctl_process_done(io); 13545 continue; 13546 } 13547 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13548 if (io != NULL) { 13549 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13550 mtx_unlock(&thr->queue_lock); 13551 if (io->io_hdr.io_type == CTL_IO_TASK) 13552 ctl_run_task(io); 13553 else 13554 ctl_scsiio_precheck(softc, &io->scsiio); 13555 continue; 13556 } 13557 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13558 if (io != NULL) { 13559 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13560 mtx_unlock(&thr->queue_lock); 13561 retval = ctl_scsiio(&io->scsiio); 13562 if (retval != CTL_RETVAL_COMPLETE) 13563 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13564 continue; 13565 } 13566 13567 /* Sleep until we have something to do. */ 13568 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13569 } 13570 } 13571 13572 static void 13573 ctl_lun_thread(void *arg) 13574 { 13575 struct ctl_softc *softc = (struct ctl_softc *)arg; 13576 struct ctl_be_lun *be_lun; 13577 int retval; 13578 13579 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13580 13581 for (;;) { 13582 retval = 0; 13583 mtx_lock(&softc->ctl_lock); 13584 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13585 if (be_lun != NULL) { 13586 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13587 mtx_unlock(&softc->ctl_lock); 13588 ctl_create_lun(be_lun); 13589 continue; 13590 } 13591 13592 /* Sleep until we have something to do. 
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		retval = 0;
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_be_lun *be_lun;
	struct scsi_da_rw_recovery_page *rwpage;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			be_lun = lun->be_lun;
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_OFFLINE) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
			if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}

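/*
 * Hand an I/O off to one of the worker threads.  Incoming I/Os are spread
 * across threads by a hash of the target port and initiator ID; the RtR,
 * done and ISC queues are selected by the mapped LUN number, so all later
 * processing for a given LUN lands on the same worker thread.
 */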
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */