1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Edward Tomasz Napierala 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * substantially similar to the "NO WARRANTY" disclaimer below 17 * ("Disclaimer") and any redistribution must be conditioned upon 18 * including a substantially similar Disclaimer requirement for further 19 * binary redistribution. 20 * 21 * NO WARRANTY 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGES. 33 * 34 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $ 35 */ 36 /* 37 * CAM Target Layer, a SCSI device emulation subsystem. 38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42 #define _CTL_C 43 44 #include <sys/cdefs.h> 45 __FBSDID("$FreeBSD$"); 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/ctype.h> 50 #include <sys/kernel.h> 51 #include <sys/types.h> 52 #include <sys/kthread.h> 53 #include <sys/bio.h> 54 #include <sys/fcntl.h> 55 #include <sys/lock.h> 56 #include <sys/module.h> 57 #include <sys/mutex.h> 58 #include <sys/condvar.h> 59 #include <sys/malloc.h> 60 #include <sys/conf.h> 61 #include <sys/ioccom.h> 62 #include <sys/queue.h> 63 #include <sys/sbuf.h> 64 #include <sys/smp.h> 65 #include <sys/endian.h> 66 #include <sys/sysctl.h> 67 #include <vm/uma.h> 68 69 #include <cam/cam.h> 70 #include <cam/scsi/scsi_all.h> 71 #include <cam/scsi/scsi_da.h> 72 #include <cam/ctl/ctl_io.h> 73 #include <cam/ctl/ctl.h> 74 #include <cam/ctl/ctl_frontend.h> 75 #include <cam/ctl/ctl_frontend_internal.h> 76 #include <cam/ctl/ctl_util.h> 77 #include <cam/ctl/ctl_backend.h> 78 #include <cam/ctl/ctl_ioctl.h> 79 #include <cam/ctl/ctl_ha.h> 80 #include <cam/ctl/ctl_private.h> 81 #include <cam/ctl/ctl_debug.h> 82 #include <cam/ctl/ctl_scsi_all.h> 83 #include <cam/ctl/ctl_error.h> 84 85 struct ctl_softc *control_softc = NULL; 86 87 /* 88 * Size and alignment macros needed for Copan-specific HA hardware. These 89 * can go away when the HA code is re-written, and uses busdma for any 90 * hardware. 
91 */ 92 #define CTL_ALIGN_8B(target, source, type) \ 93 if (((uint32_t)source & 0x7) != 0) \ 94 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\ 95 else \ 96 target = (type)source; 97 98 #define CTL_SIZE_8B(target, size) \ 99 if ((size & 0x7) != 0) \ 100 target = size + (0x8 - (size & 0x7)); \ 101 else \ 102 target = size; 103 104 #define CTL_ALIGN_8B_MARGIN 16 105 106 /* 107 * Template mode pages. 108 */ 109 110 /* 111 * Note that these are default values only. The actual values will be 112 * filled in when the user does a mode sense. 113 */ 114 static struct copan_debugconf_subpage debugconf_page_default = { 115 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 116 DBGCNF_SUBPAGE_CODE, /* subpage */ 117 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 118 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 119 DBGCNF_VERSION, /* page_version */ 120 {CTL_TIME_IO_DEFAULT_SECS>>8, 121 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 122 }; 123 124 static struct copan_debugconf_subpage debugconf_page_changeable = { 125 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 126 DBGCNF_SUBPAGE_CODE, /* subpage */ 127 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 128 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 129 0, /* page_version */ 130 {0xff,0xff}, /* ctl_time_io_secs */ 131 }; 132 133 static struct scsi_da_rw_recovery_page rw_er_page_default = { 134 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 135 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 136 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 137 /*read_retry_count*/0, 138 /*correction_span*/0, 139 /*head_offset_count*/0, 140 /*data_strobe_offset_cnt*/0, 141 /*byte8*/SMS_RWER_LBPERE, 142 /*write_retry_count*/0, 143 /*reserved2*/0, 144 /*recovery_time_limit*/{0, 0}, 145 }; 146 147 static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 148 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 149 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 150 /*byte3*/0, 151 /*read_retry_count*/0, 152 /*correction_span*/0, 153 /*head_offset_count*/0, 154 /*data_strobe_offset_cnt*/0, 155 /*byte8*/0, 156 /*write_retry_count*/0, 157 /*reserved2*/0, 158 /*recovery_time_limit*/{0, 0}, 159 }; 160 161 static struct scsi_format_page format_page_default = { 162 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 163 /*page_length*/sizeof(struct scsi_format_page) - 2, 164 /*tracks_per_zone*/ {0, 0}, 165 /*alt_sectors_per_zone*/ {0, 0}, 166 /*alt_tracks_per_zone*/ {0, 0}, 167 /*alt_tracks_per_lun*/ {0, 0}, 168 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 169 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 170 /*bytes_per_sector*/ {0, 0}, 171 /*interleave*/ {0, 0}, 172 /*track_skew*/ {0, 0}, 173 /*cylinder_skew*/ {0, 0}, 174 /*flags*/ SFP_HSEC, 175 /*reserved*/ {0, 0, 0} 176 }; 177 178 static struct scsi_format_page format_page_changeable = { 179 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 180 /*page_length*/sizeof(struct scsi_format_page) - 2, 181 /*tracks_per_zone*/ {0, 0}, 182 /*alt_sectors_per_zone*/ {0, 0}, 183 /*alt_tracks_per_zone*/ {0, 0}, 184 /*alt_tracks_per_lun*/ {0, 0}, 185 /*sectors_per_track*/ {0, 0}, 186 /*bytes_per_sector*/ {0, 0}, 187 /*interleave*/ {0, 0}, 188 /*track_skew*/ {0, 0}, 189 /*cylinder_skew*/ {0, 0}, 190 /*flags*/ 0, 191 /*reserved*/ {0, 0, 0} 192 }; 193 194 static struct scsi_rigid_disk_page rigid_disk_page_default = { 195 /*page_code*/SMS_RIGID_DISK_PAGE, 196 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 197 /*cylinders*/ {0, 0, 0}, 198 /*heads*/ CTL_DEFAULT_HEADS, 
199 /*start_write_precomp*/ {0, 0, 0}, 200 /*start_reduced_current*/ {0, 0, 0}, 201 /*step_rate*/ {0, 0}, 202 /*landing_zone_cylinder*/ {0, 0, 0}, 203 /*rpl*/ SRDP_RPL_DISABLED, 204 /*rotational_offset*/ 0, 205 /*reserved1*/ 0, 206 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 207 CTL_DEFAULT_ROTATION_RATE & 0xff}, 208 /*reserved2*/ {0, 0} 209 }; 210 211 static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 212 /*page_code*/SMS_RIGID_DISK_PAGE, 213 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 214 /*cylinders*/ {0, 0, 0}, 215 /*heads*/ 0, 216 /*start_write_precomp*/ {0, 0, 0}, 217 /*start_reduced_current*/ {0, 0, 0}, 218 /*step_rate*/ {0, 0}, 219 /*landing_zone_cylinder*/ {0, 0, 0}, 220 /*rpl*/ 0, 221 /*rotational_offset*/ 0, 222 /*reserved1*/ 0, 223 /*rotation_rate*/ {0, 0}, 224 /*reserved2*/ {0, 0} 225 }; 226 227 static struct scsi_caching_page caching_page_default = { 228 /*page_code*/SMS_CACHING_PAGE, 229 /*page_length*/sizeof(struct scsi_caching_page) - 2, 230 /*flags1*/ SCP_DISC | SCP_WCE, 231 /*ret_priority*/ 0, 232 /*disable_pf_transfer_len*/ {0xff, 0xff}, 233 /*min_prefetch*/ {0, 0}, 234 /*max_prefetch*/ {0xff, 0xff}, 235 /*max_pf_ceiling*/ {0xff, 0xff}, 236 /*flags2*/ 0, 237 /*cache_segments*/ 0, 238 /*cache_seg_size*/ {0, 0}, 239 /*reserved*/ 0, 240 /*non_cache_seg_size*/ {0, 0, 0} 241 }; 242 243 static struct scsi_caching_page caching_page_changeable = { 244 /*page_code*/SMS_CACHING_PAGE, 245 /*page_length*/sizeof(struct scsi_caching_page) - 2, 246 /*flags1*/ SCP_WCE | SCP_RCD, 247 /*ret_priority*/ 0, 248 /*disable_pf_transfer_len*/ {0, 0}, 249 /*min_prefetch*/ {0, 0}, 250 /*max_prefetch*/ {0, 0}, 251 /*max_pf_ceiling*/ {0, 0}, 252 /*flags2*/ 0, 253 /*cache_segments*/ 0, 254 /*cache_seg_size*/ {0, 0}, 255 /*reserved*/ 0, 256 /*non_cache_seg_size*/ {0, 0, 0} 257 }; 258 259 static struct scsi_control_page control_page_default = { 260 /*page_code*/SMS_CONTROL_MODE_PAGE, 261 /*page_length*/sizeof(struct scsi_control_page) - 2, 262 /*rlec*/0, 263 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 264 /*eca_and_aen*/0, 265 /*flags4*/SCP_TAS, 266 /*aen_holdoff_period*/{0, 0}, 267 /*busy_timeout_period*/{0, 0}, 268 /*extended_selftest_completion_time*/{0, 0} 269 }; 270 271 static struct scsi_control_page control_page_changeable = { 272 /*page_code*/SMS_CONTROL_MODE_PAGE, 273 /*page_length*/sizeof(struct scsi_control_page) - 2, 274 /*rlec*/SCP_DSENSE, 275 /*queue_flags*/SCP_QUEUE_ALG_MASK, 276 /*eca_and_aen*/SCP_SWP, 277 /*flags4*/0, 278 /*aen_holdoff_period*/{0, 0}, 279 /*busy_timeout_period*/{0, 0}, 280 /*extended_selftest_completion_time*/{0, 0} 281 }; 282 283 static struct scsi_info_exceptions_page ie_page_default = { 284 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 285 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 286 /*info_flags*/SIEP_FLAGS_DEXCPT, 287 /*mrie*/0, 288 /*interval_timer*/{0, 0, 0, 0}, 289 /*report_count*/{0, 0, 0, 0} 290 }; 291 292 static struct scsi_info_exceptions_page ie_page_changeable = { 293 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 294 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 295 /*info_flags*/0, 296 /*mrie*/0, 297 /*interval_timer*/{0, 0, 0, 0}, 298 /*report_count*/{0, 0, 0, 0} 299 }; 300 301 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 302 303 static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 304 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 305 /*subpage_code*/0x02, 306 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 307 /*flags*/0, 308 
/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 309 /*descr*/{}}, 310 {{/*flags*/0, 311 /*resource*/0x01, 312 /*reserved*/{0, 0}, 313 /*count*/{0, 0, 0, 0}}, 314 {/*flags*/0, 315 /*resource*/0x02, 316 /*reserved*/{0, 0}, 317 /*count*/{0, 0, 0, 0}}, 318 {/*flags*/0, 319 /*resource*/0xf1, 320 /*reserved*/{0, 0}, 321 /*count*/{0, 0, 0, 0}}, 322 {/*flags*/0, 323 /*resource*/0xf2, 324 /*reserved*/{0, 0}, 325 /*count*/{0, 0, 0, 0}} 326 } 327 }; 328 329 static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 330 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 331 /*subpage_code*/0x02, 332 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 333 /*flags*/0, 334 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 335 /*descr*/{}}, 336 {{/*flags*/0, 337 /*resource*/0, 338 /*reserved*/{0, 0}, 339 /*count*/{0, 0, 0, 0}}, 340 {/*flags*/0, 341 /*resource*/0, 342 /*reserved*/{0, 0}, 343 /*count*/{0, 0, 0, 0}}, 344 {/*flags*/0, 345 /*resource*/0, 346 /*reserved*/{0, 0}, 347 /*count*/{0, 0, 0, 0}}, 348 {/*flags*/0, 349 /*resource*/0, 350 /*reserved*/{0, 0}, 351 /*count*/{0, 0, 0, 0}} 352 } 353 }; 354 355 /* 356 * XXX KDM move these into the softc. 357 */ 358 static int rcv_sync_msg; 359 static uint8_t ctl_pause_rtr; 360 361 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 362 static int worker_threads = -1; 363 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 364 &worker_threads, 1, "Number of worker threads"); 365 static int ctl_debug = CTL_DEBUG_NONE; 366 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 367 &ctl_debug, 0, "Enabled debug flags"); 368 369 /* 370 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 371 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 372 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 373 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 374 */ 375 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 376 377 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 378 int param); 379 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 380 static int ctl_init(void); 381 void ctl_shutdown(void); 382 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 383 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 384 static void ctl_ioctl_online(void *arg); 385 static void ctl_ioctl_offline(void *arg); 386 static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id); 387 static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id); 388 static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio); 389 static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 390 static int ctl_ioctl_submit_wait(union ctl_io *io); 391 static void ctl_ioctl_datamove(union ctl_io *io); 392 static void ctl_ioctl_done(union ctl_io *io); 393 static void ctl_ioctl_hard_startstop_callback(void *arg, 394 struct cfi_metatask *metatask); 395 static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask); 396 static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 397 struct ctl_ooa *ooa_hdr, 398 struct ctl_ooa_entry *kern_entries); 399 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 400 struct thread *td); 401 static uint32_t ctl_map_lun(int port_num, uint32_t lun); 402 static uint32_t ctl_map_lun_back(int port_num, uint32_t lun); 403 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct 
ctl_lun *lun, 404 struct ctl_be_lun *be_lun, struct ctl_id target_id); 405 static int ctl_free_lun(struct ctl_lun *lun); 406 static void ctl_create_lun(struct ctl_be_lun *be_lun); 407 /** 408 static void ctl_failover_change_pages(struct ctl_softc *softc, 409 struct ctl_scsiio *ctsio, int master); 410 **/ 411 412 static int ctl_do_mode_select(union ctl_io *io); 413 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 414 uint64_t res_key, uint64_t sa_res_key, 415 uint8_t type, uint32_t residx, 416 struct ctl_scsiio *ctsio, 417 struct scsi_per_res_out *cdb, 418 struct scsi_per_res_out_parms* param); 419 static void ctl_pro_preempt_other(struct ctl_lun *lun, 420 union ctl_ha_msg *msg); 421 static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); 422 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 423 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 424 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 425 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 426 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 427 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 428 int alloc_len); 429 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 430 int alloc_len); 431 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 432 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 433 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 434 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 435 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 436 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2); 437 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 438 union ctl_io *pending_io, union ctl_io *ooa_io); 439 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 440 union ctl_io *starting_io); 441 static int ctl_check_blocked(struct ctl_lun *lun); 442 static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, 443 struct ctl_lun *lun, 444 const struct ctl_cmd_entry *entry, 445 struct ctl_scsiio *ctsio); 446 //static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc); 447 static void ctl_failover(void); 448 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 449 struct ctl_scsiio *ctsio); 450 static int ctl_scsiio(struct ctl_scsiio *ctsio); 451 452 static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 453 static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 454 ctl_ua_type ua_type); 455 static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, 456 ctl_ua_type ua_type); 457 static int ctl_abort_task(union ctl_io *io); 458 static int ctl_abort_task_set(union ctl_io *io); 459 static int ctl_i_t_nexus_reset(union ctl_io *io); 460 static void ctl_run_task(union ctl_io *io); 461 #ifdef CTL_IO_DELAY 462 static void ctl_datamove_timer_wakeup(void *arg); 463 static void ctl_done_timer_wakeup(void *arg); 464 #endif /* CTL_IO_DELAY */ 465 466 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 467 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 468 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 469 static void ctl_datamove_remote_write(union ctl_io *io); 470 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 471 static void ctl_datamove_remote_read_cb(struct 
ctl_ha_dt_req *rq); 472 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 473 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 474 ctl_ha_dt_cb callback); 475 static void ctl_datamove_remote_read(union ctl_io *io); 476 static void ctl_datamove_remote(union ctl_io *io); 477 static int ctl_process_done(union ctl_io *io); 478 static void ctl_lun_thread(void *arg); 479 static void ctl_thresh_thread(void *arg); 480 static void ctl_work_thread(void *arg); 481 static void ctl_enqueue_incoming(union ctl_io *io); 482 static void ctl_enqueue_rtr(union ctl_io *io); 483 static void ctl_enqueue_done(union ctl_io *io); 484 static void ctl_enqueue_isc(union ctl_io *io); 485 static const struct ctl_cmd_entry * 486 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 487 static const struct ctl_cmd_entry * 488 ctl_validate_command(struct ctl_scsiio *ctsio); 489 static int ctl_cmd_applicable(uint8_t lun_type, 490 const struct ctl_cmd_entry *entry); 491 492 /* 493 * Load the serialization table. This isn't very pretty, but is probably 494 * the easiest way to do it. 495 */ 496 #include "ctl_ser_table.c" 497 498 /* 499 * We only need to define open, close and ioctl routines for this driver. 500 */ 501 static struct cdevsw ctl_cdevsw = { 502 .d_version = D_VERSION, 503 .d_flags = 0, 504 .d_open = ctl_open, 505 .d_close = ctl_close, 506 .d_ioctl = ctl_ioctl, 507 .d_name = "ctl", 508 }; 509 510 511 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 512 MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests"); 513 514 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 515 516 static moduledata_t ctl_moduledata = { 517 "ctl", 518 ctl_module_event_handler, 519 NULL 520 }; 521 522 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 523 MODULE_VERSION(ctl, 1); 524 525 static struct ctl_frontend ioctl_frontend = 526 { 527 .name = "ioctl", 528 }; 529 530 static void 531 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 532 union ctl_ha_msg *msg_info) 533 { 534 struct ctl_scsiio *ctsio; 535 536 if (msg_info->hdr.original_sc == NULL) { 537 printf("%s: original_sc == NULL!\n", __func__); 538 /* XXX KDM now what? */ 539 return; 540 } 541 542 ctsio = &msg_info->hdr.original_sc->scsiio; 543 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 544 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 545 ctsio->io_hdr.status = msg_info->hdr.status; 546 ctsio->scsi_status = msg_info->scsi.scsi_status; 547 ctsio->sense_len = msg_info->scsi.sense_len; 548 ctsio->sense_residual = msg_info->scsi.sense_residual; 549 ctsio->residual = msg_info->scsi.residual; 550 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 551 sizeof(ctsio->sense_data)); 552 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 553 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 554 ctl_enqueue_isc((union ctl_io *)ctsio); 555 } 556 557 static void 558 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 559 union ctl_ha_msg *msg_info) 560 { 561 struct ctl_scsiio *ctsio; 562 563 if (msg_info->hdr.serializing_sc == NULL) { 564 printf("%s: serializing_sc == NULL!\n", __func__); 565 /* XXX KDM now what? */ 566 return; 567 } 568 569 ctsio = &msg_info->hdr.serializing_sc->scsiio; 570 #if 0 571 /* 572 * Attempt to catch the situation where an I/O has 573 * been freed, and we're using it again. 
574 */ 575 if (ctsio->io_hdr.io_type == 0xff) { 576 union ctl_io *tmp_io; 577 tmp_io = (union ctl_io *)ctsio; 578 printf("%s: %p use after free!\n", __func__, 579 ctsio); 580 printf("%s: type %d msg %d cdb %x iptl: " 581 "%d:%d:%d:%d tag 0x%04x " 582 "flag %#x status %x\n", 583 __func__, 584 tmp_io->io_hdr.io_type, 585 tmp_io->io_hdr.msg_type, 586 tmp_io->scsiio.cdb[0], 587 tmp_io->io_hdr.nexus.initid.id, 588 tmp_io->io_hdr.nexus.targ_port, 589 tmp_io->io_hdr.nexus.targ_target.id, 590 tmp_io->io_hdr.nexus.targ_lun, 591 (tmp_io->io_hdr.io_type == 592 CTL_IO_TASK) ? 593 tmp_io->taskio.tag_num : 594 tmp_io->scsiio.tag_num, 595 tmp_io->io_hdr.flags, 596 tmp_io->io_hdr.status); 597 } 598 #endif 599 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 600 ctl_enqueue_isc((union ctl_io *)ctsio); 601 } 602 603 /* 604 * ISC (Inter Shelf Communication) event handler. Events from the HA 605 * subsystem come in here. 606 */ 607 static void 608 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 609 { 610 struct ctl_softc *ctl_softc; 611 union ctl_io *io; 612 struct ctl_prio *presio; 613 ctl_ha_status isc_status; 614 615 ctl_softc = control_softc; 616 io = NULL; 617 618 619 #if 0 620 printf("CTL: Isc Msg event %d\n", event); 621 #endif 622 if (event == CTL_HA_EVT_MSG_RECV) { 623 union ctl_ha_msg msg_info; 624 625 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 626 sizeof(msg_info), /*wait*/ 0); 627 #if 0 628 printf("CTL: msg_type %d\n", msg_info.msg_type); 629 #endif 630 if (isc_status != 0) { 631 printf("Error receiving message, status = %d\n", 632 isc_status); 633 return; 634 } 635 636 switch (msg_info.hdr.msg_type) { 637 case CTL_MSG_SERIALIZE: 638 #if 0 639 printf("Serialize\n"); 640 #endif 641 io = ctl_alloc_io_nowait(ctl_softc->othersc_pool); 642 if (io == NULL) { 643 printf("ctl_isc_event_handler: can't allocate " 644 "ctl_io!\n"); 645 /* Bad Juju */ 646 /* Need to set busy and send msg back */ 647 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 648 msg_info.hdr.status = CTL_SCSI_ERROR; 649 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; 650 msg_info.scsi.sense_len = 0; 651 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 652 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ 653 } 654 goto bailout; 655 } 656 ctl_zero_io(io); 657 // populate ctsio from msg_info 658 io->io_hdr.io_type = CTL_IO_SCSI; 659 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 660 io->io_hdr.original_sc = msg_info.hdr.original_sc; 661 #if 0 662 printf("pOrig %x\n", (int)msg_info.original_sc); 663 #endif 664 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 665 CTL_FLAG_IO_ACTIVE; 666 /* 667 * If we're in serialization-only mode, we don't 668 * want to go through full done processing. Thus 669 * the COPY flag. 670 * 671 * XXX KDM add another flag that is more specific. 
672 */ 673 if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY) 674 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 675 io->io_hdr.nexus = msg_info.hdr.nexus; 676 #if 0 677 printf("targ %d, port %d, iid %d, lun %d\n", 678 io->io_hdr.nexus.targ_target.id, 679 io->io_hdr.nexus.targ_port, 680 io->io_hdr.nexus.initid.id, 681 io->io_hdr.nexus.targ_lun); 682 #endif 683 io->scsiio.tag_num = msg_info.scsi.tag_num; 684 io->scsiio.tag_type = msg_info.scsi.tag_type; 685 memcpy(io->scsiio.cdb, msg_info.scsi.cdb, 686 CTL_MAX_CDBLEN); 687 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 688 const struct ctl_cmd_entry *entry; 689 690 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 691 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 692 io->io_hdr.flags |= 693 entry->flags & CTL_FLAG_DATA_MASK; 694 } 695 ctl_enqueue_isc(io); 696 break; 697 698 /* Performed on the Originating SC, XFER mode only */ 699 case CTL_MSG_DATAMOVE: { 700 struct ctl_sg_entry *sgl; 701 int i, j; 702 703 io = msg_info.hdr.original_sc; 704 if (io == NULL) { 705 printf("%s: original_sc == NULL!\n", __func__); 706 /* XXX KDM do something here */ 707 break; 708 } 709 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 710 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 711 /* 712 * Keep track of this, we need to send it back over 713 * when the datamove is complete. 714 */ 715 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 716 717 if (msg_info.dt.sg_sequence == 0) { 718 /* 719 * XXX KDM we use the preallocated S/G list 720 * here, but we'll need to change this to 721 * dynamic allocation if we need larger S/G 722 * lists. 723 */ 724 if (msg_info.dt.kern_sg_entries > 725 sizeof(io->io_hdr.remote_sglist) / 726 sizeof(io->io_hdr.remote_sglist[0])) { 727 printf("%s: number of S/G entries " 728 "needed %u > allocated num %zd\n", 729 __func__, 730 msg_info.dt.kern_sg_entries, 731 sizeof(io->io_hdr.remote_sglist)/ 732 sizeof(io->io_hdr.remote_sglist[0])); 733 734 /* 735 * XXX KDM send a message back to 736 * the other side to shut down the 737 * DMA. The error will come back 738 * through via the normal channel. 739 */ 740 break; 741 } 742 sgl = io->io_hdr.remote_sglist; 743 memset(sgl, 0, 744 sizeof(io->io_hdr.remote_sglist)); 745 746 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 747 748 io->scsiio.kern_sg_entries = 749 msg_info.dt.kern_sg_entries; 750 io->scsiio.rem_sg_entries = 751 msg_info.dt.kern_sg_entries; 752 io->scsiio.kern_data_len = 753 msg_info.dt.kern_data_len; 754 io->scsiio.kern_total_len = 755 msg_info.dt.kern_total_len; 756 io->scsiio.kern_data_resid = 757 msg_info.dt.kern_data_resid; 758 io->scsiio.kern_rel_offset = 759 msg_info.dt.kern_rel_offset; 760 /* 761 * Clear out per-DMA flags. 762 */ 763 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK; 764 /* 765 * Add per-DMA flags that are set for this 766 * particular DMA request. 
767 */ 768 io->io_hdr.flags |= msg_info.dt.flags & 769 CTL_FLAG_RDMA_MASK; 770 } else 771 sgl = (struct ctl_sg_entry *) 772 io->scsiio.kern_data_ptr; 773 774 for (i = msg_info.dt.sent_sg_entries, j = 0; 775 i < (msg_info.dt.sent_sg_entries + 776 msg_info.dt.cur_sg_entries); i++, j++) { 777 sgl[i].addr = msg_info.dt.sg_list[j].addr; 778 sgl[i].len = msg_info.dt.sg_list[j].len; 779 780 #if 0 781 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 782 __func__, 783 msg_info.dt.sg_list[j].addr, 784 msg_info.dt.sg_list[j].len, 785 sgl[i].addr, sgl[i].len, j, i); 786 #endif 787 } 788 #if 0 789 memcpy(&sgl[msg_info.dt.sent_sg_entries], 790 msg_info.dt.sg_list, 791 sizeof(*sgl) * msg_info.dt.cur_sg_entries); 792 #endif 793 794 /* 795 * If this is the last piece of the I/O, we've got 796 * the full S/G list. Queue processing in the thread. 797 * Otherwise wait for the next piece. 798 */ 799 if (msg_info.dt.sg_last != 0) 800 ctl_enqueue_isc(io); 801 break; 802 } 803 /* Performed on the Serializing (primary) SC, XFER mode only */ 804 case CTL_MSG_DATAMOVE_DONE: { 805 if (msg_info.hdr.serializing_sc == NULL) { 806 printf("%s: serializing_sc == NULL!\n", 807 __func__); 808 /* XXX KDM now what? */ 809 break; 810 } 811 /* 812 * We grab the sense information here in case 813 * there was a failure, so we can return status 814 * back to the initiator. 815 */ 816 io = msg_info.hdr.serializing_sc; 817 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 818 io->io_hdr.status = msg_info.hdr.status; 819 io->scsiio.scsi_status = msg_info.scsi.scsi_status; 820 io->scsiio.sense_len = msg_info.scsi.sense_len; 821 io->scsiio.sense_residual =msg_info.scsi.sense_residual; 822 io->io_hdr.port_status = msg_info.scsi.fetd_status; 823 io->scsiio.residual = msg_info.scsi.residual; 824 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data, 825 sizeof(io->scsiio.sense_data)); 826 ctl_enqueue_isc(io); 827 break; 828 } 829 830 /* Preformed on Originating SC, SER_ONLY mode */ 831 case CTL_MSG_R2R: 832 io = msg_info.hdr.original_sc; 833 if (io == NULL) { 834 printf("%s: Major Bummer\n", __func__); 835 return; 836 } else { 837 #if 0 838 printf("pOrig %x\n",(int) ctsio); 839 #endif 840 } 841 io->io_hdr.msg_type = CTL_MSG_R2R; 842 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 843 ctl_enqueue_isc(io); 844 break; 845 846 /* 847 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 848 * mode. 849 * Performed on the Originating (i.e. secondary) SC in XFER 850 * mode 851 */ 852 case CTL_MSG_FINISH_IO: 853 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) 854 ctl_isc_handler_finish_xfer(ctl_softc, 855 &msg_info); 856 else 857 ctl_isc_handler_finish_ser_only(ctl_softc, 858 &msg_info); 859 break; 860 861 /* Preformed on Originating SC */ 862 case CTL_MSG_BAD_JUJU: 863 io = msg_info.hdr.original_sc; 864 if (io == NULL) { 865 printf("%s: Bad JUJU!, original_sc is NULL!\n", 866 __func__); 867 break; 868 } 869 ctl_copy_sense_data(&msg_info, io); 870 /* 871 * IO should have already been cleaned up on other 872 * SC so clear this flag so we won't send a message 873 * back to finish the IO there. 
874 */ 875 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 876 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 877 878 /* io = msg_info.hdr.serializing_sc; */ 879 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 880 ctl_enqueue_isc(io); 881 break; 882 883 /* Handle resets sent from the other side */ 884 case CTL_MSG_MANAGE_TASKS: { 885 struct ctl_taskio *taskio; 886 taskio = (struct ctl_taskio *)ctl_alloc_io_nowait( 887 ctl_softc->othersc_pool); 888 if (taskio == NULL) { 889 printf("ctl_isc_event_handler: can't allocate " 890 "ctl_io!\n"); 891 /* Bad Juju */ 892 /* should I just call the proper reset func 893 here??? */ 894 goto bailout; 895 } 896 ctl_zero_io((union ctl_io *)taskio); 897 taskio->io_hdr.io_type = CTL_IO_TASK; 898 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 899 taskio->io_hdr.nexus = msg_info.hdr.nexus; 900 taskio->task_action = msg_info.task.task_action; 901 taskio->tag_num = msg_info.task.tag_num; 902 taskio->tag_type = msg_info.task.tag_type; 903 #ifdef CTL_TIME_IO 904 taskio->io_hdr.start_time = time_uptime; 905 getbintime(&taskio->io_hdr.start_bt); 906 #if 0 907 cs_prof_gettime(&taskio->io_hdr.start_ticks); 908 #endif 909 #endif /* CTL_TIME_IO */ 910 ctl_run_task((union ctl_io *)taskio); 911 break; 912 } 913 /* Persistent Reserve action which needs attention */ 914 case CTL_MSG_PERS_ACTION: 915 presio = (struct ctl_prio *)ctl_alloc_io_nowait( 916 ctl_softc->othersc_pool); 917 if (presio == NULL) { 918 printf("ctl_isc_event_handler: can't allocate " 919 "ctl_io!\n"); 920 /* Bad Juju */ 921 /* Need to set busy and send msg back */ 922 goto bailout; 923 } 924 ctl_zero_io((union ctl_io *)presio); 925 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 926 presio->pr_msg = msg_info.pr; 927 ctl_enqueue_isc((union ctl_io *)presio); 928 break; 929 case CTL_MSG_SYNC_FE: 930 rcv_sync_msg = 1; 931 break; 932 default: 933 printf("How did I get here?\n"); 934 } 935 } else if (event == CTL_HA_EVT_MSG_SENT) { 936 if (param != CTL_HA_STATUS_SUCCESS) { 937 printf("Bad status from ctl_ha_msg_send status %d\n", 938 param); 939 } 940 return; 941 } else if (event == CTL_HA_EVT_DISCONNECT) { 942 printf("CTL: Got a disconnect from Isc\n"); 943 return; 944 } else { 945 printf("ctl_isc_event_handler: Unknown event %d\n", event); 946 return; 947 } 948 949 bailout: 950 return; 951 } 952 953 static void 954 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 955 { 956 struct scsi_sense_data *sense; 957 958 sense = &dest->scsiio.sense_data; 959 bcopy(&src->scsi.sense_data, sense, sizeof(*sense)); 960 dest->scsiio.scsi_status = src->scsi.scsi_status; 961 dest->scsiio.sense_len = src->scsi.sense_len; 962 dest->io_hdr.status = src->hdr.status; 963 } 964 965 static void 966 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 967 { 968 ctl_ua_type *pu; 969 970 mtx_assert(&lun->lun_lock, MA_OWNED); 971 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 972 if (pu == NULL) 973 return; 974 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 975 } 976 977 static void 978 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 979 { 980 int i, j; 981 982 mtx_assert(&lun->lun_lock, MA_OWNED); 983 for (i = 0; i < CTL_MAX_PORTS; i++) { 984 if (lun->pending_ua[i] == NULL) 985 continue; 986 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 987 if (i * CTL_MAX_INIT_PER_PORT + j == except) 988 continue; 989 lun->pending_ua[i][j] |= ua; 990 } 991 } 992 } 993 994 static void 995 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 996 { 997 ctl_ua_type *pu; 998 999 mtx_assert(&lun->lun_lock, 
MA_OWNED); 1000 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1001 if (pu == NULL) 1002 return; 1003 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1004 } 1005 1006 static void 1007 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1008 { 1009 int i, j; 1010 1011 mtx_assert(&lun->lun_lock, MA_OWNED); 1012 for (i = 0; i < CTL_MAX_PORTS; i++) { 1013 if (lun->pending_ua[i] == NULL) 1014 continue; 1015 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1016 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1017 continue; 1018 lun->pending_ua[i][j] &= ~ua; 1019 } 1020 } 1021 } 1022 1023 static int 1024 ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) 1025 { 1026 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1027 struct ctl_lun *lun; 1028 int error, value; 1029 1030 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) 1031 value = 0; 1032 else 1033 value = 1; 1034 1035 error = sysctl_handle_int(oidp, &value, 0, req); 1036 if ((error != 0) || (req->newptr == NULL)) 1037 return (error); 1038 1039 mtx_lock(&softc->ctl_lock); 1040 if (value == 0) 1041 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1042 else 1043 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1044 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1045 mtx_lock(&lun->lun_lock); 1046 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1047 mtx_unlock(&lun->lun_lock); 1048 } 1049 mtx_unlock(&softc->ctl_lock); 1050 return (0); 1051 } 1052 1053 static int 1054 ctl_init(void) 1055 { 1056 struct ctl_softc *softc; 1057 void *other_pool; 1058 struct ctl_port *port; 1059 int i, error, retval; 1060 //int isc_retval; 1061 1062 retval = 0; 1063 ctl_pause_rtr = 0; 1064 rcv_sync_msg = 0; 1065 1066 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1067 M_WAITOK | M_ZERO); 1068 softc = control_softc; 1069 1070 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1071 "cam/ctl"); 1072 1073 softc->dev->si_drv1 = softc; 1074 1075 /* 1076 * By default, return a "bad LUN" peripheral qualifier for unknown 1077 * LUNs. The user can override this default using the tunable or 1078 * sysctl. See the comment in ctl_inquiry_std() for more details. 1079 */ 1080 softc->inquiry_pq_no_lun = 1; 1081 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", 1082 &softc->inquiry_pq_no_lun); 1083 sysctl_ctx_init(&softc->sysctl_ctx); 1084 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1085 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1086 CTLFLAG_RD, 0, "CAM Target Layer"); 1087 1088 if (softc->sysctl_tree == NULL) { 1089 printf("%s: unable to allocate sysctl tree\n", __func__); 1090 destroy_dev(softc->dev); 1091 free(control_softc, M_DEVBUF); 1092 control_softc = NULL; 1093 return (ENOMEM); 1094 } 1095 1096 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1097 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1098 "inquiry_pq_no_lun", CTLFLAG_RW, 1099 &softc->inquiry_pq_no_lun, 0, 1100 "Report no lun possible for invalid LUNs"); 1101 1102 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1103 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1104 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1105 softc->open_count = 0; 1106 1107 /* 1108 * Default to actually sending a SYNCHRONIZE CACHE command down to 1109 * the drive. 1110 */ 1111 softc->flags = CTL_FLAG_REAL_SYNC; 1112 1113 /* 1114 * In Copan's HA scheme, the "master" and "slave" roles are 1115 * figured out through the slot the controller is in. Although it 1116 * is an active/active system, someone has to be in charge. 
1117 */ 1118 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1119 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1120 "HA head ID (0 - no HA)"); 1121 if (softc->ha_id == 0) { 1122 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1123 softc->is_single = 1; 1124 softc->port_offset = 0; 1125 } else 1126 softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS; 1127 softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT; 1128 1129 /* 1130 * XXX KDM need to figure out where we want to get our target ID 1131 * and WWID. Is it different on each port? 1132 */ 1133 softc->target.id = 0; 1134 softc->target.wwid[0] = 0x12345678; 1135 softc->target.wwid[1] = 0x87654321; 1136 STAILQ_INIT(&softc->lun_list); 1137 STAILQ_INIT(&softc->pending_lun_queue); 1138 STAILQ_INIT(&softc->fe_list); 1139 STAILQ_INIT(&softc->port_list); 1140 STAILQ_INIT(&softc->be_list); 1141 ctl_tpc_init(softc); 1142 1143 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1144 &other_pool) != 0) 1145 { 1146 printf("ctl: can't allocate %d entry other SC pool, " 1147 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1148 return (ENOMEM); 1149 } 1150 softc->othersc_pool = other_pool; 1151 1152 if (worker_threads <= 0) 1153 worker_threads = max(1, mp_ncpus / 4); 1154 if (worker_threads > CTL_MAX_THREADS) 1155 worker_threads = CTL_MAX_THREADS; 1156 1157 for (i = 0; i < worker_threads; i++) { 1158 struct ctl_thread *thr = &softc->threads[i]; 1159 1160 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1161 thr->ctl_softc = softc; 1162 STAILQ_INIT(&thr->incoming_queue); 1163 STAILQ_INIT(&thr->rtr_queue); 1164 STAILQ_INIT(&thr->done_queue); 1165 STAILQ_INIT(&thr->isc_queue); 1166 1167 error = kproc_kthread_add(ctl_work_thread, thr, 1168 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1169 if (error != 0) { 1170 printf("error creating CTL work thread!\n"); 1171 ctl_pool_free(other_pool); 1172 return (error); 1173 } 1174 } 1175 error = kproc_kthread_add(ctl_lun_thread, softc, 1176 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1177 if (error != 0) { 1178 printf("error creating CTL lun thread!\n"); 1179 ctl_pool_free(other_pool); 1180 return (error); 1181 } 1182 error = kproc_kthread_add(ctl_thresh_thread, softc, 1183 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1184 if (error != 0) { 1185 printf("error creating CTL threshold thread!\n"); 1186 ctl_pool_free(other_pool); 1187 return (error); 1188 } 1189 if (bootverbose) 1190 printf("ctl: CAM Target Layer loaded\n"); 1191 1192 /* 1193 * Initialize the ioctl front end. 
1194 */ 1195 ctl_frontend_register(&ioctl_frontend); 1196 port = &softc->ioctl_info.port; 1197 port->frontend = &ioctl_frontend; 1198 sprintf(softc->ioctl_info.port_name, "ioctl"); 1199 port->port_type = CTL_PORT_IOCTL; 1200 port->num_requested_ctl_io = 100; 1201 port->port_name = softc->ioctl_info.port_name; 1202 port->port_online = ctl_ioctl_online; 1203 port->port_offline = ctl_ioctl_offline; 1204 port->onoff_arg = &softc->ioctl_info; 1205 port->lun_enable = ctl_ioctl_lun_enable; 1206 port->lun_disable = ctl_ioctl_lun_disable; 1207 port->targ_lun_arg = &softc->ioctl_info; 1208 port->fe_datamove = ctl_ioctl_datamove; 1209 port->fe_done = ctl_ioctl_done; 1210 port->max_targets = 15; 1211 port->max_target_id = 15; 1212 1213 if (ctl_port_register(&softc->ioctl_info.port) != 0) { 1214 printf("ctl: ioctl front end registration failed, will " 1215 "continue anyway\n"); 1216 } 1217 1218 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1219 OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN, 1220 softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head"); 1221 1222 #ifdef CTL_IO_DELAY 1223 if (sizeof(struct callout) > CTL_TIMER_BYTES) { 1224 printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n", 1225 sizeof(struct callout), CTL_TIMER_BYTES); 1226 return (EINVAL); 1227 } 1228 #endif /* CTL_IO_DELAY */ 1229 1230 return (0); 1231 } 1232 1233 void 1234 ctl_shutdown(void) 1235 { 1236 struct ctl_softc *softc; 1237 struct ctl_lun *lun, *next_lun; 1238 1239 softc = (struct ctl_softc *)control_softc; 1240 1241 if (ctl_port_deregister(&softc->ioctl_info.port) != 0) 1242 printf("ctl: ioctl front end deregistration failed\n"); 1243 1244 mtx_lock(&softc->ctl_lock); 1245 1246 /* 1247 * Free up each LUN. 1248 */ 1249 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1250 next_lun = STAILQ_NEXT(lun, links); 1251 ctl_free_lun(lun); 1252 } 1253 1254 mtx_unlock(&softc->ctl_lock); 1255 1256 ctl_frontend_deregister(&ioctl_frontend); 1257 1258 #if 0 1259 ctl_shutdown_thread(softc->work_thread); 1260 mtx_destroy(&softc->queue_lock); 1261 #endif 1262 1263 ctl_tpc_shutdown(softc); 1264 uma_zdestroy(softc->io_zone); 1265 mtx_destroy(&softc->ctl_lock); 1266 1267 destroy_dev(softc->dev); 1268 1269 sysctl_ctx_free(&softc->sysctl_ctx); 1270 1271 free(control_softc, M_DEVBUF); 1272 control_softc = NULL; 1273 1274 if (bootverbose) 1275 printf("ctl: CAM Target Layer unloaded\n"); 1276 } 1277 1278 static int 1279 ctl_module_event_handler(module_t mod, int what, void *arg) 1280 { 1281 1282 switch (what) { 1283 case MOD_LOAD: 1284 return (ctl_init()); 1285 case MOD_UNLOAD: 1286 return (EBUSY); 1287 default: 1288 return (EOPNOTSUPP); 1289 } 1290 } 1291 1292 /* 1293 * XXX KDM should we do some access checks here? Bump a reference count to 1294 * prevent a CTL module from being unloaded while someone has it open? 
1295 */ 1296 static int 1297 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1298 { 1299 return (0); 1300 } 1301 1302 static int 1303 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1304 { 1305 return (0); 1306 } 1307 1308 int 1309 ctl_port_enable(ctl_port_type port_type) 1310 { 1311 struct ctl_softc *softc = control_softc; 1312 struct ctl_port *port; 1313 1314 if (softc->is_single == 0) { 1315 union ctl_ha_msg msg_info; 1316 int isc_retval; 1317 1318 #if 0 1319 printf("%s: HA mode, synchronizing frontend enable\n", 1320 __func__); 1321 #endif 1322 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE; 1323 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1324 sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) { 1325 printf("Sync msg send error retval %d\n", isc_retval); 1326 } 1327 if (!rcv_sync_msg) { 1328 isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 1329 sizeof(msg_info), 1); 1330 } 1331 #if 0 1332 printf("CTL:Frontend Enable\n"); 1333 } else { 1334 printf("%s: single mode, skipping frontend synchronization\n", 1335 __func__); 1336 #endif 1337 } 1338 1339 STAILQ_FOREACH(port, &softc->port_list, links) { 1340 if (port_type & port->port_type) 1341 { 1342 #if 0 1343 printf("port %d\n", port->targ_port); 1344 #endif 1345 ctl_port_online(port); 1346 } 1347 } 1348 1349 return (0); 1350 } 1351 1352 int 1353 ctl_port_disable(ctl_port_type port_type) 1354 { 1355 struct ctl_softc *softc; 1356 struct ctl_port *port; 1357 1358 softc = control_softc; 1359 1360 STAILQ_FOREACH(port, &softc->port_list, links) { 1361 if (port_type & port->port_type) 1362 ctl_port_offline(port); 1363 } 1364 1365 return (0); 1366 } 1367 1368 /* 1369 * Returns 0 for success, 1 for failure. 1370 * Currently the only failure mode is if there aren't enough entries 1371 * allocated. So, in case of a failure, look at num_entries_dropped, 1372 * reallocate and try again. 
1373 */ 1374 int 1375 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1376 int *num_entries_filled, int *num_entries_dropped, 1377 ctl_port_type port_type, int no_virtual) 1378 { 1379 struct ctl_softc *softc; 1380 struct ctl_port *port; 1381 int entries_dropped, entries_filled; 1382 int retval; 1383 int i; 1384 1385 softc = control_softc; 1386 1387 retval = 0; 1388 entries_filled = 0; 1389 entries_dropped = 0; 1390 1391 i = 0; 1392 mtx_lock(&softc->ctl_lock); 1393 STAILQ_FOREACH(port, &softc->port_list, links) { 1394 struct ctl_port_entry *entry; 1395 1396 if ((port->port_type & port_type) == 0) 1397 continue; 1398 1399 if ((no_virtual != 0) 1400 && (port->virtual_port != 0)) 1401 continue; 1402 1403 if (entries_filled >= num_entries_alloced) { 1404 entries_dropped++; 1405 continue; 1406 } 1407 entry = &entries[i]; 1408 1409 entry->port_type = port->port_type; 1410 strlcpy(entry->port_name, port->port_name, 1411 sizeof(entry->port_name)); 1412 entry->physical_port = port->physical_port; 1413 entry->virtual_port = port->virtual_port; 1414 entry->wwnn = port->wwnn; 1415 entry->wwpn = port->wwpn; 1416 1417 i++; 1418 entries_filled++; 1419 } 1420 1421 mtx_unlock(&softc->ctl_lock); 1422 1423 if (entries_dropped > 0) 1424 retval = 1; 1425 1426 *num_entries_dropped = entries_dropped; 1427 *num_entries_filled = entries_filled; 1428 1429 return (retval); 1430 } 1431 1432 static void 1433 ctl_ioctl_online(void *arg) 1434 { 1435 struct ctl_ioctl_info *ioctl_info; 1436 1437 ioctl_info = (struct ctl_ioctl_info *)arg; 1438 1439 ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED; 1440 } 1441 1442 static void 1443 ctl_ioctl_offline(void *arg) 1444 { 1445 struct ctl_ioctl_info *ioctl_info; 1446 1447 ioctl_info = (struct ctl_ioctl_info *)arg; 1448 1449 ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED; 1450 } 1451 1452 /* 1453 * Remove an initiator by port number and initiator ID. 1454 * Returns 0 for success, -1 for failure. 1455 */ 1456 int 1457 ctl_remove_initiator(struct ctl_port *port, int iid) 1458 { 1459 struct ctl_softc *softc = control_softc; 1460 1461 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1462 1463 if (iid > CTL_MAX_INIT_PER_PORT) { 1464 printf("%s: initiator ID %u > maximun %u!\n", 1465 __func__, iid, CTL_MAX_INIT_PER_PORT); 1466 return (-1); 1467 } 1468 1469 mtx_lock(&softc->ctl_lock); 1470 port->wwpn_iid[iid].in_use--; 1471 port->wwpn_iid[iid].last_use = time_uptime; 1472 mtx_unlock(&softc->ctl_lock); 1473 1474 return (0); 1475 } 1476 1477 /* 1478 * Add an initiator to the initiator map. 1479 * Returns iid for success, < 0 for failure. 
1480 */ 1481 int 1482 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1483 { 1484 struct ctl_softc *softc = control_softc; 1485 time_t best_time; 1486 int i, best; 1487 1488 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1489 1490 if (iid >= CTL_MAX_INIT_PER_PORT) { 1491 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1492 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1493 free(name, M_CTL); 1494 return (-1); 1495 } 1496 1497 mtx_lock(&softc->ctl_lock); 1498 1499 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1500 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1501 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1502 iid = i; 1503 break; 1504 } 1505 if (name != NULL && port->wwpn_iid[i].name != NULL && 1506 strcmp(name, port->wwpn_iid[i].name) == 0) { 1507 iid = i; 1508 break; 1509 } 1510 } 1511 } 1512 1513 if (iid < 0) { 1514 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1515 if (port->wwpn_iid[i].in_use == 0 && 1516 port->wwpn_iid[i].wwpn == 0 && 1517 port->wwpn_iid[i].name == NULL) { 1518 iid = i; 1519 break; 1520 } 1521 } 1522 } 1523 1524 if (iid < 0) { 1525 best = -1; 1526 best_time = INT32_MAX; 1527 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1528 if (port->wwpn_iid[i].in_use == 0) { 1529 if (port->wwpn_iid[i].last_use < best_time) { 1530 best = i; 1531 best_time = port->wwpn_iid[i].last_use; 1532 } 1533 } 1534 } 1535 iid = best; 1536 } 1537 1538 if (iid < 0) { 1539 mtx_unlock(&softc->ctl_lock); 1540 free(name, M_CTL); 1541 return (-2); 1542 } 1543 1544 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1545 /* 1546 * This is not an error yet. 1547 */ 1548 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1549 #if 0 1550 printf("%s: port %d iid %u WWPN %#jx arrived" 1551 " again\n", __func__, port->targ_port, 1552 iid, (uintmax_t)wwpn); 1553 #endif 1554 goto take; 1555 } 1556 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1557 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1558 #if 0 1559 printf("%s: port %d iid %u name '%s' arrived" 1560 " again\n", __func__, port->targ_port, 1561 iid, name); 1562 #endif 1563 goto take; 1564 } 1565 1566 /* 1567 * This is an error, but what do we do about it? The 1568 * driver is telling us we have a new WWPN for this 1569 * initiator ID, so we pretty much need to use it. 1570 */ 1571 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1572 " but WWPN %#jx '%s' is still at that address\n", 1573 __func__, port->targ_port, iid, wwpn, name, 1574 (uintmax_t)port->wwpn_iid[iid].wwpn, 1575 port->wwpn_iid[iid].name); 1576 1577 /* 1578 * XXX KDM clear have_ca and ua_pending on each LUN for 1579 * this initiator. 
1580 */ 1581 } 1582 take: 1583 free(port->wwpn_iid[iid].name, M_CTL); 1584 port->wwpn_iid[iid].name = name; 1585 port->wwpn_iid[iid].wwpn = wwpn; 1586 port->wwpn_iid[iid].in_use++; 1587 mtx_unlock(&softc->ctl_lock); 1588 1589 return (iid); 1590 } 1591 1592 static int 1593 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1594 { 1595 int len; 1596 1597 switch (port->port_type) { 1598 case CTL_PORT_FC: 1599 { 1600 struct scsi_transportid_fcp *id = 1601 (struct scsi_transportid_fcp *)buf; 1602 if (port->wwpn_iid[iid].wwpn == 0) 1603 return (0); 1604 memset(id, 0, sizeof(*id)); 1605 id->format_protocol = SCSI_PROTO_FC; 1606 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1607 return (sizeof(*id)); 1608 } 1609 case CTL_PORT_ISCSI: 1610 { 1611 struct scsi_transportid_iscsi_port *id = 1612 (struct scsi_transportid_iscsi_port *)buf; 1613 if (port->wwpn_iid[iid].name == NULL) 1614 return (0); 1615 memset(id, 0, 256); 1616 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1617 SCSI_PROTO_ISCSI; 1618 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1619 len = roundup2(min(len, 252), 4); 1620 scsi_ulto2b(len, id->additional_length); 1621 return (sizeof(*id) + len); 1622 } 1623 case CTL_PORT_SAS: 1624 { 1625 struct scsi_transportid_sas *id = 1626 (struct scsi_transportid_sas *)buf; 1627 if (port->wwpn_iid[iid].wwpn == 0) 1628 return (0); 1629 memset(id, 0, sizeof(*id)); 1630 id->format_protocol = SCSI_PROTO_SAS; 1631 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1632 return (sizeof(*id)); 1633 } 1634 default: 1635 { 1636 struct scsi_transportid_spi *id = 1637 (struct scsi_transportid_spi *)buf; 1638 memset(id, 0, sizeof(*id)); 1639 id->format_protocol = SCSI_PROTO_SPI; 1640 scsi_ulto2b(iid, id->scsi_addr); 1641 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1642 return (sizeof(*id)); 1643 } 1644 } 1645 } 1646 1647 static int 1648 ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1649 { 1650 return (0); 1651 } 1652 1653 static int 1654 ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) 1655 { 1656 return (0); 1657 } 1658 1659 /* 1660 * Data movement routine for the CTL ioctl frontend port. 1661 */ 1662 static int 1663 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) 1664 { 1665 struct ctl_sg_entry *ext_sglist, *kern_sglist; 1666 struct ctl_sg_entry ext_entry, kern_entry; 1667 int ext_sglen, ext_sg_entries, kern_sg_entries; 1668 int ext_sg_start, ext_offset; 1669 int len_to_copy, len_copied; 1670 int kern_watermark, ext_watermark; 1671 int ext_sglist_malloced; 1672 int i, j; 1673 1674 ext_sglist_malloced = 0; 1675 ext_sg_start = 0; 1676 ext_offset = 0; 1677 1678 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); 1679 1680 /* 1681 * If this flag is set, fake the data transfer. 1682 */ 1683 if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { 1684 ctsio->ext_data_filled = ctsio->ext_data_len; 1685 goto bailout; 1686 } 1687 1688 /* 1689 * To simplify things here, if we have a single buffer, stick it in 1690 * a S/G entry and just make it a single entry S/G list. 
1691 */ 1692 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) { 1693 int len_seen; 1694 1695 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); 1696 1697 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, 1698 M_WAITOK); 1699 ext_sglist_malloced = 1; 1700 if (copyin(ctsio->ext_data_ptr, ext_sglist, 1701 ext_sglen) != 0) { 1702 ctl_set_internal_failure(ctsio, 1703 /*sks_valid*/ 0, 1704 /*retry_count*/ 0); 1705 goto bailout; 1706 } 1707 ext_sg_entries = ctsio->ext_sg_entries; 1708 len_seen = 0; 1709 for (i = 0; i < ext_sg_entries; i++) { 1710 if ((len_seen + ext_sglist[i].len) >= 1711 ctsio->ext_data_filled) { 1712 ext_sg_start = i; 1713 ext_offset = ctsio->ext_data_filled - len_seen; 1714 break; 1715 } 1716 len_seen += ext_sglist[i].len; 1717 } 1718 } else { 1719 ext_sglist = &ext_entry; 1720 ext_sglist->addr = ctsio->ext_data_ptr; 1721 ext_sglist->len = ctsio->ext_data_len; 1722 ext_sg_entries = 1; 1723 ext_sg_start = 0; 1724 ext_offset = ctsio->ext_data_filled; 1725 } 1726 1727 if (ctsio->kern_sg_entries > 0) { 1728 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; 1729 kern_sg_entries = ctsio->kern_sg_entries; 1730 } else { 1731 kern_sglist = &kern_entry; 1732 kern_sglist->addr = ctsio->kern_data_ptr; 1733 kern_sglist->len = ctsio->kern_data_len; 1734 kern_sg_entries = 1; 1735 } 1736 1737 1738 kern_watermark = 0; 1739 ext_watermark = ext_offset; 1740 len_copied = 0; 1741 for (i = ext_sg_start, j = 0; 1742 i < ext_sg_entries && j < kern_sg_entries;) { 1743 uint8_t *ext_ptr, *kern_ptr; 1744 1745 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark, 1746 kern_sglist[j].len - kern_watermark); 1747 1748 ext_ptr = (uint8_t *)ext_sglist[i].addr; 1749 ext_ptr = ext_ptr + ext_watermark; 1750 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 1751 /* 1752 * XXX KDM fix this! 
1753 */ 1754 panic("need to implement bus address support"); 1755 #if 0 1756 kern_ptr = bus_to_virt(kern_sglist[j].addr); 1757 #endif 1758 } else 1759 kern_ptr = (uint8_t *)kern_sglist[j].addr; 1760 kern_ptr = kern_ptr + kern_watermark; 1761 1762 kern_watermark += len_to_copy; 1763 ext_watermark += len_to_copy; 1764 1765 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == 1766 CTL_FLAG_DATA_IN) { 1767 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 1768 "bytes to user\n", len_to_copy)); 1769 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 1770 "to %p\n", kern_ptr, ext_ptr)); 1771 if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { 1772 ctl_set_internal_failure(ctsio, 1773 /*sks_valid*/ 0, 1774 /*retry_count*/ 0); 1775 goto bailout; 1776 } 1777 } else { 1778 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 1779 "bytes from user\n", len_to_copy)); 1780 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 1781 "to %p\n", ext_ptr, kern_ptr)); 1782 if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ 1783 ctl_set_internal_failure(ctsio, 1784 /*sks_valid*/ 0, 1785 /*retry_count*/0); 1786 goto bailout; 1787 } 1788 } 1789 1790 len_copied += len_to_copy; 1791 1792 if (ext_sglist[i].len == ext_watermark) { 1793 i++; 1794 ext_watermark = 0; 1795 } 1796 1797 if (kern_sglist[j].len == kern_watermark) { 1798 j++; 1799 kern_watermark = 0; 1800 } 1801 } 1802 1803 ctsio->ext_data_filled += len_copied; 1804 1805 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " 1806 "kern_sg_entries: %d\n", ext_sg_entries, 1807 kern_sg_entries)); 1808 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " 1809 "kern_data_len = %d\n", ctsio->ext_data_len, 1810 ctsio->kern_data_len)); 1811 1812 1813 /* XXX KDM set residual?? */ 1814 bailout: 1815 1816 if (ext_sglist_malloced != 0) 1817 free(ext_sglist, M_CTL); 1818 1819 return (CTL_RETVAL_COMPLETE); 1820 } 1821 1822 /* 1823 * Serialize a command that went down the "wrong" side, and so was sent to 1824 * this controller for execution. The logic is a little different than the 1825 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1826 * sent back to the other side, but in the success case, we execute the 1827 * command on this side (XFER mode) or tell the other side to execute it 1828 * (SER_ONLY mode). 1829 */ 1830 static int 1831 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1832 { 1833 struct ctl_softc *ctl_softc; 1834 union ctl_ha_msg msg_info; 1835 struct ctl_lun *lun; 1836 int retval = 0; 1837 uint32_t targ_lun; 1838 1839 ctl_softc = control_softc; 1840 1841 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 1842 lun = ctl_softc->ctl_luns[targ_lun]; 1843 if (lun==NULL) 1844 { 1845 /* 1846 * Why isn't LUN defined? The other side wouldn't 1847 * send a cmd if the LUN is undefined. 
1848 */ 1849 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1850 1851 /* "Logical unit not supported" */ 1852 ctl_set_sense_data(&msg_info.scsi.sense_data, 1853 lun, 1854 /*sense_format*/SSD_TYPE_NONE, 1855 /*current_error*/ 1, 1856 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1857 /*asc*/ 0x25, 1858 /*ascq*/ 0x00, 1859 SSD_ELEM_NONE); 1860 1861 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1862 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1863 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1864 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1865 msg_info.hdr.serializing_sc = NULL; 1866 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1867 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1868 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1869 } 1870 return(1); 1871 1872 } 1873 1874 mtx_lock(&lun->lun_lock); 1875 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1876 1877 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1878 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1879 ooa_links))) { 1880 case CTL_ACTION_BLOCK: 1881 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1882 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1883 blocked_links); 1884 break; 1885 case CTL_ACTION_PASS: 1886 case CTL_ACTION_SKIP: 1887 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 1888 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1889 ctl_enqueue_rtr((union ctl_io *)ctsio); 1890 } else { 1891 1892 /* send msg back to other side */ 1893 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1894 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1895 msg_info.hdr.msg_type = CTL_MSG_R2R; 1896 #if 0 1897 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc); 1898 #endif 1899 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1900 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1901 } 1902 } 1903 break; 1904 case CTL_ACTION_OVERLAP: 1905 /* OVERLAPPED COMMANDS ATTEMPTED */ 1906 ctl_set_sense_data(&msg_info.scsi.sense_data, 1907 lun, 1908 /*sense_format*/SSD_TYPE_NONE, 1909 /*current_error*/ 1, 1910 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1911 /*asc*/ 0x4E, 1912 /*ascq*/ 0x00, 1913 SSD_ELEM_NONE); 1914 1915 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1916 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1917 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1918 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1919 msg_info.hdr.serializing_sc = NULL; 1920 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1921 #if 0 1922 printf("BAD JUJU:Major Bummer Overlap\n"); 1923 #endif 1924 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1925 retval = 1; 1926 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1927 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1928 } 1929 break; 1930 case CTL_ACTION_OVERLAP_TAG: 1931 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1932 ctl_set_sense_data(&msg_info.scsi.sense_data, 1933 lun, 1934 /*sense_format*/SSD_TYPE_NONE, 1935 /*current_error*/ 1, 1936 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1937 /*asc*/ 0x4D, 1938 /*ascq*/ ctsio->tag_num & 0xff, 1939 SSD_ELEM_NONE); 1940 1941 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1942 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1943 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1944 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1945 msg_info.hdr.serializing_sc = NULL; 1946 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1947 #if 0 1948 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1949 #endif 1950 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1951 retval = 1; 1952 if 
	    (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	mtx_unlock(&lun->lun_lock);
	return (retval);
}
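/*
 * Note on the ioctl frontend handshake below: ctl_ioctl_submit_wait()
 * sleeps on params.sem, and ctl_ioctl_datamove() / ctl_ioctl_done() wake it
 * with the state set to CTL_IOCTL_DATAMOVE for each data transfer and to
 * CTL_IOCTL_DONE once status has been set, so a single command may cycle
 * INPROG -> DATAMOVE -> INPROG several times before it reaches DONE.
 */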
static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_ioctl_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_startstop_info *sd_info;

	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;

	sd_info->hs_info.status = metatask->status;
	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
	sd_info->hs_info.luns_complete =
		metatask->taskinfo.startstop.luns_complete;
	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;

	cv_broadcast(&sd_info->sem);
}

static void
ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;

	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;

	mtx_lock(fe_bbr_info->lock);
	fe_bbr_info->bbr_info->status = metatask->status;
	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
	fe_bbr_info->wakeup_done = 1;
	mtx_unlock(fe_bbr_info->lock);

	cv_broadcast(&fe_bbr_info->sem);
}

/*
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;

	retval = 0;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
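		 * (The caller converts any excess into ooa_hdr->dropped_num
		 * and reports CTL_OOA_NEED_MORE_SPACE.)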
2152 */ 2153 if (*cur_fill_num >= ooa_hdr->alloc_num) 2154 continue; 2155 2156 entry = &kern_entries[*cur_fill_num]; 2157 2158 entry->tag_num = io->scsiio.tag_num; 2159 entry->lun_num = lun->lun; 2160 #ifdef CTL_TIME_IO 2161 entry->start_bt = io->io_hdr.start_bt; 2162 #endif 2163 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2164 entry->cdb_len = io->scsiio.cdb_len; 2165 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2166 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2167 2168 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2169 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2170 2171 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2172 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2173 2174 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2175 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2176 2177 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2178 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2179 } 2180 mtx_unlock(&lun->lun_lock); 2181 2182 return (retval); 2183 } 2184 2185 static void * 2186 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2187 size_t error_str_len) 2188 { 2189 void *kptr; 2190 2191 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2192 2193 if (copyin(user_addr, kptr, len) != 0) { 2194 snprintf(error_str, error_str_len, "Error copying %d bytes " 2195 "from user address %p to kernel address %p", len, 2196 user_addr, kptr); 2197 free(kptr, M_CTL); 2198 return (NULL); 2199 } 2200 2201 return (kptr); 2202 } 2203 2204 static void 2205 ctl_free_args(int num_args, struct ctl_be_arg *args) 2206 { 2207 int i; 2208 2209 if (args == NULL) 2210 return; 2211 2212 for (i = 0; i < num_args; i++) { 2213 free(args[i].kname, M_CTL); 2214 free(args[i].kvalue, M_CTL); 2215 } 2216 2217 free(args, M_CTL); 2218 } 2219 2220 static struct ctl_be_arg * 2221 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2222 char *error_str, size_t error_str_len) 2223 { 2224 struct ctl_be_arg *args; 2225 int i; 2226 2227 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2228 error_str, error_str_len); 2229 2230 if (args == NULL) 2231 goto bailout; 2232 2233 for (i = 0; i < num_args; i++) { 2234 args[i].kname = NULL; 2235 args[i].kvalue = NULL; 2236 } 2237 2238 for (i = 0; i < num_args; i++) { 2239 uint8_t *tmpptr; 2240 2241 args[i].kname = ctl_copyin_alloc(args[i].name, 2242 args[i].namelen, error_str, error_str_len); 2243 if (args[i].kname == NULL) 2244 goto bailout; 2245 2246 if (args[i].kname[args[i].namelen - 1] != '\0') { 2247 snprintf(error_str, error_str_len, "Argument %d " 2248 "name is not NUL-terminated", i); 2249 goto bailout; 2250 } 2251 2252 if (args[i].flags & CTL_BEARG_RD) { 2253 tmpptr = ctl_copyin_alloc(args[i].value, 2254 args[i].vallen, error_str, error_str_len); 2255 if (tmpptr == NULL) 2256 goto bailout; 2257 if ((args[i].flags & CTL_BEARG_ASCII) 2258 && (tmpptr[args[i].vallen - 1] != '\0')) { 2259 snprintf(error_str, error_str_len, "Argument " 2260 "%d value is not NUL-terminated", i); 2261 goto bailout; 2262 } 2263 args[i].kvalue = tmpptr; 2264 } else { 2265 args[i].kvalue = malloc(args[i].vallen, 2266 M_CTL, M_WAITOK | M_ZERO); 2267 } 2268 } 2269 2270 return (args); 2271 bailout: 2272 2273 ctl_free_args(num_args, args); 2274 2275 return (NULL); 2276 } 2277 2278 static void 2279 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2280 { 2281 int i; 2282 2283 for (i = 0; i < num_args; i++) { 2284 if (args[i].flags & CTL_BEARG_WR) 2285 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2286 } 2287 } 2288 2289 /* 2290 * Escape characters that are illegal or not recommended in XML. 
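 * Only '&', '<' and '>' are rewritten as entity references here; any other
 * character is passed through unchanged.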
 */
int
ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
{
	char *end = str + size;
	int retval;

	retval = 0;

	for (; *str && str < end; str++) {
		switch (*str) {
		case '&':
			retval = sbuf_printf(sb, "&amp;");
			break;
		case '>':
			retval = sbuf_printf(sb, "&gt;");
			break;
		case '<':
			retval = sbuf_printf(sb, "&lt;");
			break;
		default:
			retval = sbuf_putc(sb, *str);
			break;
		}

		if (retval != 0)
			break;

	}

	return (retval);
}

static void
ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
{
	struct scsi_vpd_id_descriptor *desc;
	int i;

	if (id == NULL || id->len < 4)
		return;
	desc = (struct scsi_vpd_id_descriptor *)id->data;
	switch (desc->id_type & SVPD_ID_TYPE_MASK) {
	case SVPD_ID_TYPE_T10:
		sbuf_printf(sb, "t10.");
		break;
	case SVPD_ID_TYPE_EUI64:
		sbuf_printf(sb, "eui.");
		break;
	case SVPD_ID_TYPE_NAA:
		sbuf_printf(sb, "naa.");
		break;
	case SVPD_ID_TYPE_SCSI_NAME:
		break;
	}
	switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
	case SVPD_ID_CODESET_BINARY:
		for (i = 0; i < desc->length; i++)
			sbuf_printf(sb, "%02x", desc->identifier[i]);
		break;
	case SVPD_ID_CODESET_ASCII:
		sbuf_printf(sb, "%.*s", (int)desc->length,
		    (char *)desc->identifier);
		break;
	case SVPD_ID_CODESET_UTF8:
		sbuf_printf(sb, "%s", (char *)desc->identifier);
		break;
	}
}

static int
ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	  struct thread *td)
{
	struct ctl_softc *softc;
	int retval;

	softc = control_softc;

	retval = 0;

	switch (cmd) {
	case CTL_IO: {
		union ctl_io *io;
		void *pool_tmp;

		/*
		 * If we haven't been "enabled", don't allow any SCSI I/O
		 * to this FETD.
		 */
		if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
			retval = EPERM;
			break;
		}

		io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);

		/*
		 * Need to save the pool reference so it doesn't get
		 * spammed by the user's ctl_io.
		 */
		pool_tmp = io->io_hdr.pool;
		memcpy(io, (void *)addr, sizeof(*io));
		io->io_hdr.pool = pool_tmp;

		/*
		 * No status yet, so make sure the status is set properly.
		 */
		io->io_hdr.status = CTL_STATUS_NONE;

		/*
		 * The user sets the initiator ID, target and LUN IDs.
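		 * The target port is filled in from the ioctl port below, and
		 * tagged SCSI commands are assigned a tag number from
		 * cur_tag_num.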
2403 */ 2404 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port; 2405 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 2406 if ((io->io_hdr.io_type == CTL_IO_SCSI) 2407 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 2408 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; 2409 2410 retval = ctl_ioctl_submit_wait(io); 2411 2412 if (retval != 0) { 2413 ctl_free_io(io); 2414 break; 2415 } 2416 2417 memcpy((void *)addr, io, sizeof(*io)); 2418 2419 /* return this to our pool */ 2420 ctl_free_io(io); 2421 2422 break; 2423 } 2424 case CTL_ENABLE_PORT: 2425 case CTL_DISABLE_PORT: 2426 case CTL_SET_PORT_WWNS: { 2427 struct ctl_port *port; 2428 struct ctl_port_entry *entry; 2429 2430 entry = (struct ctl_port_entry *)addr; 2431 2432 mtx_lock(&softc->ctl_lock); 2433 STAILQ_FOREACH(port, &softc->port_list, links) { 2434 int action, done; 2435 2436 action = 0; 2437 done = 0; 2438 2439 if ((entry->port_type == CTL_PORT_NONE) 2440 && (entry->targ_port == port->targ_port)) { 2441 /* 2442 * If the user only wants to enable or 2443 * disable or set WWNs on a specific port, 2444 * do the operation and we're done. 2445 */ 2446 action = 1; 2447 done = 1; 2448 } else if (entry->port_type & port->port_type) { 2449 /* 2450 * Compare the user's type mask with the 2451 * particular frontend type to see if we 2452 * have a match. 2453 */ 2454 action = 1; 2455 done = 0; 2456 2457 /* 2458 * Make sure the user isn't trying to set 2459 * WWNs on multiple ports at the same time. 2460 */ 2461 if (cmd == CTL_SET_PORT_WWNS) { 2462 printf("%s: Can't set WWNs on " 2463 "multiple ports\n", __func__); 2464 retval = EINVAL; 2465 break; 2466 } 2467 } 2468 if (action != 0) { 2469 /* 2470 * XXX KDM we have to drop the lock here, 2471 * because the online/offline operations 2472 * can potentially block. We need to 2473 * reference count the frontends so they 2474 * can't go away, 2475 */ 2476 mtx_unlock(&softc->ctl_lock); 2477 2478 if (cmd == CTL_ENABLE_PORT) { 2479 struct ctl_lun *lun; 2480 2481 STAILQ_FOREACH(lun, &softc->lun_list, 2482 links) { 2483 port->lun_enable(port->targ_lun_arg, 2484 lun->target, 2485 lun->lun); 2486 } 2487 2488 ctl_port_online(port); 2489 } else if (cmd == CTL_DISABLE_PORT) { 2490 struct ctl_lun *lun; 2491 2492 ctl_port_offline(port); 2493 2494 STAILQ_FOREACH(lun, &softc->lun_list, 2495 links) { 2496 port->lun_disable( 2497 port->targ_lun_arg, 2498 lun->target, 2499 lun->lun); 2500 } 2501 } 2502 2503 mtx_lock(&softc->ctl_lock); 2504 2505 if (cmd == CTL_SET_PORT_WWNS) 2506 ctl_port_set_wwns(port, 2507 (entry->flags & CTL_PORT_WWNN_VALID) ? 2508 1 : 0, entry->wwnn, 2509 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2510 1 : 0, entry->wwpn); 2511 } 2512 if (done != 0) 2513 break; 2514 } 2515 mtx_unlock(&softc->ctl_lock); 2516 break; 2517 } 2518 case CTL_GET_PORT_LIST: { 2519 struct ctl_port *port; 2520 struct ctl_port_list *list; 2521 int i; 2522 2523 list = (struct ctl_port_list *)addr; 2524 2525 if (list->alloc_len != (list->alloc_num * 2526 sizeof(struct ctl_port_entry))) { 2527 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2528 "alloc_num %u * sizeof(struct ctl_port_entry) " 2529 "%zu\n", __func__, list->alloc_len, 2530 list->alloc_num, sizeof(struct ctl_port_entry)); 2531 retval = EINVAL; 2532 break; 2533 } 2534 list->fill_len = 0; 2535 list->fill_num = 0; 2536 list->dropped_num = 0; 2537 i = 0; 2538 mtx_lock(&softc->ctl_lock); 2539 STAILQ_FOREACH(port, &softc->port_list, links) { 2540 struct ctl_port_entry entry, *list_entry; 2541 2542 if (list->fill_num >= list->alloc_num) { 2543 list->dropped_num++; 2544 continue; 2545 } 2546 2547 entry.port_type = port->port_type; 2548 strlcpy(entry.port_name, port->port_name, 2549 sizeof(entry.port_name)); 2550 entry.targ_port = port->targ_port; 2551 entry.physical_port = port->physical_port; 2552 entry.virtual_port = port->virtual_port; 2553 entry.wwnn = port->wwnn; 2554 entry.wwpn = port->wwpn; 2555 if (port->status & CTL_PORT_STATUS_ONLINE) 2556 entry.online = 1; 2557 else 2558 entry.online = 0; 2559 2560 list_entry = &list->entries[i]; 2561 2562 retval = copyout(&entry, list_entry, sizeof(entry)); 2563 if (retval != 0) { 2564 printf("%s: CTL_GET_PORT_LIST: copyout " 2565 "returned %d\n", __func__, retval); 2566 break; 2567 } 2568 i++; 2569 list->fill_num++; 2570 list->fill_len += sizeof(entry); 2571 } 2572 mtx_unlock(&softc->ctl_lock); 2573 2574 /* 2575 * If this is non-zero, we had a copyout fault, so there's 2576 * probably no point in attempting to set the status inside 2577 * the structure. 2578 */ 2579 if (retval != 0) 2580 break; 2581 2582 if (list->dropped_num > 0) 2583 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2584 else 2585 list->status = CTL_PORT_LIST_OK; 2586 break; 2587 } 2588 case CTL_DUMP_OOA: { 2589 struct ctl_lun *lun; 2590 union ctl_io *io; 2591 char printbuf[128]; 2592 struct sbuf sb; 2593 2594 mtx_lock(&softc->ctl_lock); 2595 printf("Dumping OOA queues:\n"); 2596 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2597 mtx_lock(&lun->lun_lock); 2598 for (io = (union ctl_io *)TAILQ_FIRST( 2599 &lun->ooa_queue); io != NULL; 2600 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2601 ooa_links)) { 2602 sbuf_new(&sb, printbuf, sizeof(printbuf), 2603 SBUF_FIXEDLEN); 2604 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2605 (intmax_t)lun->lun, 2606 io->scsiio.tag_num, 2607 (io->io_hdr.flags & 2608 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2609 (io->io_hdr.flags & 2610 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2611 (io->io_hdr.flags & 2612 CTL_FLAG_ABORT) ? " ABORT" : "", 2613 (io->io_hdr.flags & 2614 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2615 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2616 sbuf_finish(&sb); 2617 printf("%s\n", sbuf_data(&sb)); 2618 } 2619 mtx_unlock(&lun->lun_lock); 2620 } 2621 printf("OOA queues dump done\n"); 2622 mtx_unlock(&softc->ctl_lock); 2623 break; 2624 } 2625 case CTL_GET_OOA: { 2626 struct ctl_lun *lun; 2627 struct ctl_ooa *ooa_hdr; 2628 struct ctl_ooa_entry *entries; 2629 uint32_t cur_fill_num; 2630 2631 ooa_hdr = (struct ctl_ooa *)addr; 2632 2633 if ((ooa_hdr->alloc_len == 0) 2634 || (ooa_hdr->alloc_num == 0)) { 2635 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2636 "must be non-zero\n", __func__, 2637 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2638 retval = EINVAL; 2639 break; 2640 } 2641 2642 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2643 sizeof(struct ctl_ooa_entry))) { 2644 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2645 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2646 __func__, ooa_hdr->alloc_len, 2647 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2648 retval = EINVAL; 2649 break; 2650 } 2651 2652 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2653 if (entries == NULL) { 2654 printf("%s: could not allocate %d bytes for OOA " 2655 "dump\n", __func__, ooa_hdr->alloc_len); 2656 retval = ENOMEM; 2657 break; 2658 } 2659 2660 mtx_lock(&softc->ctl_lock); 2661 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2662 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2663 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2664 mtx_unlock(&softc->ctl_lock); 2665 free(entries, M_CTL); 2666 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2667 __func__, (uintmax_t)ooa_hdr->lun_num); 2668 retval = EINVAL; 2669 break; 2670 } 2671 2672 cur_fill_num = 0; 2673 2674 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2675 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2676 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2677 ooa_hdr, entries); 2678 if (retval != 0) 2679 break; 2680 } 2681 if (retval != 0) { 2682 mtx_unlock(&softc->ctl_lock); 2683 free(entries, M_CTL); 2684 break; 2685 } 2686 } else { 2687 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2688 2689 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2690 entries); 2691 } 2692 mtx_unlock(&softc->ctl_lock); 2693 2694 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2695 ooa_hdr->fill_len = ooa_hdr->fill_num * 2696 sizeof(struct ctl_ooa_entry); 2697 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2698 if (retval != 0) { 2699 printf("%s: error copying out %d bytes for OOA dump\n", 2700 __func__, ooa_hdr->fill_len); 2701 } 2702 2703 getbintime(&ooa_hdr->cur_bt); 2704 2705 if (cur_fill_num > ooa_hdr->alloc_num) { 2706 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2707 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2708 } else { 2709 ooa_hdr->dropped_num = 0; 2710 ooa_hdr->status = CTL_OOA_OK; 2711 } 2712 2713 free(entries, M_CTL); 2714 break; 2715 } 2716 case CTL_CHECK_OOA: { 2717 union ctl_io *io; 2718 struct ctl_lun *lun; 2719 struct ctl_ooa_info *ooa_info; 2720 2721 2722 ooa_info = (struct ctl_ooa_info *)addr; 2723 2724 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2725 ooa_info->status = CTL_OOA_INVALID_LUN; 2726 break; 2727 } 2728 mtx_lock(&softc->ctl_lock); 2729 lun = softc->ctl_luns[ooa_info->lun_id]; 2730 if (lun == NULL) { 2731 mtx_unlock(&softc->ctl_lock); 2732 ooa_info->status = CTL_OOA_INVALID_LUN; 2733 break; 2734 } 2735 mtx_lock(&lun->lun_lock); 2736 mtx_unlock(&softc->ctl_lock); 2737 ooa_info->num_entries = 0; 2738 for (io = (union ctl_io 
*)TAILQ_FIRST(&lun->ooa_queue); 2739 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2740 &io->io_hdr, ooa_links)) { 2741 ooa_info->num_entries++; 2742 } 2743 mtx_unlock(&lun->lun_lock); 2744 2745 ooa_info->status = CTL_OOA_SUCCESS; 2746 2747 break; 2748 } 2749 case CTL_HARD_START: 2750 case CTL_HARD_STOP: { 2751 struct ctl_fe_ioctl_startstop_info ss_info; 2752 struct cfi_metatask *metatask; 2753 struct mtx hs_mtx; 2754 2755 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2756 2757 cv_init(&ss_info.sem, "hard start/stop cv" ); 2758 2759 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2760 if (metatask == NULL) { 2761 retval = ENOMEM; 2762 mtx_destroy(&hs_mtx); 2763 break; 2764 } 2765 2766 if (cmd == CTL_HARD_START) 2767 metatask->tasktype = CFI_TASK_STARTUP; 2768 else 2769 metatask->tasktype = CFI_TASK_SHUTDOWN; 2770 2771 metatask->callback = ctl_ioctl_hard_startstop_callback; 2772 metatask->callback_arg = &ss_info; 2773 2774 cfi_action(metatask); 2775 2776 /* Wait for the callback */ 2777 mtx_lock(&hs_mtx); 2778 cv_wait_sig(&ss_info.sem, &hs_mtx); 2779 mtx_unlock(&hs_mtx); 2780 2781 /* 2782 * All information has been copied from the metatask by the 2783 * time cv_broadcast() is called, so we free the metatask here. 2784 */ 2785 cfi_free_metatask(metatask); 2786 2787 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2788 2789 mtx_destroy(&hs_mtx); 2790 break; 2791 } 2792 case CTL_BBRREAD: { 2793 struct ctl_bbrread_info *bbr_info; 2794 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2795 struct mtx bbr_mtx; 2796 struct cfi_metatask *metatask; 2797 2798 bbr_info = (struct ctl_bbrread_info *)addr; 2799 2800 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2801 2802 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2803 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2804 2805 fe_bbr_info.bbr_info = bbr_info; 2806 fe_bbr_info.lock = &bbr_mtx; 2807 2808 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2809 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2810 2811 if (metatask == NULL) { 2812 mtx_destroy(&bbr_mtx); 2813 cv_destroy(&fe_bbr_info.sem); 2814 retval = ENOMEM; 2815 break; 2816 } 2817 metatask->tasktype = CFI_TASK_BBRREAD; 2818 metatask->callback = ctl_ioctl_bbrread_callback; 2819 metatask->callback_arg = &fe_bbr_info; 2820 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2821 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2822 metatask->taskinfo.bbrread.len = bbr_info->len; 2823 2824 cfi_action(metatask); 2825 2826 mtx_lock(&bbr_mtx); 2827 while (fe_bbr_info.wakeup_done == 0) 2828 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2829 mtx_unlock(&bbr_mtx); 2830 2831 bbr_info->status = metatask->status; 2832 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2833 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2834 memcpy(&bbr_info->sense_data, 2835 &metatask->taskinfo.bbrread.sense_data, 2836 ctl_min(sizeof(bbr_info->sense_data), 2837 sizeof(metatask->taskinfo.bbrread.sense_data))); 2838 2839 cfi_free_metatask(metatask); 2840 2841 mtx_destroy(&bbr_mtx); 2842 cv_destroy(&fe_bbr_info.sem); 2843 2844 break; 2845 } 2846 case CTL_DELAY_IO: { 2847 struct ctl_io_delay_info *delay_info; 2848 #ifdef CTL_IO_DELAY 2849 struct ctl_lun *lun; 2850 #endif /* CTL_IO_DELAY */ 2851 2852 delay_info = (struct ctl_io_delay_info *)addr; 2853 2854 #ifdef CTL_IO_DELAY 2855 mtx_lock(&softc->ctl_lock); 2856 2857 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2858 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2859 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2860 } else { 2861 lun = 
softc->ctl_luns[delay_info->lun_id]; 2862 mtx_lock(&lun->lun_lock); 2863 2864 delay_info->status = CTL_DELAY_STATUS_OK; 2865 2866 switch (delay_info->delay_type) { 2867 case CTL_DELAY_TYPE_CONT: 2868 break; 2869 case CTL_DELAY_TYPE_ONESHOT: 2870 break; 2871 default: 2872 delay_info->status = 2873 CTL_DELAY_STATUS_INVALID_TYPE; 2874 break; 2875 } 2876 2877 switch (delay_info->delay_loc) { 2878 case CTL_DELAY_LOC_DATAMOVE: 2879 lun->delay_info.datamove_type = 2880 delay_info->delay_type; 2881 lun->delay_info.datamove_delay = 2882 delay_info->delay_secs; 2883 break; 2884 case CTL_DELAY_LOC_DONE: 2885 lun->delay_info.done_type = 2886 delay_info->delay_type; 2887 lun->delay_info.done_delay = 2888 delay_info->delay_secs; 2889 break; 2890 default: 2891 delay_info->status = 2892 CTL_DELAY_STATUS_INVALID_LOC; 2893 break; 2894 } 2895 mtx_unlock(&lun->lun_lock); 2896 } 2897 2898 mtx_unlock(&softc->ctl_lock); 2899 #else 2900 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2901 #endif /* CTL_IO_DELAY */ 2902 break; 2903 } 2904 case CTL_REALSYNC_SET: { 2905 int *syncstate; 2906 2907 syncstate = (int *)addr; 2908 2909 mtx_lock(&softc->ctl_lock); 2910 switch (*syncstate) { 2911 case 0: 2912 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2913 break; 2914 case 1: 2915 softc->flags |= CTL_FLAG_REAL_SYNC; 2916 break; 2917 default: 2918 retval = EINVAL; 2919 break; 2920 } 2921 mtx_unlock(&softc->ctl_lock); 2922 break; 2923 } 2924 case CTL_REALSYNC_GET: { 2925 int *syncstate; 2926 2927 syncstate = (int*)addr; 2928 2929 mtx_lock(&softc->ctl_lock); 2930 if (softc->flags & CTL_FLAG_REAL_SYNC) 2931 *syncstate = 1; 2932 else 2933 *syncstate = 0; 2934 mtx_unlock(&softc->ctl_lock); 2935 2936 break; 2937 } 2938 case CTL_SETSYNC: 2939 case CTL_GETSYNC: { 2940 struct ctl_sync_info *sync_info; 2941 struct ctl_lun *lun; 2942 2943 sync_info = (struct ctl_sync_info *)addr; 2944 2945 mtx_lock(&softc->ctl_lock); 2946 lun = softc->ctl_luns[sync_info->lun_id]; 2947 if (lun == NULL) { 2948 mtx_unlock(&softc->ctl_lock); 2949 sync_info->status = CTL_GS_SYNC_NO_LUN; 2950 } 2951 /* 2952 * Get or set the sync interval. We're not bounds checking 2953 * in the set case, hopefully the user won't do something 2954 * silly. 2955 */ 2956 mtx_lock(&lun->lun_lock); 2957 mtx_unlock(&softc->ctl_lock); 2958 if (cmd == CTL_GETSYNC) 2959 sync_info->sync_interval = lun->sync_interval; 2960 else 2961 lun->sync_interval = sync_info->sync_interval; 2962 mtx_unlock(&lun->lun_lock); 2963 2964 sync_info->status = CTL_GS_SYNC_OK; 2965 2966 break; 2967 } 2968 case CTL_GETSTATS: { 2969 struct ctl_stats *stats; 2970 struct ctl_lun *lun; 2971 int i; 2972 2973 stats = (struct ctl_stats *)addr; 2974 2975 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2976 stats->alloc_len) { 2977 stats->status = CTL_SS_NEED_MORE_SPACE; 2978 stats->num_luns = softc->num_luns; 2979 break; 2980 } 2981 /* 2982 * XXX KDM no locking here. If the LUN list changes, 2983 * things can blow up. 
2984 */ 2985 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2986 i++, lun = STAILQ_NEXT(lun, links)) { 2987 retval = copyout(&lun->stats, &stats->lun_stats[i], 2988 sizeof(lun->stats)); 2989 if (retval != 0) 2990 break; 2991 } 2992 stats->num_luns = softc->num_luns; 2993 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2994 softc->num_luns; 2995 stats->status = CTL_SS_OK; 2996 #ifdef CTL_TIME_IO 2997 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2998 #else 2999 stats->flags = CTL_STATS_FLAG_NONE; 3000 #endif 3001 getnanouptime(&stats->timestamp); 3002 break; 3003 } 3004 case CTL_ERROR_INJECT: { 3005 struct ctl_error_desc *err_desc, *new_err_desc; 3006 struct ctl_lun *lun; 3007 3008 err_desc = (struct ctl_error_desc *)addr; 3009 3010 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 3011 M_WAITOK | M_ZERO); 3012 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 3013 3014 mtx_lock(&softc->ctl_lock); 3015 lun = softc->ctl_luns[err_desc->lun_id]; 3016 if (lun == NULL) { 3017 mtx_unlock(&softc->ctl_lock); 3018 free(new_err_desc, M_CTL); 3019 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 3020 __func__, (uintmax_t)err_desc->lun_id); 3021 retval = EINVAL; 3022 break; 3023 } 3024 mtx_lock(&lun->lun_lock); 3025 mtx_unlock(&softc->ctl_lock); 3026 3027 /* 3028 * We could do some checking here to verify the validity 3029 * of the request, but given the complexity of error 3030 * injection requests, the checking logic would be fairly 3031 * complex. 3032 * 3033 * For now, if the request is invalid, it just won't get 3034 * executed and might get deleted. 3035 */ 3036 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 3037 3038 /* 3039 * XXX KDM check to make sure the serial number is unique, 3040 * in case we somehow manage to wrap. That shouldn't 3041 * happen for a very long time, but it's the right thing to 3042 * do. 
3043 */ 3044 new_err_desc->serial = lun->error_serial; 3045 err_desc->serial = lun->error_serial; 3046 lun->error_serial++; 3047 3048 mtx_unlock(&lun->lun_lock); 3049 break; 3050 } 3051 case CTL_ERROR_INJECT_DELETE: { 3052 struct ctl_error_desc *delete_desc, *desc, *desc2; 3053 struct ctl_lun *lun; 3054 int delete_done; 3055 3056 delete_desc = (struct ctl_error_desc *)addr; 3057 delete_done = 0; 3058 3059 mtx_lock(&softc->ctl_lock); 3060 lun = softc->ctl_luns[delete_desc->lun_id]; 3061 if (lun == NULL) { 3062 mtx_unlock(&softc->ctl_lock); 3063 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 3064 __func__, (uintmax_t)delete_desc->lun_id); 3065 retval = EINVAL; 3066 break; 3067 } 3068 mtx_lock(&lun->lun_lock); 3069 mtx_unlock(&softc->ctl_lock); 3070 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 3071 if (desc->serial != delete_desc->serial) 3072 continue; 3073 3074 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 3075 links); 3076 free(desc, M_CTL); 3077 delete_done = 1; 3078 } 3079 mtx_unlock(&lun->lun_lock); 3080 if (delete_done == 0) { 3081 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 3082 "error serial %ju on LUN %u\n", __func__, 3083 delete_desc->serial, delete_desc->lun_id); 3084 retval = EINVAL; 3085 break; 3086 } 3087 break; 3088 } 3089 case CTL_DUMP_STRUCTS: { 3090 int i, j, k; 3091 struct ctl_port *port; 3092 struct ctl_frontend *fe; 3093 3094 mtx_lock(&softc->ctl_lock); 3095 printf("CTL Persistent Reservation information start:\n"); 3096 for (i = 0; i < CTL_MAX_LUNS; i++) { 3097 struct ctl_lun *lun; 3098 3099 lun = softc->ctl_luns[i]; 3100 3101 if ((lun == NULL) 3102 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 3103 continue; 3104 3105 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 3106 if (lun->pr_keys[j] == NULL) 3107 continue; 3108 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 3109 if (lun->pr_keys[j][k] == 0) 3110 continue; 3111 printf(" LUN %d port %d iid %d key " 3112 "%#jx\n", i, j, k, 3113 (uintmax_t)lun->pr_keys[j][k]); 3114 } 3115 } 3116 } 3117 printf("CTL Persistent Reservation information end\n"); 3118 printf("CTL Ports:\n"); 3119 STAILQ_FOREACH(port, &softc->port_list, links) { 3120 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 3121 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 3122 port->frontend->name, port->port_type, 3123 port->physical_port, port->virtual_port, 3124 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 3125 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3126 if (port->wwpn_iid[j].in_use == 0 && 3127 port->wwpn_iid[j].wwpn == 0 && 3128 port->wwpn_iid[j].name == NULL) 3129 continue; 3130 3131 printf(" iid %u use %d WWPN %#jx '%s'\n", 3132 j, port->wwpn_iid[j].in_use, 3133 (uintmax_t)port->wwpn_iid[j].wwpn, 3134 port->wwpn_iid[j].name); 3135 } 3136 } 3137 printf("CTL Port information end\n"); 3138 mtx_unlock(&softc->ctl_lock); 3139 /* 3140 * XXX KDM calling this without a lock. We'd likely want 3141 * to drop the lock before calling the frontend's dump 3142 * routine anyway. 
3143 */ 3144 printf("CTL Frontends:\n"); 3145 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3146 printf(" Frontend '%s'\n", fe->name); 3147 if (fe->fe_dump != NULL) 3148 fe->fe_dump(); 3149 } 3150 printf("CTL Frontend information end\n"); 3151 break; 3152 } 3153 case CTL_LUN_REQ: { 3154 struct ctl_lun_req *lun_req; 3155 struct ctl_backend_driver *backend; 3156 3157 lun_req = (struct ctl_lun_req *)addr; 3158 3159 backend = ctl_backend_find(lun_req->backend); 3160 if (backend == NULL) { 3161 lun_req->status = CTL_LUN_ERROR; 3162 snprintf(lun_req->error_str, 3163 sizeof(lun_req->error_str), 3164 "Backend \"%s\" not found.", 3165 lun_req->backend); 3166 break; 3167 } 3168 if (lun_req->num_be_args > 0) { 3169 lun_req->kern_be_args = ctl_copyin_args( 3170 lun_req->num_be_args, 3171 lun_req->be_args, 3172 lun_req->error_str, 3173 sizeof(lun_req->error_str)); 3174 if (lun_req->kern_be_args == NULL) { 3175 lun_req->status = CTL_LUN_ERROR; 3176 break; 3177 } 3178 } 3179 3180 retval = backend->ioctl(dev, cmd, addr, flag, td); 3181 3182 if (lun_req->num_be_args > 0) { 3183 ctl_copyout_args(lun_req->num_be_args, 3184 lun_req->kern_be_args); 3185 ctl_free_args(lun_req->num_be_args, 3186 lun_req->kern_be_args); 3187 } 3188 break; 3189 } 3190 case CTL_LUN_LIST: { 3191 struct sbuf *sb; 3192 struct ctl_lun *lun; 3193 struct ctl_lun_list *list; 3194 struct ctl_option *opt; 3195 3196 list = (struct ctl_lun_list *)addr; 3197 3198 /* 3199 * Allocate a fixed length sbuf here, based on the length 3200 * of the user's buffer. We could allocate an auto-extending 3201 * buffer, and then tell the user how much larger our 3202 * amount of data is than his buffer, but that presents 3203 * some problems: 3204 * 3205 * 1. The sbuf(9) routines use a blocking malloc, and so 3206 * we can't hold a lock while calling them with an 3207 * auto-extending buffer. 3208 * 3209 * 2. There is not currently a LUN reference counting 3210 * mechanism, outside of outstanding transactions on 3211 * the LUN's OOA queue. So a LUN could go away on us 3212 * while we're getting the LUN number, backend-specific 3213 * information, etc. Thus, given the way things 3214 * currently work, we need to hold the CTL lock while 3215 * grabbing LUN information. 3216 * 3217 * So, from the user's standpoint, the best thing to do is 3218 * allocate what he thinks is a reasonable buffer length, 3219 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3220 * double the buffer length and try again. (And repeat 3221 * that until he succeeds.) 3222 */ 3223 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3224 if (sb == NULL) { 3225 list->status = CTL_LUN_LIST_ERROR; 3226 snprintf(list->error_str, sizeof(list->error_str), 3227 "Unable to allocate %d bytes for LUN list", 3228 list->alloc_len); 3229 break; 3230 } 3231 3232 sbuf_printf(sb, "<ctllunlist>\n"); 3233 3234 mtx_lock(&softc->ctl_lock); 3235 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3236 mtx_lock(&lun->lun_lock); 3237 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3238 (uintmax_t)lun->lun); 3239 3240 /* 3241 * Bail out as soon as we see that we've overfilled 3242 * the buffer. 3243 */ 3244 if (retval != 0) 3245 break; 3246 3247 retval = sbuf_printf(sb, "\t<backend_type>%s" 3248 "</backend_type>\n", 3249 (lun->backend == NULL) ? 
"none" : 3250 lun->backend->name); 3251 3252 if (retval != 0) 3253 break; 3254 3255 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3256 lun->be_lun->lun_type); 3257 3258 if (retval != 0) 3259 break; 3260 3261 if (lun->backend == NULL) { 3262 retval = sbuf_printf(sb, "</lun>\n"); 3263 if (retval != 0) 3264 break; 3265 continue; 3266 } 3267 3268 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3269 (lun->be_lun->maxlba > 0) ? 3270 lun->be_lun->maxlba + 1 : 0); 3271 3272 if (retval != 0) 3273 break; 3274 3275 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3276 lun->be_lun->blocksize); 3277 3278 if (retval != 0) 3279 break; 3280 3281 retval = sbuf_printf(sb, "\t<serial_number>"); 3282 3283 if (retval != 0) 3284 break; 3285 3286 retval = ctl_sbuf_printf_esc(sb, 3287 lun->be_lun->serial_num, 3288 sizeof(lun->be_lun->serial_num)); 3289 3290 if (retval != 0) 3291 break; 3292 3293 retval = sbuf_printf(sb, "</serial_number>\n"); 3294 3295 if (retval != 0) 3296 break; 3297 3298 retval = sbuf_printf(sb, "\t<device_id>"); 3299 3300 if (retval != 0) 3301 break; 3302 3303 retval = ctl_sbuf_printf_esc(sb, 3304 lun->be_lun->device_id, 3305 sizeof(lun->be_lun->device_id)); 3306 3307 if (retval != 0) 3308 break; 3309 3310 retval = sbuf_printf(sb, "</device_id>\n"); 3311 3312 if (retval != 0) 3313 break; 3314 3315 if (lun->backend->lun_info != NULL) { 3316 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3317 if (retval != 0) 3318 break; 3319 } 3320 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3321 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3322 opt->name, opt->value, opt->name); 3323 if (retval != 0) 3324 break; 3325 } 3326 3327 retval = sbuf_printf(sb, "</lun>\n"); 3328 3329 if (retval != 0) 3330 break; 3331 mtx_unlock(&lun->lun_lock); 3332 } 3333 if (lun != NULL) 3334 mtx_unlock(&lun->lun_lock); 3335 mtx_unlock(&softc->ctl_lock); 3336 3337 if ((retval != 0) 3338 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3339 retval = 0; 3340 sbuf_delete(sb); 3341 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3342 snprintf(list->error_str, sizeof(list->error_str), 3343 "Out of space, %d bytes is too small", 3344 list->alloc_len); 3345 break; 3346 } 3347 3348 sbuf_finish(sb); 3349 3350 retval = copyout(sbuf_data(sb), list->lun_xml, 3351 sbuf_len(sb) + 1); 3352 3353 list->fill_len = sbuf_len(sb) + 1; 3354 list->status = CTL_LUN_LIST_OK; 3355 sbuf_delete(sb); 3356 break; 3357 } 3358 case CTL_ISCSI: { 3359 struct ctl_iscsi *ci; 3360 struct ctl_frontend *fe; 3361 3362 ci = (struct ctl_iscsi *)addr; 3363 3364 fe = ctl_frontend_find("iscsi"); 3365 if (fe == NULL) { 3366 ci->status = CTL_ISCSI_ERROR; 3367 snprintf(ci->error_str, sizeof(ci->error_str), 3368 "Frontend \"iscsi\" not found."); 3369 break; 3370 } 3371 3372 retval = fe->ioctl(dev, cmd, addr, flag, td); 3373 break; 3374 } 3375 case CTL_PORT_REQ: { 3376 struct ctl_req *req; 3377 struct ctl_frontend *fe; 3378 3379 req = (struct ctl_req *)addr; 3380 3381 fe = ctl_frontend_find(req->driver); 3382 if (fe == NULL) { 3383 req->status = CTL_LUN_ERROR; 3384 snprintf(req->error_str, sizeof(req->error_str), 3385 "Frontend \"%s\" not found.", req->driver); 3386 break; 3387 } 3388 if (req->num_args > 0) { 3389 req->kern_args = ctl_copyin_args(req->num_args, 3390 req->args, req->error_str, sizeof(req->error_str)); 3391 if (req->kern_args == NULL) { 3392 req->status = CTL_LUN_ERROR; 3393 break; 3394 } 3395 } 3396 3397 retval = fe->ioctl(dev, cmd, addr, flag, td); 3398 3399 if (req->num_args > 0) { 3400 ctl_copyout_args(req->num_args, 
req->kern_args); 3401 ctl_free_args(req->num_args, req->kern_args); 3402 } 3403 break; 3404 } 3405 case CTL_PORT_LIST: { 3406 struct sbuf *sb; 3407 struct ctl_port *port; 3408 struct ctl_lun_list *list; 3409 struct ctl_option *opt; 3410 int j; 3411 3412 list = (struct ctl_lun_list *)addr; 3413 3414 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3415 if (sb == NULL) { 3416 list->status = CTL_LUN_LIST_ERROR; 3417 snprintf(list->error_str, sizeof(list->error_str), 3418 "Unable to allocate %d bytes for LUN list", 3419 list->alloc_len); 3420 break; 3421 } 3422 3423 sbuf_printf(sb, "<ctlportlist>\n"); 3424 3425 mtx_lock(&softc->ctl_lock); 3426 STAILQ_FOREACH(port, &softc->port_list, links) { 3427 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3428 (uintmax_t)port->targ_port); 3429 3430 /* 3431 * Bail out as soon as we see that we've overfilled 3432 * the buffer. 3433 */ 3434 if (retval != 0) 3435 break; 3436 3437 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3438 "</frontend_type>\n", port->frontend->name); 3439 if (retval != 0) 3440 break; 3441 3442 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3443 port->port_type); 3444 if (retval != 0) 3445 break; 3446 3447 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3448 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3449 if (retval != 0) 3450 break; 3451 3452 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3453 port->port_name); 3454 if (retval != 0) 3455 break; 3456 3457 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3458 port->physical_port); 3459 if (retval != 0) 3460 break; 3461 3462 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3463 port->virtual_port); 3464 if (retval != 0) 3465 break; 3466 3467 if (port->target_devid != NULL) { 3468 sbuf_printf(sb, "\t<target>"); 3469 ctl_id_sbuf(port->target_devid, sb); 3470 sbuf_printf(sb, "</target>\n"); 3471 } 3472 3473 if (port->port_devid != NULL) { 3474 sbuf_printf(sb, "\t<port>"); 3475 ctl_id_sbuf(port->port_devid, sb); 3476 sbuf_printf(sb, "</port>\n"); 3477 } 3478 3479 if (port->port_info != NULL) { 3480 retval = port->port_info(port->onoff_arg, sb); 3481 if (retval != 0) 3482 break; 3483 } 3484 STAILQ_FOREACH(opt, &port->options, links) { 3485 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3486 opt->name, opt->value, opt->name); 3487 if (retval != 0) 3488 break; 3489 } 3490 3491 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3492 if (port->wwpn_iid[j].in_use == 0 || 3493 (port->wwpn_iid[j].wwpn == 0 && 3494 port->wwpn_iid[j].name == NULL)) 3495 continue; 3496 3497 if (port->wwpn_iid[j].name != NULL) 3498 retval = sbuf_printf(sb, 3499 "\t<initiator>%u %s</initiator>\n", 3500 j, port->wwpn_iid[j].name); 3501 else 3502 retval = sbuf_printf(sb, 3503 "\t<initiator>%u naa.%08jx</initiator>\n", 3504 j, port->wwpn_iid[j].wwpn); 3505 if (retval != 0) 3506 break; 3507 } 3508 if (retval != 0) 3509 break; 3510 3511 retval = sbuf_printf(sb, "</targ_port>\n"); 3512 if (retval != 0) 3513 break; 3514 } 3515 mtx_unlock(&softc->ctl_lock); 3516 3517 if ((retval != 0) 3518 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3519 retval = 0; 3520 sbuf_delete(sb); 3521 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3522 snprintf(list->error_str, sizeof(list->error_str), 3523 "Out of space, %d bytes is too small", 3524 list->alloc_len); 3525 break; 3526 } 3527 3528 sbuf_finish(sb); 3529 3530 retval = copyout(sbuf_data(sb), list->lun_xml, 3531 sbuf_len(sb) + 1); 3532 3533 list->fill_len = sbuf_len(sb) + 1; 3534 list->status = 
CTL_LUN_LIST_OK; 3535 sbuf_delete(sb); 3536 break; 3537 } 3538 default: { 3539 /* XXX KDM should we fix this? */ 3540 #if 0 3541 struct ctl_backend_driver *backend; 3542 unsigned int type; 3543 int found; 3544 3545 found = 0; 3546 3547 /* 3548 * We encode the backend type as the ioctl type for backend 3549 * ioctls. So parse it out here, and then search for a 3550 * backend of this type. 3551 */ 3552 type = _IOC_TYPE(cmd); 3553 3554 STAILQ_FOREACH(backend, &softc->be_list, links) { 3555 if (backend->type == type) { 3556 found = 1; 3557 break; 3558 } 3559 } 3560 if (found == 0) { 3561 printf("ctl: unknown ioctl command %#lx or backend " 3562 "%d\n", cmd, type); 3563 retval = EINVAL; 3564 break; 3565 } 3566 retval = backend->ioctl(dev, cmd, addr, flag, td); 3567 #endif 3568 retval = ENOTTY; 3569 break; 3570 } 3571 } 3572 return (retval); 3573 } 3574 3575 uint32_t 3576 ctl_get_initindex(struct ctl_nexus *nexus) 3577 { 3578 if (nexus->targ_port < CTL_MAX_PORTS) 3579 return (nexus->initid.id + 3580 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3581 else 3582 return (nexus->initid.id + 3583 ((nexus->targ_port - CTL_MAX_PORTS) * 3584 CTL_MAX_INIT_PER_PORT)); 3585 } 3586 3587 uint32_t 3588 ctl_get_resindex(struct ctl_nexus *nexus) 3589 { 3590 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3591 } 3592 3593 uint32_t 3594 ctl_port_idx(int port_num) 3595 { 3596 if (port_num < CTL_MAX_PORTS) 3597 return(port_num); 3598 else 3599 return(port_num - CTL_MAX_PORTS); 3600 } 3601 3602 static uint32_t 3603 ctl_map_lun(int port_num, uint32_t lun_id) 3604 { 3605 struct ctl_port *port; 3606 3607 port = control_softc->ctl_ports[ctl_port_idx(port_num)]; 3608 if (port == NULL) 3609 return (UINT32_MAX); 3610 if (port->lun_map == NULL) 3611 return (lun_id); 3612 return (port->lun_map(port->targ_lun_arg, lun_id)); 3613 } 3614 3615 static uint32_t 3616 ctl_map_lun_back(int port_num, uint32_t lun_id) 3617 { 3618 struct ctl_port *port; 3619 uint32_t i; 3620 3621 port = control_softc->ctl_ports[ctl_port_idx(port_num)]; 3622 if (port->lun_map == NULL) 3623 return (lun_id); 3624 for (i = 0; i < CTL_MAX_LUNS; i++) { 3625 if (port->lun_map(port->targ_lun_arg, i) == lun_id) 3626 return (i); 3627 } 3628 return (UINT32_MAX); 3629 } 3630 3631 /* 3632 * Note: This only works for bitmask sizes that are at least 32 bits, and 3633 * that are a power of 2. 
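 * A bit number maps to chunk (bit >> 5) and bit position (bit % 32) within
 * that chunk; for example, bit 37 lives in chunk 1 at position 5. The same
 * mapping is used by ctl_set_mask(), ctl_clear_mask() and ctl_is_set() below.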
3634 */ 3635 int 3636 ctl_ffz(uint32_t *mask, uint32_t size) 3637 { 3638 uint32_t num_chunks, num_pieces; 3639 int i, j; 3640 3641 num_chunks = (size >> 5); 3642 if (num_chunks == 0) 3643 num_chunks++; 3644 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3645 3646 for (i = 0; i < num_chunks; i++) { 3647 for (j = 0; j < num_pieces; j++) { 3648 if ((mask[i] & (1 << j)) == 0) 3649 return ((i << 5) + j); 3650 } 3651 } 3652 3653 return (-1); 3654 } 3655 3656 int 3657 ctl_set_mask(uint32_t *mask, uint32_t bit) 3658 { 3659 uint32_t chunk, piece; 3660 3661 chunk = bit >> 5; 3662 piece = bit % (sizeof(uint32_t) * 8); 3663 3664 if ((mask[chunk] & (1 << piece)) != 0) 3665 return (-1); 3666 else 3667 mask[chunk] |= (1 << piece); 3668 3669 return (0); 3670 } 3671 3672 int 3673 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3674 { 3675 uint32_t chunk, piece; 3676 3677 chunk = bit >> 5; 3678 piece = bit % (sizeof(uint32_t) * 8); 3679 3680 if ((mask[chunk] & (1 << piece)) == 0) 3681 return (-1); 3682 else 3683 mask[chunk] &= ~(1 << piece); 3684 3685 return (0); 3686 } 3687 3688 int 3689 ctl_is_set(uint32_t *mask, uint32_t bit) 3690 { 3691 uint32_t chunk, piece; 3692 3693 chunk = bit >> 5; 3694 piece = bit % (sizeof(uint32_t) * 8); 3695 3696 if ((mask[chunk] & (1 << piece)) == 0) 3697 return (0); 3698 else 3699 return (1); 3700 } 3701 3702 static uint64_t 3703 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3704 { 3705 uint64_t *t; 3706 3707 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3708 if (t == NULL) 3709 return (0); 3710 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3711 } 3712 3713 static void 3714 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3715 { 3716 uint64_t *t; 3717 3718 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3719 if (t == NULL) 3720 return; 3721 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3722 } 3723 3724 static void 3725 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3726 { 3727 uint64_t *p; 3728 u_int i; 3729 3730 i = residx/CTL_MAX_INIT_PER_PORT; 3731 if (lun->pr_keys[i] != NULL) 3732 return; 3733 mtx_unlock(&lun->lun_lock); 3734 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3735 M_WAITOK | M_ZERO); 3736 mtx_lock(&lun->lun_lock); 3737 if (lun->pr_keys[i] == NULL) 3738 lun->pr_keys[i] = p; 3739 else 3740 free(p, M_CTL); 3741 } 3742 3743 static void 3744 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3745 { 3746 uint64_t *t; 3747 3748 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3749 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3750 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3751 } 3752 3753 /* 3754 * ctl_softc, pool_name, total_ctl_io are passed in. 3755 * npool is passed out. 
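 * With IO_POOLS defined, a dedicated UMA secondary zone is created for the
 * pool; without it, the softc-wide io_zone is handed back as the pool handle.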
3756 */ 3757 int 3758 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3759 uint32_t total_ctl_io, void **npool) 3760 { 3761 #ifdef IO_POOLS 3762 struct ctl_io_pool *pool; 3763 3764 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3765 M_NOWAIT | M_ZERO); 3766 if (pool == NULL) 3767 return (ENOMEM); 3768 3769 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3770 pool->ctl_softc = ctl_softc; 3771 pool->zone = uma_zsecond_create(pool->name, NULL, 3772 NULL, NULL, NULL, ctl_softc->io_zone); 3773 /* uma_prealloc(pool->zone, total_ctl_io); */ 3774 3775 *npool = pool; 3776 #else 3777 *npool = ctl_softc->io_zone; 3778 #endif 3779 return (0); 3780 } 3781 3782 void 3783 ctl_pool_free(struct ctl_io_pool *pool) 3784 { 3785 3786 if (pool == NULL) 3787 return; 3788 3789 #ifdef IO_POOLS 3790 uma_zdestroy(pool->zone); 3791 free(pool, M_CTL); 3792 #endif 3793 } 3794 3795 union ctl_io * 3796 ctl_alloc_io(void *pool_ref) 3797 { 3798 union ctl_io *io; 3799 #ifdef IO_POOLS 3800 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3801 3802 io = uma_zalloc(pool->zone, M_WAITOK); 3803 #else 3804 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3805 #endif 3806 if (io != NULL) 3807 io->io_hdr.pool = pool_ref; 3808 return (io); 3809 } 3810 3811 union ctl_io * 3812 ctl_alloc_io_nowait(void *pool_ref) 3813 { 3814 union ctl_io *io; 3815 #ifdef IO_POOLS 3816 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3817 3818 io = uma_zalloc(pool->zone, M_NOWAIT); 3819 #else 3820 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3821 #endif 3822 if (io != NULL) 3823 io->io_hdr.pool = pool_ref; 3824 return (io); 3825 } 3826 3827 void 3828 ctl_free_io(union ctl_io *io) 3829 { 3830 #ifdef IO_POOLS 3831 struct ctl_io_pool *pool; 3832 #endif 3833 3834 if (io == NULL) 3835 return; 3836 3837 #ifdef IO_POOLS 3838 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3839 uma_zfree(pool->zone, io); 3840 #else 3841 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3842 #endif 3843 } 3844 3845 void 3846 ctl_zero_io(union ctl_io *io) 3847 { 3848 void *pool_ref; 3849 3850 if (io == NULL) 3851 return; 3852 3853 /* 3854 * May need to preserve linked list pointers at some point too. 3855 */ 3856 pool_ref = io->io_hdr.pool; 3857 memset(io, 0, sizeof(*io)); 3858 io->io_hdr.pool = pool_ref; 3859 } 3860 3861 /* 3862 * This routine is currently used for internal copies of ctl_ios that need 3863 * to persist for some reason after we've already returned status to the 3864 * FETD. (Thus the flag set.) 3865 * 3866 * XXX XXX 3867 * Note that this makes a blind copy of all fields in the ctl_io, except 3868 * for the pool reference. This includes any memory that has been 3869 * allocated! That memory will no longer be valid after done has been 3870 * called, so this would be VERY DANGEROUS for command that actually does 3871 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3872 * start and stop commands, which don't transfer any data, so this is not a 3873 * problem. If it is used for anything else, the caller would also need to 3874 * allocate data buffer space and this routine would need to be modified to 3875 * copy the data buffer(s) as well. 3876 */ 3877 void 3878 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3879 { 3880 void *pool_ref; 3881 3882 if ((src == NULL) 3883 || (dest == NULL)) 3884 return; 3885 3886 /* 3887 * May need to preserve linked list pointers at some point too. 
3888 */ 3889 pool_ref = dest->io_hdr.pool; 3890 3891 memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest))); 3892 3893 dest->io_hdr.pool = pool_ref; 3894 /* 3895 * We need to know that this is an internal copy, and doesn't need 3896 * to get passed back to the FETD that allocated it. 3897 */ 3898 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3899 } 3900 3901 static int 3902 ctl_expand_number(const char *buf, uint64_t *num) 3903 { 3904 char *endptr; 3905 uint64_t number; 3906 unsigned shift; 3907 3908 number = strtoq(buf, &endptr, 0); 3909 3910 switch (tolower((unsigned char)*endptr)) { 3911 case 'e': 3912 shift = 60; 3913 break; 3914 case 'p': 3915 shift = 50; 3916 break; 3917 case 't': 3918 shift = 40; 3919 break; 3920 case 'g': 3921 shift = 30; 3922 break; 3923 case 'm': 3924 shift = 20; 3925 break; 3926 case 'k': 3927 shift = 10; 3928 break; 3929 case 'b': 3930 case '\0': /* No unit. */ 3931 *num = number; 3932 return (0); 3933 default: 3934 /* Unrecognized unit. */ 3935 return (-1); 3936 } 3937 3938 if ((number << shift) >> shift != number) { 3939 /* Overflow */ 3940 return (-1); 3941 } 3942 *num = number << shift; 3943 return (0); 3944 } 3945 3946 3947 /* 3948 * This routine could be used in the future to load default and/or saved 3949 * mode page parameters for a particuar lun. 3950 */ 3951 static int 3952 ctl_init_page_index(struct ctl_lun *lun) 3953 { 3954 int i; 3955 struct ctl_page_index *page_index; 3956 const char *value; 3957 uint64_t ival; 3958 3959 memcpy(&lun->mode_pages.index, page_index_template, 3960 sizeof(page_index_template)); 3961 3962 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3963 3964 page_index = &lun->mode_pages.index[i]; 3965 /* 3966 * If this is a disk-only mode page, there's no point in 3967 * setting it up. For some pages, we have to have some 3968 * basic information about the disk in order to calculate the 3969 * mode page data. 3970 */ 3971 if ((lun->be_lun->lun_type != T_DIRECT) 3972 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3973 continue; 3974 3975 switch (page_index->page_code & SMPH_PC_MASK) { 3976 case SMS_RW_ERROR_RECOVERY_PAGE: { 3977 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3978 panic("subpage is incorrect!"); 3979 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3980 &rw_er_page_default, 3981 sizeof(rw_er_page_default)); 3982 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3983 &rw_er_page_changeable, 3984 sizeof(rw_er_page_changeable)); 3985 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3986 &rw_er_page_default, 3987 sizeof(rw_er_page_default)); 3988 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3989 &rw_er_page_default, 3990 sizeof(rw_er_page_default)); 3991 page_index->page_data = 3992 (uint8_t *)lun->mode_pages.rw_er_page; 3993 break; 3994 } 3995 case SMS_FORMAT_DEVICE_PAGE: { 3996 struct scsi_format_page *format_page; 3997 3998 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3999 panic("subpage is incorrect!"); 4000 4001 /* 4002 * Sectors per track are set above. Bytes per 4003 * sector need to be set here on a per-LUN basis. 
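 * (The value comes from be_lun->blocksize and is written into the current,
 * default and saved copies of the format page below.)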
4004 */ 4005 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4006 &format_page_default, 4007 sizeof(format_page_default)); 4008 memcpy(&lun->mode_pages.format_page[ 4009 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4010 sizeof(format_page_changeable)); 4011 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4012 &format_page_default, 4013 sizeof(format_page_default)); 4014 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4015 &format_page_default, 4016 sizeof(format_page_default)); 4017 4018 format_page = &lun->mode_pages.format_page[ 4019 CTL_PAGE_CURRENT]; 4020 scsi_ulto2b(lun->be_lun->blocksize, 4021 format_page->bytes_per_sector); 4022 4023 format_page = &lun->mode_pages.format_page[ 4024 CTL_PAGE_DEFAULT]; 4025 scsi_ulto2b(lun->be_lun->blocksize, 4026 format_page->bytes_per_sector); 4027 4028 format_page = &lun->mode_pages.format_page[ 4029 CTL_PAGE_SAVED]; 4030 scsi_ulto2b(lun->be_lun->blocksize, 4031 format_page->bytes_per_sector); 4032 4033 page_index->page_data = 4034 (uint8_t *)lun->mode_pages.format_page; 4035 break; 4036 } 4037 case SMS_RIGID_DISK_PAGE: { 4038 struct scsi_rigid_disk_page *rigid_disk_page; 4039 uint32_t sectors_per_cylinder; 4040 uint64_t cylinders; 4041 #ifndef __XSCALE__ 4042 int shift; 4043 #endif /* !__XSCALE__ */ 4044 4045 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4046 panic("invalid subpage value %d", 4047 page_index->subpage); 4048 4049 /* 4050 * Rotation rate and sectors per track are set 4051 * above. We calculate the cylinders here based on 4052 * capacity. Due to the number of heads and 4053 * sectors per track we're using, smaller arrays 4054 * may turn out to have 0 cylinders. Linux and 4055 * FreeBSD don't pay attention to these mode pages 4056 * to figure out capacity, but Solaris does. It 4057 * seems to deal with 0 cylinders just fine, and 4058 * works out a fake geometry based on the capacity. 4059 */ 4060 memcpy(&lun->mode_pages.rigid_disk_page[ 4061 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4062 sizeof(rigid_disk_page_default)); 4063 memcpy(&lun->mode_pages.rigid_disk_page[ 4064 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4065 sizeof(rigid_disk_page_changeable)); 4066 4067 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4068 CTL_DEFAULT_HEADS; 4069 4070 /* 4071 * The divide method here will be more accurate, 4072 * probably, but results in floating point being 4073 * used in the kernel on i386 (__udivdi3()). On the 4074 * XScale, though, __udivdi3() is implemented in 4075 * software. 4076 * 4077 * The shift method for cylinder calculation is 4078 * accurate if sectors_per_cylinder is a power of 4079 * 2. Otherwise it might be slightly off -- you 4080 * might have a bit of a truncation problem. 4081 */ 4082 #ifdef __XSCALE__ 4083 cylinders = (lun->be_lun->maxlba + 1) / 4084 sectors_per_cylinder; 4085 #else 4086 for (shift = 31; shift > 0; shift--) { 4087 if (sectors_per_cylinder & (1 << shift)) 4088 break; 4089 } 4090 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4091 #endif 4092 4093 /* 4094 * We've basically got 3 bytes, or 24 bits for the 4095 * cylinder size in the mode page. If we're over, 4096 * just round down to 2^24. 
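 * (The code below clamps to 0xffffff, the largest value the 3-byte field
 * can hold.)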
4097 */ 4098 if (cylinders > 0xffffff) 4099 cylinders = 0xffffff; 4100 4101 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4102 CTL_PAGE_DEFAULT]; 4103 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4104 4105 if ((value = ctl_get_opt(&lun->be_lun->options, 4106 "rpm")) != NULL) { 4107 scsi_ulto2b(strtol(value, NULL, 0), 4108 rigid_disk_page->rotation_rate); 4109 } 4110 4111 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4112 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4113 sizeof(rigid_disk_page_default)); 4114 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4115 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4116 sizeof(rigid_disk_page_default)); 4117 4118 page_index->page_data = 4119 (uint8_t *)lun->mode_pages.rigid_disk_page; 4120 break; 4121 } 4122 case SMS_CACHING_PAGE: { 4123 struct scsi_caching_page *caching_page; 4124 4125 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4126 panic("invalid subpage value %d", 4127 page_index->subpage); 4128 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4129 &caching_page_default, 4130 sizeof(caching_page_default)); 4131 memcpy(&lun->mode_pages.caching_page[ 4132 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4133 sizeof(caching_page_changeable)); 4134 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4135 &caching_page_default, 4136 sizeof(caching_page_default)); 4137 caching_page = &lun->mode_pages.caching_page[ 4138 CTL_PAGE_SAVED]; 4139 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4140 if (value != NULL && strcmp(value, "off") == 0) 4141 caching_page->flags1 &= ~SCP_WCE; 4142 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4143 if (value != NULL && strcmp(value, "off") == 0) 4144 caching_page->flags1 |= SCP_RCD; 4145 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4146 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4147 sizeof(caching_page_default)); 4148 page_index->page_data = 4149 (uint8_t *)lun->mode_pages.caching_page; 4150 break; 4151 } 4152 case SMS_CONTROL_MODE_PAGE: { 4153 struct scsi_control_page *control_page; 4154 4155 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4156 panic("invalid subpage value %d", 4157 page_index->subpage); 4158 4159 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 4160 &control_page_default, 4161 sizeof(control_page_default)); 4162 memcpy(&lun->mode_pages.control_page[ 4163 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4164 sizeof(control_page_changeable)); 4165 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4166 &control_page_default, 4167 sizeof(control_page_default)); 4168 control_page = &lun->mode_pages.control_page[ 4169 CTL_PAGE_SAVED]; 4170 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 4171 if (value != NULL && strcmp(value, "unrestricted") == 0) { 4172 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 4173 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 4174 } 4175 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4176 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 4177 sizeof(control_page_default)); 4178 page_index->page_data = 4179 (uint8_t *)lun->mode_pages.control_page; 4180 break; 4181 4182 } 4183 case SMS_INFO_EXCEPTIONS_PAGE: { 4184 switch (page_index->subpage) { 4185 case SMS_SUBPAGE_PAGE_0: 4186 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4187 &ie_page_default, 4188 sizeof(ie_page_default)); 4189 memcpy(&lun->mode_pages.ie_page[ 4190 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4191 sizeof(ie_page_changeable)); 4192 
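/*
 * The DEFAULT and SAVED instances below start from the same compiled-in
 * ie_page_default template that seeded the CURRENT instance above.
 */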
memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4193 &ie_page_default, 4194 sizeof(ie_page_default)); 4195 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4196 &ie_page_default, 4197 sizeof(ie_page_default)); 4198 page_index->page_data = 4199 (uint8_t *)lun->mode_pages.ie_page; 4200 break; 4201 case 0x02: { 4202 struct ctl_logical_block_provisioning_page *page; 4203 4204 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4205 &lbp_page_default, 4206 sizeof(lbp_page_default)); 4207 memcpy(&lun->mode_pages.lbp_page[ 4208 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4209 sizeof(lbp_page_changeable)); 4210 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4211 &lbp_page_default, 4212 sizeof(lbp_page_default)); 4213 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4214 value = ctl_get_opt(&lun->be_lun->options, 4215 "avail-threshold"); 4216 if (value != NULL && 4217 ctl_expand_number(value, &ival) == 0) { 4218 page->descr[0].flags |= SLBPPD_ENABLED | 4219 SLBPPD_ARMING_DEC; 4220 if (lun->be_lun->blocksize) 4221 ival /= lun->be_lun->blocksize; 4222 else 4223 ival /= 512; 4224 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4225 page->descr[0].count); 4226 } 4227 value = ctl_get_opt(&lun->be_lun->options, 4228 "used-threshold"); 4229 if (value != NULL && 4230 ctl_expand_number(value, &ival) == 0) { 4231 page->descr[1].flags |= SLBPPD_ENABLED | 4232 SLBPPD_ARMING_INC; 4233 if (lun->be_lun->blocksize) 4234 ival /= lun->be_lun->blocksize; 4235 else 4236 ival /= 512; 4237 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4238 page->descr[1].count); 4239 } 4240 value = ctl_get_opt(&lun->be_lun->options, 4241 "pool-avail-threshold"); 4242 if (value != NULL && 4243 ctl_expand_number(value, &ival) == 0) { 4244 page->descr[2].flags |= SLBPPD_ENABLED | 4245 SLBPPD_ARMING_DEC; 4246 if (lun->be_lun->blocksize) 4247 ival /= lun->be_lun->blocksize; 4248 else 4249 ival /= 512; 4250 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4251 page->descr[2].count); 4252 } 4253 value = ctl_get_opt(&lun->be_lun->options, 4254 "pool-used-threshold"); 4255 if (value != NULL && 4256 ctl_expand_number(value, &ival) == 0) { 4257 page->descr[3].flags |= SLBPPD_ENABLED | 4258 SLBPPD_ARMING_INC; 4259 if (lun->be_lun->blocksize) 4260 ival /= lun->be_lun->blocksize; 4261 else 4262 ival /= 512; 4263 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4264 page->descr[3].count); 4265 } 4266 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4267 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4268 sizeof(lbp_page_default)); 4269 page_index->page_data = 4270 (uint8_t *)lun->mode_pages.lbp_page; 4271 }} 4272 break; 4273 } 4274 case SMS_VENDOR_SPECIFIC_PAGE:{ 4275 switch (page_index->subpage) { 4276 case DBGCNF_SUBPAGE_CODE: { 4277 struct copan_debugconf_subpage *current_page, 4278 *saved_page; 4279 4280 memcpy(&lun->mode_pages.debugconf_subpage[ 4281 CTL_PAGE_CURRENT], 4282 &debugconf_page_default, 4283 sizeof(debugconf_page_default)); 4284 memcpy(&lun->mode_pages.debugconf_subpage[ 4285 CTL_PAGE_CHANGEABLE], 4286 &debugconf_page_changeable, 4287 sizeof(debugconf_page_changeable)); 4288 memcpy(&lun->mode_pages.debugconf_subpage[ 4289 CTL_PAGE_DEFAULT], 4290 &debugconf_page_default, 4291 sizeof(debugconf_page_default)); 4292 memcpy(&lun->mode_pages.debugconf_subpage[ 4293 CTL_PAGE_SAVED], 4294 &debugconf_page_default, 4295 sizeof(debugconf_page_default)); 4296 page_index->page_data = 4297 (uint8_t *)lun->mode_pages.debugconf_subpage; 4298 4299 current_page = (struct copan_debugconf_subpage *) 4300 (page_index->page_data + 4301 (page_index->page_len * 4302 CTL_PAGE_CURRENT)); 4303 
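/*
 * page_data holds the four instances of this subpage back to back
 * (CURRENT, CHANGEABLE, DEFAULT and SAVED), each page_len bytes long,
 * so an instance is addressed with byte arithmetic:
 * page_data + page_len * CTL_PAGE_<instance>. The SAVED instance is
 * located the same way just below.
 */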
saved_page = (struct copan_debugconf_subpage *) 4304 (page_index->page_data + 4305 (page_index->page_len * 4306 CTL_PAGE_SAVED)); 4307 break; 4308 } 4309 default: 4310 panic("invalid subpage value %d", 4311 page_index->subpage); 4312 break; 4313 } 4314 break; 4315 } 4316 default: 4317 panic("invalid page value %d", 4318 page_index->page_code & SMPH_PC_MASK); 4319 break; 4320 } 4321 } 4322 4323 return (CTL_RETVAL_COMPLETE); 4324 } 4325 4326 static int 4327 ctl_init_log_page_index(struct ctl_lun *lun) 4328 { 4329 struct ctl_page_index *page_index; 4330 int i, j, k, prev; 4331 4332 memcpy(&lun->log_pages.index, log_page_index_template, 4333 sizeof(log_page_index_template)); 4334 4335 prev = -1; 4336 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4337 4338 page_index = &lun->log_pages.index[i]; 4339 /* 4340 * If this is a disk-only mode page, there's no point in 4341 * setting it up. For some pages, we have to have some 4342 * basic information about the disk in order to calculate the 4343 * mode page data. 4344 */ 4345 if ((lun->be_lun->lun_type != T_DIRECT) 4346 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4347 continue; 4348 4349 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4350 lun->backend->lun_attr == NULL) 4351 continue; 4352 4353 if (page_index->page_code != prev) { 4354 lun->log_pages.pages_page[j] = page_index->page_code; 4355 prev = page_index->page_code; 4356 j++; 4357 } 4358 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4359 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4360 k++; 4361 } 4362 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4363 lun->log_pages.index[0].page_len = j; 4364 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4365 lun->log_pages.index[1].page_len = k * 2; 4366 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4367 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4368 4369 return (CTL_RETVAL_COMPLETE); 4370 } 4371 4372 static int 4373 hex2bin(const char *str, uint8_t *buf, int buf_size) 4374 { 4375 int i; 4376 u_char c; 4377 4378 memset(buf, 0, buf_size); 4379 while (isspace(str[0])) 4380 str++; 4381 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4382 str += 2; 4383 buf_size *= 2; 4384 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4385 c = str[i]; 4386 if (isdigit(c)) 4387 c -= '0'; 4388 else if (isalpha(c)) 4389 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4390 else 4391 break; 4392 if (c >= 16) 4393 break; 4394 if ((i & 1) == 0) 4395 buf[i / 2] |= (c << 4); 4396 else 4397 buf[i / 2] |= c; 4398 } 4399 return ((i + 1) / 2); 4400 } 4401 4402 /* 4403 * LUN allocation. 4404 * 4405 * Requirements: 4406 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4407 * wants us to allocate the LUN and he can block. 4408 * - ctl_softc is always set 4409 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4410 * 4411 * Returns 0 for success, non-zero (errno) for failure. 
4412 */ 4413 static int 4414 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4415 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4416 { 4417 struct ctl_lun *nlun, *lun; 4418 struct ctl_port *port; 4419 struct scsi_vpd_id_descriptor *desc; 4420 struct scsi_vpd_id_t10 *t10id; 4421 const char *eui, *naa, *scsiname, *vendor, *value; 4422 int lun_number, i, lun_malloced; 4423 int devidlen, idlen1, idlen2 = 0, len; 4424 4425 if (be_lun == NULL) 4426 return (EINVAL); 4427 4428 /* 4429 * We currently only support Direct Access or Processor LUN types. 4430 */ 4431 switch (be_lun->lun_type) { 4432 case T_DIRECT: 4433 break; 4434 case T_PROCESSOR: 4435 break; 4436 case T_SEQUENTIAL: 4437 case T_CHANGER: 4438 default: 4439 be_lun->lun_config_status(be_lun->be_lun, 4440 CTL_LUN_CONFIG_FAILURE); 4441 break; 4442 } 4443 if (ctl_lun == NULL) { 4444 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4445 lun_malloced = 1; 4446 } else { 4447 lun_malloced = 0; 4448 lun = ctl_lun; 4449 } 4450 4451 memset(lun, 0, sizeof(*lun)); 4452 if (lun_malloced) 4453 lun->flags = CTL_LUN_MALLOCED; 4454 4455 /* Generate LUN ID. */ 4456 devidlen = max(CTL_DEVID_MIN_LEN, 4457 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4458 idlen1 = sizeof(*t10id) + devidlen; 4459 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4460 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4461 if (scsiname != NULL) { 4462 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4463 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4464 } 4465 eui = ctl_get_opt(&be_lun->options, "eui"); 4466 if (eui != NULL) { 4467 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4468 } 4469 naa = ctl_get_opt(&be_lun->options, "naa"); 4470 if (naa != NULL) { 4471 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4472 } 4473 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4474 M_CTL, M_WAITOK | M_ZERO); 4475 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4476 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4477 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4478 desc->length = idlen1; 4479 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4480 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4481 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4482 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4483 } else { 4484 strncpy(t10id->vendor, vendor, 4485 min(sizeof(t10id->vendor), strlen(vendor))); 4486 } 4487 strncpy((char *)t10id->vendor_spec_id, 4488 (char *)be_lun->device_id, devidlen); 4489 if (scsiname != NULL) { 4490 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4491 desc->length); 4492 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4493 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4494 SVPD_ID_TYPE_SCSI_NAME; 4495 desc->length = idlen2; 4496 strlcpy(desc->identifier, scsiname, idlen2); 4497 } 4498 if (eui != NULL) { 4499 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4500 desc->length); 4501 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4502 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4503 SVPD_ID_TYPE_EUI64; 4504 desc->length = hex2bin(eui, desc->identifier, 16); 4505 desc->length = desc->length > 12 ? 16 : 4506 (desc->length > 8 ? 
12 : 8); 4507 len -= 16 - desc->length; 4508 } 4509 if (naa != NULL) { 4510 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4511 desc->length); 4512 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4513 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4514 SVPD_ID_TYPE_NAA; 4515 desc->length = hex2bin(naa, desc->identifier, 16); 4516 desc->length = desc->length > 8 ? 16 : 8; 4517 len -= 16 - desc->length; 4518 } 4519 lun->lun_devid->len = len; 4520 4521 mtx_lock(&ctl_softc->ctl_lock); 4522 /* 4523 * See if the caller requested a particular LUN number. If so, see 4524 * if it is available. Otherwise, allocate the first available LUN. 4525 */ 4526 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4527 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4528 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4529 mtx_unlock(&ctl_softc->ctl_lock); 4530 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4531 printf("ctl: requested LUN ID %d is higher " 4532 "than CTL_MAX_LUNS - 1 (%d)\n", 4533 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4534 } else { 4535 /* 4536 * XXX KDM return an error, or just assign 4537 * another LUN ID in this case?? 4538 */ 4539 printf("ctl: requested LUN ID %d is already " 4540 "in use\n", be_lun->req_lun_id); 4541 } 4542 if (lun->flags & CTL_LUN_MALLOCED) 4543 free(lun, M_CTL); 4544 be_lun->lun_config_status(be_lun->be_lun, 4545 CTL_LUN_CONFIG_FAILURE); 4546 return (ENOSPC); 4547 } 4548 lun_number = be_lun->req_lun_id; 4549 } else { 4550 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4551 if (lun_number == -1) { 4552 mtx_unlock(&ctl_softc->ctl_lock); 4553 printf("ctl: can't allocate LUN on target %ju, out of " 4554 "LUNs\n", (uintmax_t)target_id.id); 4555 if (lun->flags & CTL_LUN_MALLOCED) 4556 free(lun, M_CTL); 4557 be_lun->lun_config_status(be_lun->be_lun, 4558 CTL_LUN_CONFIG_FAILURE); 4559 return (ENOSPC); 4560 } 4561 } 4562 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4563 4564 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4565 lun->target = target_id; 4566 lun->lun = lun_number; 4567 lun->be_lun = be_lun; 4568 /* 4569 * The processor LUN is always enabled. Disk LUNs come on line 4570 * disabled, and must be enabled by the backend. 4571 */ 4572 lun->flags |= CTL_LUN_DISABLED; 4573 lun->backend = be_lun->be; 4574 be_lun->ctl_lun = lun; 4575 be_lun->lun_id = lun_number; 4576 atomic_add_int(&be_lun->be->num_luns, 1); 4577 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4578 lun->flags |= CTL_LUN_OFFLINE; 4579 4580 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4581 lun->flags |= CTL_LUN_STOPPED; 4582 4583 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4584 lun->flags |= CTL_LUN_INOPERABLE; 4585 4586 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4587 lun->flags |= CTL_LUN_PRIMARY_SC; 4588 4589 value = ctl_get_opt(&be_lun->options, "readonly"); 4590 if (value != NULL && strcmp(value, "on") == 0) 4591 lun->flags |= CTL_LUN_READONLY; 4592 4593 lun->ctl_softc = ctl_softc; 4594 TAILQ_INIT(&lun->ooa_queue); 4595 TAILQ_INIT(&lun->blocked_queue); 4596 STAILQ_INIT(&lun->error_list); 4597 ctl_tpc_lun_init(lun); 4598 4599 /* 4600 * Initialize the mode and log page index. 4601 */ 4602 ctl_init_page_index(lun); 4603 ctl_init_log_page_index(lun); 4604 4605 /* 4606 * Now, before we insert this lun on the lun list, set the lun 4607 * inventory changed UA for all other luns. 
4608 */ 4609 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4610 mtx_lock(&nlun->lun_lock); 4611 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4612 mtx_unlock(&nlun->lun_lock); 4613 } 4614 4615 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4616 4617 ctl_softc->ctl_luns[lun_number] = lun; 4618 4619 ctl_softc->num_luns++; 4620 4621 /* Setup statistics gathering */ 4622 lun->stats.device_type = be_lun->lun_type; 4623 lun->stats.lun_number = lun_number; 4624 if (lun->stats.device_type == T_DIRECT) 4625 lun->stats.blocksize = be_lun->blocksize; 4626 else 4627 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4628 for (i = 0;i < CTL_MAX_PORTS;i++) 4629 lun->stats.ports[i].targ_port = i; 4630 4631 mtx_unlock(&ctl_softc->ctl_lock); 4632 4633 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4634 4635 /* 4636 * Run through each registered FETD and bring it online if it isn't 4637 * already. Enable the target ID if it hasn't been enabled, and 4638 * enable this particular LUN. 4639 */ 4640 STAILQ_FOREACH(port, &ctl_softc->port_list, links) { 4641 int retval; 4642 4643 retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number); 4644 if (retval != 0) { 4645 printf("ctl_alloc_lun: FETD %s port %d returned error " 4646 "%d for lun_enable on target %ju lun %d\n", 4647 port->port_name, port->targ_port, retval, 4648 (uintmax_t)target_id.id, lun_number); 4649 } else 4650 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4651 } 4652 return (0); 4653 } 4654 4655 /* 4656 * Delete a LUN. 4657 * Assumptions: 4658 * - LUN has already been marked invalid and any pending I/O has been taken 4659 * care of. 4660 */ 4661 static int 4662 ctl_free_lun(struct ctl_lun *lun) 4663 { 4664 struct ctl_softc *softc; 4665 #if 0 4666 struct ctl_port *port; 4667 #endif 4668 struct ctl_lun *nlun; 4669 int i; 4670 4671 softc = lun->ctl_softc; 4672 4673 mtx_assert(&softc->ctl_lock, MA_OWNED); 4674 4675 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4676 4677 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4678 4679 softc->ctl_luns[lun->lun] = NULL; 4680 4681 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4682 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4683 4684 softc->num_luns--; 4685 4686 /* 4687 * XXX KDM this scheme only works for a single target/multiple LUN 4688 * setup. It needs to be revamped for a multiple target scheme. 4689 * 4690 * XXX KDM this results in port->lun_disable() getting called twice, 4691 * once when ctl_disable_lun() is called, and a second time here. 4692 * We really need to re-think the LUN disable semantics. There 4693 * should probably be several steps/levels to LUN removal: 4694 * - disable 4695 * - invalidate 4696 * - free 4697 * 4698 * Right now we only have a disable method when communicating to 4699 * the front end ports, at least for individual LUNs. 
4700 */ 4701 #if 0 4702 STAILQ_FOREACH(port, &softc->port_list, links) { 4703 int retval; 4704 4705 retval = port->lun_disable(port->targ_lun_arg, lun->target, 4706 lun->lun); 4707 if (retval != 0) { 4708 printf("ctl_free_lun: FETD %s port %d returned error " 4709 "%d for lun_disable on target %ju lun %jd\n", 4710 port->port_name, port->targ_port, retval, 4711 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4712 } 4713 4714 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4715 port->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4716 4717 retval = port->targ_disable(port->targ_lun_arg,lun->target); 4718 if (retval != 0) { 4719 printf("ctl_free_lun: FETD %s port %d " 4720 "returned error %d for targ_disable on " 4721 "target %ju\n", port->port_name, 4722 port->targ_port, retval, 4723 (uintmax_t)lun->target.id); 4724 } else 4725 port->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4726 4727 if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4728 continue; 4729 4730 #if 0 4731 port->port_offline(port->onoff_arg); 4732 port->status &= ~CTL_PORT_STATUS_ONLINE; 4733 #endif 4734 } 4735 } 4736 #endif 4737 4738 /* 4739 * Tell the backend to free resources, if this LUN has a backend. 4740 */ 4741 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4742 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4743 4744 ctl_tpc_lun_shutdown(lun); 4745 mtx_destroy(&lun->lun_lock); 4746 free(lun->lun_devid, M_CTL); 4747 for (i = 0; i < CTL_MAX_PORTS; i++) 4748 free(lun->pending_ua[i], M_CTL); 4749 for (i = 0; i < 2 * CTL_MAX_PORTS; i++) 4750 free(lun->pr_keys[i], M_CTL); 4751 free(lun->write_buffer, M_CTL); 4752 if (lun->flags & CTL_LUN_MALLOCED) 4753 free(lun, M_CTL); 4754 4755 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4756 mtx_lock(&nlun->lun_lock); 4757 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4758 mtx_unlock(&nlun->lun_lock); 4759 } 4760 4761 return (0); 4762 } 4763 4764 static void 4765 ctl_create_lun(struct ctl_be_lun *be_lun) 4766 { 4767 struct ctl_softc *ctl_softc; 4768 4769 ctl_softc = control_softc; 4770 4771 /* 4772 * ctl_alloc_lun() should handle all potential failure cases. 4773 */ 4774 ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target); 4775 } 4776 4777 int 4778 ctl_add_lun(struct ctl_be_lun *be_lun) 4779 { 4780 struct ctl_softc *ctl_softc = control_softc; 4781 4782 mtx_lock(&ctl_softc->ctl_lock); 4783 STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links); 4784 mtx_unlock(&ctl_softc->ctl_lock); 4785 wakeup(&ctl_softc->pending_lun_queue); 4786 4787 return (0); 4788 } 4789 4790 int 4791 ctl_enable_lun(struct ctl_be_lun *be_lun) 4792 { 4793 struct ctl_softc *ctl_softc; 4794 struct ctl_port *port, *nport; 4795 struct ctl_lun *lun; 4796 int retval; 4797 4798 ctl_softc = control_softc; 4799 4800 lun = (struct ctl_lun *)be_lun->ctl_lun; 4801 4802 mtx_lock(&ctl_softc->ctl_lock); 4803 mtx_lock(&lun->lun_lock); 4804 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4805 /* 4806 * eh? Why did we get called if the LUN is already 4807 * enabled? 4808 */ 4809 mtx_unlock(&lun->lun_lock); 4810 mtx_unlock(&ctl_softc->ctl_lock); 4811 return (0); 4812 } 4813 lun->flags &= ~CTL_LUN_DISABLED; 4814 mtx_unlock(&lun->lun_lock); 4815 4816 for (port = STAILQ_FIRST(&ctl_softc->port_list); port != NULL; port = nport) { 4817 nport = STAILQ_NEXT(port, links); 4818 4819 /* 4820 * Drop the lock while we call the FETD's enable routine. 4821 * This can lead to a callback into CTL (at least in the 4822 * case of the internal initiator frontend. 
4823 */
4824 mtx_unlock(&ctl_softc->ctl_lock);
4825 retval = port->lun_enable(port->targ_lun_arg, lun->target, lun->lun);
4826 mtx_lock(&ctl_softc->ctl_lock);
4827 if (retval != 0) {
4828 printf("%s: FETD %s port %d returned error "
4829 "%d for lun_enable on target %ju lun %jd\n",
4830 __func__, port->port_name, port->targ_port, retval,
4831 (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4832 }
4833 #if 0
4834 else {
4835 /* NOTE: TODO: why does lun enable affect port status? */
4836 port->status |= CTL_PORT_STATUS_LUN_ONLINE;
4837 }
4838 #endif
4839 }
4840 
4841 mtx_unlock(&ctl_softc->ctl_lock);
4842 
4843 return (0);
4844 }
4845 
4846 int
4847 ctl_disable_lun(struct ctl_be_lun *be_lun)
4848 {
4849 struct ctl_softc *ctl_softc;
4850 struct ctl_port *port;
4851 struct ctl_lun *lun;
4852 int retval;
4853 
4854 ctl_softc = control_softc;
4855 
4856 lun = (struct ctl_lun *)be_lun->ctl_lun;
4857 
4858 mtx_lock(&ctl_softc->ctl_lock);
4859 mtx_lock(&lun->lun_lock);
4860 if (lun->flags & CTL_LUN_DISABLED) {
4861 mtx_unlock(&lun->lun_lock);
4862 mtx_unlock(&ctl_softc->ctl_lock);
4863 return (0);
4864 }
4865 lun->flags |= CTL_LUN_DISABLED;
4866 mtx_unlock(&lun->lun_lock);
4867 
4868 STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
4869 mtx_unlock(&ctl_softc->ctl_lock);
4870 /*
4871 * Drop the lock before we call the frontend's disable
4872 * routine, to avoid lock order reversals.
4873 *
4874 * XXX KDM what happens if the frontend list changes while
4875 * we're traversing it? It's unlikely, but should be handled.
4876 */
4877 retval = port->lun_disable(port->targ_lun_arg, lun->target,
4878 lun->lun);
4879 mtx_lock(&ctl_softc->ctl_lock);
4880 if (retval != 0) {
4881 printf("ctl_disable_lun: FETD %s port %d returned error "
4882 "%d for lun_disable on target %ju lun %jd\n",
4883 port->port_name, port->targ_port, retval,
4884 (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4885 }
4886 }
4887 
4888 mtx_unlock(&ctl_softc->ctl_lock);
4889 
4890 return (0);
4891 }
4892 
4893 int
4894 ctl_start_lun(struct ctl_be_lun *be_lun)
4895 {
4896 struct ctl_softc *ctl_softc;
4897 struct ctl_lun *lun;
4898 
4899 ctl_softc = control_softc;
4900 
4901 lun = (struct ctl_lun *)be_lun->ctl_lun;
4902 
4903 mtx_lock(&lun->lun_lock);
4904 lun->flags &= ~CTL_LUN_STOPPED;
4905 mtx_unlock(&lun->lun_lock);
4906 
4907 return (0);
4908 }
4909 
4910 int
4911 ctl_stop_lun(struct ctl_be_lun *be_lun)
4912 {
4913 struct ctl_softc *ctl_softc;
4914 struct ctl_lun *lun;
4915 
4916 ctl_softc = control_softc;
4917 
4918 lun = (struct ctl_lun *)be_lun->ctl_lun;
4919 
4920 mtx_lock(&lun->lun_lock);
4921 lun->flags |= CTL_LUN_STOPPED;
4922 mtx_unlock(&lun->lun_lock);
4923 
4924 return (0);
4925 }
4926 
4927 int
4928 ctl_lun_offline(struct ctl_be_lun *be_lun)
4929 {
4930 struct ctl_softc *ctl_softc;
4931 struct ctl_lun *lun;
4932 
4933 ctl_softc = control_softc;
4934 
4935 lun = (struct ctl_lun *)be_lun->ctl_lun;
4936 
4937 mtx_lock(&lun->lun_lock);
4938 lun->flags |= CTL_LUN_OFFLINE;
4939 mtx_unlock(&lun->lun_lock);
4940 
4941 return (0);
4942 }
4943 
4944 int
4945 ctl_lun_online(struct ctl_be_lun *be_lun)
4946 {
4947 struct ctl_softc *ctl_softc;
4948 struct ctl_lun *lun;
4949 
4950 ctl_softc = control_softc;
4951 
4952 lun = (struct ctl_lun *)be_lun->ctl_lun;
4953 
4954 mtx_lock(&lun->lun_lock);
4955 lun->flags &= ~CTL_LUN_OFFLINE;
4956 mtx_unlock(&lun->lun_lock);
4957 
4958 return (0);
4959 }
4960 
4961 int
4962 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4963 {
4964 struct ctl_softc *ctl_softc;
4965 struct ctl_lun *lun;
4966 
4967 ctl_softc = control_softc;
4968 
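/*
 * be_lun->ctl_lun was stashed by ctl_alloc_lun(), so the backend's
 * handle maps straight back to our ctl_lun.
 */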
4969 lun = (struct ctl_lun *)be_lun->ctl_lun; 4970 4971 mtx_lock(&lun->lun_lock); 4972 4973 /* 4974 * The LUN needs to be disabled before it can be marked invalid. 4975 */ 4976 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4977 mtx_unlock(&lun->lun_lock); 4978 return (-1); 4979 } 4980 /* 4981 * Mark the LUN invalid. 4982 */ 4983 lun->flags |= CTL_LUN_INVALID; 4984 4985 /* 4986 * If there is nothing in the OOA queue, go ahead and free the LUN. 4987 * If we have something in the OOA queue, we'll free it when the 4988 * last I/O completes. 4989 */ 4990 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4991 mtx_unlock(&lun->lun_lock); 4992 mtx_lock(&ctl_softc->ctl_lock); 4993 ctl_free_lun(lun); 4994 mtx_unlock(&ctl_softc->ctl_lock); 4995 } else 4996 mtx_unlock(&lun->lun_lock); 4997 4998 return (0); 4999 } 5000 5001 int 5002 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 5003 { 5004 struct ctl_softc *ctl_softc; 5005 struct ctl_lun *lun; 5006 5007 ctl_softc = control_softc; 5008 lun = (struct ctl_lun *)be_lun->ctl_lun; 5009 5010 mtx_lock(&lun->lun_lock); 5011 lun->flags |= CTL_LUN_INOPERABLE; 5012 mtx_unlock(&lun->lun_lock); 5013 5014 return (0); 5015 } 5016 5017 int 5018 ctl_lun_operable(struct ctl_be_lun *be_lun) 5019 { 5020 struct ctl_softc *ctl_softc; 5021 struct ctl_lun *lun; 5022 5023 ctl_softc = control_softc; 5024 lun = (struct ctl_lun *)be_lun->ctl_lun; 5025 5026 mtx_lock(&lun->lun_lock); 5027 lun->flags &= ~CTL_LUN_INOPERABLE; 5028 mtx_unlock(&lun->lun_lock); 5029 5030 return (0); 5031 } 5032 5033 void 5034 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5035 { 5036 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5037 5038 mtx_lock(&lun->lun_lock); 5039 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 5040 mtx_unlock(&lun->lun_lock); 5041 } 5042 5043 /* 5044 * Backend "memory move is complete" callback for requests that never 5045 * make it down to say RAIDCore's configuration code. 5046 */ 5047 int 5048 ctl_config_move_done(union ctl_io *io) 5049 { 5050 int retval; 5051 5052 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5053 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5054 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5055 5056 if ((io->io_hdr.port_status != 0) && 5057 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5058 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5059 /* 5060 * For hardware error sense keys, the sense key 5061 * specific value is defined to be a retry count, 5062 * but we use it to pass back an internal FETD 5063 * error code. XXX KDM Hopefully the FETD is only 5064 * using 16 bits for an error code, since that's 5065 * all the space we have in the sks field. 5066 */ 5067 ctl_set_internal_failure(&io->scsiio, 5068 /*sks_valid*/ 1, 5069 /*retry_count*/ 5070 io->io_hdr.port_status); 5071 } 5072 5073 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5074 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5075 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5076 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5077 /* 5078 * XXX KDM just assuming a single pointer here, and not a 5079 * S/G list. If we start using S/G lists for config data, 5080 * we'll need to know how to clean them up here as well. 5081 */ 5082 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5083 free(io->scsiio.kern_data_ptr, M_CTL); 5084 ctl_done(io); 5085 retval = CTL_RETVAL_COMPLETE; 5086 } else { 5087 /* 5088 * XXX KDM now we need to continue data movement. Some 5089 * options: 5090 * - call ctl_scsiio() again? 
We don't do this for data 5091 * writes, because for those at least we know ahead of 5092 * time where the write will go and how long it is. For 5093 * config writes, though, that information is largely 5094 * contained within the write itself, thus we need to 5095 * parse out the data again. 5096 * 5097 * - Call some other function once the data is in? 5098 */ 5099 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5100 ctl_data_print(io); 5101 5102 /* 5103 * XXX KDM call ctl_scsiio() again for now, and check flag 5104 * bits to see whether we're allocated or not. 5105 */ 5106 retval = ctl_scsiio(&io->scsiio); 5107 } 5108 return (retval); 5109 } 5110 5111 /* 5112 * This gets called by a backend driver when it is done with a 5113 * data_submit method. 5114 */ 5115 void 5116 ctl_data_submit_done(union ctl_io *io) 5117 { 5118 /* 5119 * If the IO_CONT flag is set, we need to call the supplied 5120 * function to continue processing the I/O, instead of completing 5121 * the I/O just yet. 5122 * 5123 * If there is an error, though, we don't want to keep processing. 5124 * Instead, just send status back to the initiator. 5125 */ 5126 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5127 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5128 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5129 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5130 io->scsiio.io_cont(io); 5131 return; 5132 } 5133 ctl_done(io); 5134 } 5135 5136 /* 5137 * This gets called by a backend driver when it is done with a 5138 * configuration write. 5139 */ 5140 void 5141 ctl_config_write_done(union ctl_io *io) 5142 { 5143 uint8_t *buf; 5144 5145 /* 5146 * If the IO_CONT flag is set, we need to call the supplied 5147 * function to continue processing the I/O, instead of completing 5148 * the I/O just yet. 5149 * 5150 * If there is an error, though, we don't want to keep processing. 5151 * Instead, just send status back to the initiator. 5152 */ 5153 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5154 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5155 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5156 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5157 io->scsiio.io_cont(io); 5158 return; 5159 } 5160 /* 5161 * Since a configuration write can be done for commands that actually 5162 * have data allocated, like write buffer, and commands that have 5163 * no data, like start/stop unit, we need to check here. 5164 */ 5165 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5166 buf = io->scsiio.kern_data_ptr; 5167 else 5168 buf = NULL; 5169 ctl_done(io); 5170 if (buf) 5171 free(buf, M_CTL); 5172 } 5173 5174 void 5175 ctl_config_read_done(union ctl_io *io) 5176 { 5177 uint8_t *buf; 5178 5179 /* 5180 * If there is some error -- we are done, skip data transfer. 5181 */ 5182 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5183 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5184 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5185 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5186 buf = io->scsiio.kern_data_ptr; 5187 else 5188 buf = NULL; 5189 ctl_done(io); 5190 if (buf) 5191 free(buf, M_CTL); 5192 return; 5193 } 5194 5195 /* 5196 * If the IO_CONT flag is set, we need to call the supplied 5197 * function to continue processing the I/O, instead of completing 5198 * the I/O just yet. 5199 */ 5200 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5201 io->scsiio.io_cont(io); 5202 return; 5203 } 5204 5205 ctl_datamove(io); 5206 } 5207 5208 /* 5209 * SCSI release command. 
5210 */
5211 int
5212 ctl_scsi_release(struct ctl_scsiio *ctsio)
5213 {
5214 int length, longid, thirdparty_id, resv_id;
5215 struct ctl_softc *ctl_softc;
5216 struct ctl_lun *lun;
5217 uint32_t residx;
5218 
5219 length = 0;
5220 resv_id = 0;
5221 
5222 CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5223 
5224 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5225 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5226 ctl_softc = control_softc;
5227 
5228 switch (ctsio->cdb[0]) {
5229 case RELEASE_10: {
5230 struct scsi_release_10 *cdb;
5231 
5232 cdb = (struct scsi_release_10 *)ctsio->cdb;
5233 
5234 if (cdb->byte2 & SR10_LONGID)
5235 longid = 1;
5236 else
5237 thirdparty_id = cdb->thirdparty_id;
5238 
5239 resv_id = cdb->resv_id;
5240 length = scsi_2btoul(cdb->length);
5241 break;
5242 }
5243 }
5244 
5245 
5246 /*
5247 * XXX KDM right now, we only support LUN reservation. We don't
5248 * support 3rd party reservations, or extent reservations, which
5249 * might actually need the parameter list. If we've gotten this
5250 * far, we've got a LUN reservation. Anything else got kicked out
5251 * above. So, according to SPC, ignore the length.
5252 */
5253 length = 0;
5254 
5255 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5256 && (length > 0)) {
5257 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5258 ctsio->kern_data_len = length;
5259 ctsio->kern_total_len = length;
5260 ctsio->kern_data_resid = 0;
5261 ctsio->kern_rel_offset = 0;
5262 ctsio->kern_sg_entries = 0;
5263 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5264 ctsio->be_move_done = ctl_config_move_done;
5265 ctl_datamove((union ctl_io *)ctsio);
5266 
5267 return (CTL_RETVAL_COMPLETE);
5268 }
5269 
5270 if (length > 0)
5271 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5272 
5273 mtx_lock(&lun->lun_lock);
5274 
5275 /*
5276 * According to SPC, it is not an error for an initiator to attempt
5277 * to release a reservation on a LUN that isn't reserved, or that
5278 * is reserved by another initiator. The reservation can only be
5279 * released, though, by the initiator who made it or by one of
5280 * several reset type events.
5281 */ 5282 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5283 lun->flags &= ~CTL_LUN_RESERVED; 5284 5285 mtx_unlock(&lun->lun_lock); 5286 5287 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5288 free(ctsio->kern_data_ptr, M_CTL); 5289 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5290 } 5291 5292 ctl_set_success(ctsio); 5293 ctl_done((union ctl_io *)ctsio); 5294 return (CTL_RETVAL_COMPLETE); 5295 } 5296 5297 int 5298 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5299 { 5300 int extent, thirdparty, longid; 5301 int resv_id, length; 5302 uint64_t thirdparty_id; 5303 struct ctl_softc *ctl_softc; 5304 struct ctl_lun *lun; 5305 uint32_t residx; 5306 5307 extent = 0; 5308 thirdparty = 0; 5309 longid = 0; 5310 resv_id = 0; 5311 length = 0; 5312 thirdparty_id = 0; 5313 5314 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5315 5316 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5317 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5318 ctl_softc = control_softc; 5319 5320 switch (ctsio->cdb[0]) { 5321 case RESERVE_10: { 5322 struct scsi_reserve_10 *cdb; 5323 5324 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5325 5326 if (cdb->byte2 & SR10_LONGID) 5327 longid = 1; 5328 else 5329 thirdparty_id = cdb->thirdparty_id; 5330 5331 resv_id = cdb->resv_id; 5332 length = scsi_2btoul(cdb->length); 5333 break; 5334 } 5335 } 5336 5337 /* 5338 * XXX KDM right now, we only support LUN reservation. We don't 5339 * support 3rd party reservations, or extent reservations, which 5340 * might actually need the parameter list. If we've gotten this 5341 * far, we've got a LUN reservation. Anything else got kicked out 5342 * above. So, according to SPC, ignore the length. 5343 */ 5344 length = 0; 5345 5346 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5347 && (length > 0)) { 5348 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5349 ctsio->kern_data_len = length; 5350 ctsio->kern_total_len = length; 5351 ctsio->kern_data_resid = 0; 5352 ctsio->kern_rel_offset = 0; 5353 ctsio->kern_sg_entries = 0; 5354 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5355 ctsio->be_move_done = ctl_config_move_done; 5356 ctl_datamove((union ctl_io *)ctsio); 5357 5358 return (CTL_RETVAL_COMPLETE); 5359 } 5360 5361 if (length > 0) 5362 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5363 5364 mtx_lock(&lun->lun_lock); 5365 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5366 ctl_set_reservation_conflict(ctsio); 5367 goto bailout; 5368 } 5369 5370 lun->flags |= CTL_LUN_RESERVED; 5371 lun->res_idx = residx; 5372 5373 ctl_set_success(ctsio); 5374 5375 bailout: 5376 mtx_unlock(&lun->lun_lock); 5377 5378 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5379 free(ctsio->kern_data_ptr, M_CTL); 5380 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5381 } 5382 5383 ctl_done((union ctl_io *)ctsio); 5384 return (CTL_RETVAL_COMPLETE); 5385 } 5386 5387 int 5388 ctl_start_stop(struct ctl_scsiio *ctsio) 5389 { 5390 struct scsi_start_stop_unit *cdb; 5391 struct ctl_lun *lun; 5392 struct ctl_softc *ctl_softc; 5393 int retval; 5394 5395 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5396 5397 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5398 ctl_softc = control_softc; 5399 retval = 0; 5400 5401 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5402 5403 /* 5404 * XXX KDM 5405 * We don't support the immediate bit on a stop unit. 
In order to 5406 * do that, we would need to code up a way to know that a stop is 5407 * pending, and hold off any new commands until it completes, one 5408 * way or another. Then we could accept or reject those commands 5409 * depending on its status. We would almost need to do the reverse 5410 * of what we do below for an immediate start -- return the copy of 5411 * the ctl_io to the FETD with status to send to the host (and to 5412 * free the copy!) and then free the original I/O once the stop 5413 * actually completes. That way, the OOA queue mechanism can work 5414 * to block commands that shouldn't proceed. Another alternative 5415 * would be to put the copy in the queue in place of the original, 5416 * and return the original back to the caller. That could be 5417 * slightly safer.. 5418 */ 5419 if ((cdb->byte2 & SSS_IMMED) 5420 && ((cdb->how & SSS_START) == 0)) { 5421 ctl_set_invalid_field(ctsio, 5422 /*sks_valid*/ 1, 5423 /*command*/ 1, 5424 /*field*/ 1, 5425 /*bit_valid*/ 1, 5426 /*bit*/ 0); 5427 ctl_done((union ctl_io *)ctsio); 5428 return (CTL_RETVAL_COMPLETE); 5429 } 5430 5431 if ((lun->flags & CTL_LUN_PR_RESERVED) 5432 && ((cdb->how & SSS_START)==0)) { 5433 uint32_t residx; 5434 5435 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5436 if (ctl_get_prkey(lun, residx) == 0 5437 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5438 5439 ctl_set_reservation_conflict(ctsio); 5440 ctl_done((union ctl_io *)ctsio); 5441 return (CTL_RETVAL_COMPLETE); 5442 } 5443 } 5444 5445 /* 5446 * If there is no backend on this device, we can't start or stop 5447 * it. In theory we shouldn't get any start/stop commands in the 5448 * first place at this level if the LUN doesn't have a backend. 5449 * That should get stopped by the command decode code. 5450 */ 5451 if (lun->backend == NULL) { 5452 ctl_set_invalid_opcode(ctsio); 5453 ctl_done((union ctl_io *)ctsio); 5454 return (CTL_RETVAL_COMPLETE); 5455 } 5456 5457 /* 5458 * XXX KDM Copan-specific offline behavior. 5459 * Figure out a reasonable way to port this? 5460 */ 5461 #ifdef NEEDTOPORT 5462 mtx_lock(&lun->lun_lock); 5463 5464 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5465 && (lun->flags & CTL_LUN_OFFLINE)) { 5466 /* 5467 * If the LUN is offline, and the on/offline bit isn't set, 5468 * reject the start or stop. Otherwise, let it through. 5469 */ 5470 mtx_unlock(&lun->lun_lock); 5471 ctl_set_lun_not_ready(ctsio); 5472 ctl_done((union ctl_io *)ctsio); 5473 } else { 5474 mtx_unlock(&lun->lun_lock); 5475 #endif /* NEEDTOPORT */ 5476 /* 5477 * This could be a start or a stop when we're online, 5478 * or a stop/offline or start/online. A start or stop when 5479 * we're offline is covered in the case above. 5480 */ 5481 /* 5482 * In the non-immediate case, we send the request to 5483 * the backend and return status to the user when 5484 * it is done. 5485 * 5486 * In the immediate case, we allocate a new ctl_io 5487 * to hold a copy of the request, and send that to 5488 * the backend. We then set good status on the 5489 * user's request and return it immediately. 
5490 */ 5491 if (cdb->byte2 & SSS_IMMED) { 5492 union ctl_io *new_io; 5493 5494 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5495 ctl_copy_io((union ctl_io *)ctsio, new_io); 5496 retval = lun->backend->config_write(new_io); 5497 ctl_set_success(ctsio); 5498 ctl_done((union ctl_io *)ctsio); 5499 } else { 5500 retval = lun->backend->config_write( 5501 (union ctl_io *)ctsio); 5502 } 5503 #ifdef NEEDTOPORT 5504 } 5505 #endif 5506 return (retval); 5507 } 5508 5509 /* 5510 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5511 * we don't really do anything with the LBA and length fields if the user 5512 * passes them in. Instead we'll just flush out the cache for the entire 5513 * LUN. 5514 */ 5515 int 5516 ctl_sync_cache(struct ctl_scsiio *ctsio) 5517 { 5518 struct ctl_lun *lun; 5519 struct ctl_softc *ctl_softc; 5520 uint64_t starting_lba; 5521 uint32_t block_count; 5522 int retval; 5523 5524 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5525 5526 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5527 ctl_softc = control_softc; 5528 retval = 0; 5529 5530 switch (ctsio->cdb[0]) { 5531 case SYNCHRONIZE_CACHE: { 5532 struct scsi_sync_cache *cdb; 5533 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5534 5535 starting_lba = scsi_4btoul(cdb->begin_lba); 5536 block_count = scsi_2btoul(cdb->lb_count); 5537 break; 5538 } 5539 case SYNCHRONIZE_CACHE_16: { 5540 struct scsi_sync_cache_16 *cdb; 5541 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5542 5543 starting_lba = scsi_8btou64(cdb->begin_lba); 5544 block_count = scsi_4btoul(cdb->lb_count); 5545 break; 5546 } 5547 default: 5548 ctl_set_invalid_opcode(ctsio); 5549 ctl_done((union ctl_io *)ctsio); 5550 goto bailout; 5551 break; /* NOTREACHED */ 5552 } 5553 5554 /* 5555 * We check the LBA and length, but don't do anything with them. 5556 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5557 * get flushed. This check will just help satisfy anyone who wants 5558 * to see an error for an out of range LBA. 5559 */ 5560 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5561 ctl_set_lba_out_of_range(ctsio); 5562 ctl_done((union ctl_io *)ctsio); 5563 goto bailout; 5564 } 5565 5566 /* 5567 * If this LUN has no backend, we can't flush the cache anyway. 5568 */ 5569 if (lun->backend == NULL) { 5570 ctl_set_invalid_opcode(ctsio); 5571 ctl_done((union ctl_io *)ctsio); 5572 goto bailout; 5573 } 5574 5575 /* 5576 * Check to see whether we're configured to send the SYNCHRONIZE 5577 * CACHE command directly to the back end. 
5578 */ 5579 mtx_lock(&lun->lun_lock); 5580 if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC) 5581 && (++(lun->sync_count) >= lun->sync_interval)) { 5582 lun->sync_count = 0; 5583 mtx_unlock(&lun->lun_lock); 5584 retval = lun->backend->config_write((union ctl_io *)ctsio); 5585 } else { 5586 mtx_unlock(&lun->lun_lock); 5587 ctl_set_success(ctsio); 5588 ctl_done((union ctl_io *)ctsio); 5589 } 5590 5591 bailout: 5592 5593 return (retval); 5594 } 5595 5596 int 5597 ctl_format(struct ctl_scsiio *ctsio) 5598 { 5599 struct scsi_format *cdb; 5600 struct ctl_lun *lun; 5601 struct ctl_softc *ctl_softc; 5602 int length, defect_list_len; 5603 5604 CTL_DEBUG_PRINT(("ctl_format\n")); 5605 5606 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5607 ctl_softc = control_softc; 5608 5609 cdb = (struct scsi_format *)ctsio->cdb; 5610 5611 length = 0; 5612 if (cdb->byte2 & SF_FMTDATA) { 5613 if (cdb->byte2 & SF_LONGLIST) 5614 length = sizeof(struct scsi_format_header_long); 5615 else 5616 length = sizeof(struct scsi_format_header_short); 5617 } 5618 5619 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5620 && (length > 0)) { 5621 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5622 ctsio->kern_data_len = length; 5623 ctsio->kern_total_len = length; 5624 ctsio->kern_data_resid = 0; 5625 ctsio->kern_rel_offset = 0; 5626 ctsio->kern_sg_entries = 0; 5627 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5628 ctsio->be_move_done = ctl_config_move_done; 5629 ctl_datamove((union ctl_io *)ctsio); 5630 5631 return (CTL_RETVAL_COMPLETE); 5632 } 5633 5634 defect_list_len = 0; 5635 5636 if (cdb->byte2 & SF_FMTDATA) { 5637 if (cdb->byte2 & SF_LONGLIST) { 5638 struct scsi_format_header_long *header; 5639 5640 header = (struct scsi_format_header_long *) 5641 ctsio->kern_data_ptr; 5642 5643 defect_list_len = scsi_4btoul(header->defect_list_len); 5644 if (defect_list_len != 0) { 5645 ctl_set_invalid_field(ctsio, 5646 /*sks_valid*/ 1, 5647 /*command*/ 0, 5648 /*field*/ 2, 5649 /*bit_valid*/ 0, 5650 /*bit*/ 0); 5651 goto bailout; 5652 } 5653 } else { 5654 struct scsi_format_header_short *header; 5655 5656 header = (struct scsi_format_header_short *) 5657 ctsio->kern_data_ptr; 5658 5659 defect_list_len = scsi_2btoul(header->defect_list_len); 5660 if (defect_list_len != 0) { 5661 ctl_set_invalid_field(ctsio, 5662 /*sks_valid*/ 1, 5663 /*command*/ 0, 5664 /*field*/ 2, 5665 /*bit_valid*/ 0, 5666 /*bit*/ 0); 5667 goto bailout; 5668 } 5669 } 5670 } 5671 5672 /* 5673 * The format command will clear out the "Medium format corrupted" 5674 * status if set by the configuration code. That status is really 5675 * just a way to notify the host that we have lost the media, and 5676 * get them to issue a command that will basically make them think 5677 * they're blowing away the media. 
5678 */ 5679 mtx_lock(&lun->lun_lock); 5680 lun->flags &= ~CTL_LUN_INOPERABLE; 5681 mtx_unlock(&lun->lun_lock); 5682 5683 ctl_set_success(ctsio); 5684 bailout: 5685 5686 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5687 free(ctsio->kern_data_ptr, M_CTL); 5688 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5689 } 5690 5691 ctl_done((union ctl_io *)ctsio); 5692 return (CTL_RETVAL_COMPLETE); 5693 } 5694 5695 int 5696 ctl_read_buffer(struct ctl_scsiio *ctsio) 5697 { 5698 struct scsi_read_buffer *cdb; 5699 struct ctl_lun *lun; 5700 int buffer_offset, len; 5701 static uint8_t descr[4]; 5702 static uint8_t echo_descr[4] = { 0 }; 5703 5704 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5705 5706 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5707 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5708 5709 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5710 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5711 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5712 ctl_set_invalid_field(ctsio, 5713 /*sks_valid*/ 1, 5714 /*command*/ 1, 5715 /*field*/ 1, 5716 /*bit_valid*/ 1, 5717 /*bit*/ 4); 5718 ctl_done((union ctl_io *)ctsio); 5719 return (CTL_RETVAL_COMPLETE); 5720 } 5721 5722 len = scsi_3btoul(cdb->length); 5723 buffer_offset = scsi_3btoul(cdb->offset); 5724 5725 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5726 ctl_set_invalid_field(ctsio, 5727 /*sks_valid*/ 1, 5728 /*command*/ 1, 5729 /*field*/ 6, 5730 /*bit_valid*/ 0, 5731 /*bit*/ 0); 5732 ctl_done((union ctl_io *)ctsio); 5733 return (CTL_RETVAL_COMPLETE); 5734 } 5735 5736 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5737 descr[0] = 0; 5738 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5739 ctsio->kern_data_ptr = descr; 5740 len = min(len, sizeof(descr)); 5741 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5742 ctsio->kern_data_ptr = echo_descr; 5743 len = min(len, sizeof(echo_descr)); 5744 } else { 5745 if (lun->write_buffer == NULL) { 5746 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5747 M_CTL, M_WAITOK); 5748 } 5749 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5750 } 5751 ctsio->kern_data_len = len; 5752 ctsio->kern_total_len = len; 5753 ctsio->kern_data_resid = 0; 5754 ctsio->kern_rel_offset = 0; 5755 ctsio->kern_sg_entries = 0; 5756 ctl_set_success(ctsio); 5757 ctsio->be_move_done = ctl_config_move_done; 5758 ctl_datamove((union ctl_io *)ctsio); 5759 return (CTL_RETVAL_COMPLETE); 5760 } 5761 5762 int 5763 ctl_write_buffer(struct ctl_scsiio *ctsio) 5764 { 5765 struct scsi_write_buffer *cdb; 5766 struct ctl_lun *lun; 5767 int buffer_offset, len; 5768 5769 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5770 5771 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5772 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5773 5774 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5775 ctl_set_invalid_field(ctsio, 5776 /*sks_valid*/ 1, 5777 /*command*/ 1, 5778 /*field*/ 1, 5779 /*bit_valid*/ 1, 5780 /*bit*/ 4); 5781 ctl_done((union ctl_io *)ctsio); 5782 return (CTL_RETVAL_COMPLETE); 5783 } 5784 5785 len = scsi_3btoul(cdb->length); 5786 buffer_offset = scsi_3btoul(cdb->offset); 5787 5788 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5789 ctl_set_invalid_field(ctsio, 5790 /*sks_valid*/ 1, 5791 /*command*/ 1, 5792 /*field*/ 6, 5793 /*bit_valid*/ 0, 5794 /*bit*/ 0); 5795 ctl_done((union ctl_io *)ctsio); 5796 return (CTL_RETVAL_COMPLETE); 5797 } 5798 5799 /* 5800 * If we've got a kernel request that hasn't been malloced yet, 5801 * malloc it and tell the caller the data buffer is here. 
5802 */ 5803 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5804 if (lun->write_buffer == NULL) { 5805 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5806 M_CTL, M_WAITOK); 5807 } 5808 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5809 ctsio->kern_data_len = len; 5810 ctsio->kern_total_len = len; 5811 ctsio->kern_data_resid = 0; 5812 ctsio->kern_rel_offset = 0; 5813 ctsio->kern_sg_entries = 0; 5814 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5815 ctsio->be_move_done = ctl_config_move_done; 5816 ctl_datamove((union ctl_io *)ctsio); 5817 5818 return (CTL_RETVAL_COMPLETE); 5819 } 5820 5821 ctl_set_success(ctsio); 5822 ctl_done((union ctl_io *)ctsio); 5823 return (CTL_RETVAL_COMPLETE); 5824 } 5825 5826 int 5827 ctl_write_same(struct ctl_scsiio *ctsio) 5828 { 5829 struct ctl_lun *lun; 5830 struct ctl_lba_len_flags *lbalen; 5831 uint64_t lba; 5832 uint32_t num_blocks; 5833 int len, retval; 5834 uint8_t byte2; 5835 5836 retval = CTL_RETVAL_COMPLETE; 5837 5838 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5839 5840 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5841 5842 switch (ctsio->cdb[0]) { 5843 case WRITE_SAME_10: { 5844 struct scsi_write_same_10 *cdb; 5845 5846 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5847 5848 lba = scsi_4btoul(cdb->addr); 5849 num_blocks = scsi_2btoul(cdb->length); 5850 byte2 = cdb->byte2; 5851 break; 5852 } 5853 case WRITE_SAME_16: { 5854 struct scsi_write_same_16 *cdb; 5855 5856 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5857 5858 lba = scsi_8btou64(cdb->addr); 5859 num_blocks = scsi_4btoul(cdb->length); 5860 byte2 = cdb->byte2; 5861 break; 5862 } 5863 default: 5864 /* 5865 * We got a command we don't support. This shouldn't 5866 * happen, commands should be filtered out above us. 5867 */ 5868 ctl_set_invalid_opcode(ctsio); 5869 ctl_done((union ctl_io *)ctsio); 5870 5871 return (CTL_RETVAL_COMPLETE); 5872 break; /* NOTREACHED */ 5873 } 5874 5875 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5876 if ((byte2 & SWS_UNMAP) == 0 && 5877 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5878 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5879 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5880 ctl_done((union ctl_io *)ctsio); 5881 return (CTL_RETVAL_COMPLETE); 5882 } 5883 5884 /* 5885 * The first check is to make sure we're in bounds, the second 5886 * check is to catch wrap-around problems. If the lba + num blocks 5887 * is less than the lba, then we've wrapped around and the block 5888 * range is invalid anyway. 5889 */ 5890 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5891 || ((lba + num_blocks) < lba)) { 5892 ctl_set_lba_out_of_range(ctsio); 5893 ctl_done((union ctl_io *)ctsio); 5894 return (CTL_RETVAL_COMPLETE); 5895 } 5896 5897 /* Zero number of blocks means "to the last logical block" */ 5898 if (num_blocks == 0) { 5899 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5900 ctl_set_invalid_field(ctsio, 5901 /*sks_valid*/ 0, 5902 /*command*/ 1, 5903 /*field*/ 0, 5904 /*bit_valid*/ 0, 5905 /*bit*/ 0); 5906 ctl_done((union ctl_io *)ctsio); 5907 return (CTL_RETVAL_COMPLETE); 5908 } 5909 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5910 } 5911 5912 len = lun->be_lun->blocksize; 5913 5914 /* 5915 * If we've got a kernel request that hasn't been malloced yet, 5916 * malloc it and tell the caller the data buffer is here. 
5917 */ 5918 if ((byte2 & SWS_NDOB) == 0 && 5919 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5920 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5921 ctsio->kern_data_len = len; 5922 ctsio->kern_total_len = len; 5923 ctsio->kern_data_resid = 0; 5924 ctsio->kern_rel_offset = 0; 5925 ctsio->kern_sg_entries = 0; 5926 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5927 ctsio->be_move_done = ctl_config_move_done; 5928 ctl_datamove((union ctl_io *)ctsio); 5929 5930 return (CTL_RETVAL_COMPLETE); 5931 } 5932 5933 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5934 lbalen->lba = lba; 5935 lbalen->len = num_blocks; 5936 lbalen->flags = byte2; 5937 retval = lun->backend->config_write((union ctl_io *)ctsio); 5938 5939 return (retval); 5940 } 5941 5942 int 5943 ctl_unmap(struct ctl_scsiio *ctsio) 5944 { 5945 struct ctl_lun *lun; 5946 struct scsi_unmap *cdb; 5947 struct ctl_ptr_len_flags *ptrlen; 5948 struct scsi_unmap_header *hdr; 5949 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5950 uint64_t lba; 5951 uint32_t num_blocks; 5952 int len, retval; 5953 uint8_t byte2; 5954 5955 retval = CTL_RETVAL_COMPLETE; 5956 5957 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5958 5959 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5960 cdb = (struct scsi_unmap *)ctsio->cdb; 5961 5962 len = scsi_2btoul(cdb->length); 5963 byte2 = cdb->byte2; 5964 5965 /* 5966 * If we've got a kernel request that hasn't been malloced yet, 5967 * malloc it and tell the caller the data buffer is here. 5968 */ 5969 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5970 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5971 ctsio->kern_data_len = len; 5972 ctsio->kern_total_len = len; 5973 ctsio->kern_data_resid = 0; 5974 ctsio->kern_rel_offset = 0; 5975 ctsio->kern_sg_entries = 0; 5976 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5977 ctsio->be_move_done = ctl_config_move_done; 5978 ctl_datamove((union ctl_io *)ctsio); 5979 5980 return (CTL_RETVAL_COMPLETE); 5981 } 5982 5983 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5984 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5985 if (len < sizeof (*hdr) || 5986 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5987 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5988 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5989 ctl_set_invalid_field(ctsio, 5990 /*sks_valid*/ 0, 5991 /*command*/ 0, 5992 /*field*/ 0, 5993 /*bit_valid*/ 0, 5994 /*bit*/ 0); 5995 goto done; 5996 } 5997 len = scsi_2btoul(hdr->desc_length); 5998 buf = (struct scsi_unmap_desc *)(hdr + 1); 5999 end = buf + len / sizeof(*buf); 6000 6001 endnz = buf; 6002 for (range = buf; range < end; range++) { 6003 lba = scsi_8btou64(range->lba); 6004 num_blocks = scsi_4btoul(range->length); 6005 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 6006 || ((lba + num_blocks) < lba)) { 6007 ctl_set_lba_out_of_range(ctsio); 6008 ctl_done((union ctl_io *)ctsio); 6009 return (CTL_RETVAL_COMPLETE); 6010 } 6011 if (num_blocks != 0) 6012 endnz = range + 1; 6013 } 6014 6015 /* 6016 * Block backend can not handle zero last range. 6017 * Filter it out and return if there is nothing left. 
6018 */ 6019 len = (uint8_t *)endnz - (uint8_t *)buf; 6020 if (len == 0) { 6021 ctl_set_success(ctsio); 6022 goto done; 6023 } 6024 6025 mtx_lock(&lun->lun_lock); 6026 ptrlen = (struct ctl_ptr_len_flags *) 6027 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6028 ptrlen->ptr = (void *)buf; 6029 ptrlen->len = len; 6030 ptrlen->flags = byte2; 6031 ctl_check_blocked(lun); 6032 mtx_unlock(&lun->lun_lock); 6033 6034 retval = lun->backend->config_write((union ctl_io *)ctsio); 6035 return (retval); 6036 6037 done: 6038 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 6039 free(ctsio->kern_data_ptr, M_CTL); 6040 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 6041 } 6042 ctl_done((union ctl_io *)ctsio); 6043 return (CTL_RETVAL_COMPLETE); 6044 } 6045 6046 /* 6047 * Note that this function currently doesn't actually do anything inside 6048 * CTL to enforce things if the DQue bit is turned on. 6049 * 6050 * Also note that this function can't be used in the default case, because 6051 * the DQue bit isn't set in the changeable mask for the control mode page 6052 * anyway. This is just here as an example for how to implement a page 6053 * handler, and a placeholder in case we want to allow the user to turn 6054 * tagged queueing on and off. 6055 * 6056 * The D_SENSE bit handling is functional, however, and will turn 6057 * descriptor sense on and off for a given LUN. 6058 */ 6059 int 6060 ctl_control_page_handler(struct ctl_scsiio *ctsio, 6061 struct ctl_page_index *page_index, uint8_t *page_ptr) 6062 { 6063 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 6064 struct ctl_lun *lun; 6065 struct ctl_softc *softc; 6066 int set_ua; 6067 uint32_t initidx; 6068 6069 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6070 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6071 set_ua = 0; 6072 6073 user_cp = (struct scsi_control_page *)page_ptr; 6074 current_cp = (struct scsi_control_page *) 6075 (page_index->page_data + (page_index->page_len * 6076 CTL_PAGE_CURRENT)); 6077 saved_cp = (struct scsi_control_page *) 6078 (page_index->page_data + (page_index->page_len * 6079 CTL_PAGE_SAVED)); 6080 6081 softc = control_softc; 6082 6083 mtx_lock(&lun->lun_lock); 6084 if (((current_cp->rlec & SCP_DSENSE) == 0) 6085 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 6086 /* 6087 * Descriptor sense is currently turned off and the user 6088 * wants to turn it on. 6089 */ 6090 current_cp->rlec |= SCP_DSENSE; 6091 saved_cp->rlec |= SCP_DSENSE; 6092 lun->flags |= CTL_LUN_SENSE_DESC; 6093 set_ua = 1; 6094 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 6095 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 6096 /* 6097 * Descriptor sense is currently turned on, and the user 6098 * wants to turn it off. 
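		 * Clearing the bit returns the LUN to fixed format sense
		 * data for subsequent CHECK CONDITIONs.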
6099 */ 6100 current_cp->rlec &= ~SCP_DSENSE; 6101 saved_cp->rlec &= ~SCP_DSENSE; 6102 lun->flags &= ~CTL_LUN_SENSE_DESC; 6103 set_ua = 1; 6104 } 6105 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 6106 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 6107 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6108 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6109 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6110 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6111 set_ua = 1; 6112 } 6113 if ((current_cp->eca_and_aen & SCP_SWP) != 6114 (user_cp->eca_and_aen & SCP_SWP)) { 6115 current_cp->eca_and_aen &= ~SCP_SWP; 6116 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6117 saved_cp->eca_and_aen &= ~SCP_SWP; 6118 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6119 set_ua = 1; 6120 } 6121 if (set_ua != 0) 6122 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6123 mtx_unlock(&lun->lun_lock); 6124 6125 return (0); 6126 } 6127 6128 int 6129 ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 6130 struct ctl_page_index *page_index, uint8_t *page_ptr) 6131 { 6132 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 6133 struct ctl_lun *lun; 6134 int set_ua; 6135 uint32_t initidx; 6136 6137 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6138 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6139 set_ua = 0; 6140 6141 user_cp = (struct scsi_caching_page *)page_ptr; 6142 current_cp = (struct scsi_caching_page *) 6143 (page_index->page_data + (page_index->page_len * 6144 CTL_PAGE_CURRENT)); 6145 saved_cp = (struct scsi_caching_page *) 6146 (page_index->page_data + (page_index->page_len * 6147 CTL_PAGE_SAVED)); 6148 6149 mtx_lock(&lun->lun_lock); 6150 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 6151 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 6152 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6153 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6154 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6155 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6156 set_ua = 1; 6157 } 6158 if (set_ua != 0) 6159 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6160 mtx_unlock(&lun->lun_lock); 6161 6162 return (0); 6163 } 6164 6165 int 6166 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6167 struct ctl_page_index *page_index, 6168 uint8_t *page_ptr) 6169 { 6170 uint8_t *c; 6171 int i; 6172 6173 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6174 ctl_time_io_secs = 6175 (c[0] << 8) | 6176 (c[1] << 0) | 6177 0; 6178 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6179 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6180 printf("page data:"); 6181 for (i=0; i<8; i++) 6182 printf(" %.2x",page_ptr[i]); 6183 printf("\n"); 6184 return (0); 6185 } 6186 6187 int 6188 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6189 struct ctl_page_index *page_index, 6190 int pc) 6191 { 6192 struct copan_debugconf_subpage *page; 6193 6194 page = (struct copan_debugconf_subpage *)page_index->page_data + 6195 (page_index->page_len * pc); 6196 6197 switch (pc) { 6198 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6199 case SMS_PAGE_CTRL_DEFAULT >> 6: 6200 case SMS_PAGE_CTRL_SAVED >> 6: 6201 /* 6202 * We don't update the changable or default bits for this page. 
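		 * Only the CURRENT page control below is refreshed from the
		 * live ctl_time_io_secs value.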
6203 */ 6204 break; 6205 case SMS_PAGE_CTRL_CURRENT >> 6: 6206 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6207 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6208 break; 6209 default: 6210 #ifdef NEEDTOPORT 6211 EPRINT(0, "Invalid PC %d!!", pc); 6212 #endif /* NEEDTOPORT */ 6213 break; 6214 } 6215 return (0); 6216 } 6217 6218 6219 static int 6220 ctl_do_mode_select(union ctl_io *io) 6221 { 6222 struct scsi_mode_page_header *page_header; 6223 struct ctl_page_index *page_index; 6224 struct ctl_scsiio *ctsio; 6225 int control_dev, page_len; 6226 int page_len_offset, page_len_size; 6227 union ctl_modepage_info *modepage_info; 6228 struct ctl_lun *lun; 6229 int *len_left, *len_used; 6230 int retval, i; 6231 6232 ctsio = &io->scsiio; 6233 page_index = NULL; 6234 page_len = 0; 6235 retval = CTL_RETVAL_COMPLETE; 6236 6237 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6238 6239 if (lun->be_lun->lun_type != T_DIRECT) 6240 control_dev = 1; 6241 else 6242 control_dev = 0; 6243 6244 modepage_info = (union ctl_modepage_info *) 6245 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6246 len_left = &modepage_info->header.len_left; 6247 len_used = &modepage_info->header.len_used; 6248 6249 do_next_page: 6250 6251 page_header = (struct scsi_mode_page_header *) 6252 (ctsio->kern_data_ptr + *len_used); 6253 6254 if (*len_left == 0) { 6255 free(ctsio->kern_data_ptr, M_CTL); 6256 ctl_set_success(ctsio); 6257 ctl_done((union ctl_io *)ctsio); 6258 return (CTL_RETVAL_COMPLETE); 6259 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6260 6261 free(ctsio->kern_data_ptr, M_CTL); 6262 ctl_set_param_len_error(ctsio); 6263 ctl_done((union ctl_io *)ctsio); 6264 return (CTL_RETVAL_COMPLETE); 6265 6266 } else if ((page_header->page_code & SMPH_SPF) 6267 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6268 6269 free(ctsio->kern_data_ptr, M_CTL); 6270 ctl_set_param_len_error(ctsio); 6271 ctl_done((union ctl_io *)ctsio); 6272 return (CTL_RETVAL_COMPLETE); 6273 } 6274 6275 6276 /* 6277 * XXX KDM should we do something with the block descriptor? 6278 */ 6279 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6280 6281 if ((control_dev != 0) 6282 && (lun->mode_pages.index[i].page_flags & 6283 CTL_PAGE_FLAG_DISK_ONLY)) 6284 continue; 6285 6286 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6287 (page_header->page_code & SMPH_PC_MASK)) 6288 continue; 6289 6290 /* 6291 * If neither page has a subpage code, then we've got a 6292 * match. 6293 */ 6294 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6295 && ((page_header->page_code & SMPH_SPF) == 0)) { 6296 page_index = &lun->mode_pages.index[i]; 6297 page_len = page_header->page_length; 6298 break; 6299 } 6300 6301 /* 6302 * If both pages have subpages, then the subpage numbers 6303 * have to match. 6304 */ 6305 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6306 && (page_header->page_code & SMPH_SPF)) { 6307 struct scsi_mode_page_header_sp *sph; 6308 6309 sph = (struct scsi_mode_page_header_sp *)page_header; 6310 6311 if (lun->mode_pages.index[i].subpage == 6312 sph->subpage) { 6313 page_index = &lun->mode_pages.index[i]; 6314 page_len = scsi_2btoul(sph->page_length); 6315 break; 6316 } 6317 } 6318 } 6319 6320 /* 6321 * If we couldn't find the page, or if we don't have a mode select 6322 * handler for it, send back an error to the user. 
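	 * The sense key specific data points at the offset of the offending
	 * page header within the parameter list (*len_used).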
 */
	if ((page_index == NULL)
	 || (page_index->select_handler == NULL)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (page_index->page_code & SMPH_SPF) {
		page_len_offset = 2;
		page_len_size = 2;
	} else {
		page_len_size = 1;
		page_len_offset = 1;
	}

	/*
	 * If the length the initiator gives us isn't the one we specify in
	 * the mode page header, or if they didn't specify enough data in
	 * the CDB to avoid truncating this page, kick out the request.
	 */
	if ((page_len != (page_index->page_len - page_len_offset -
			  page_len_size))
	 || (*len_left < page_index->page_len)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + page_len_offset,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Run through the mode page, checking to make sure that the bits
	 * the user changed are actually legal for him to change.
	 */
	for (i = 0; i < page_index->page_len; i++) {
		uint8_t *user_byte, *change_mask, *current_byte;
		int bad_bit;
		int j;

		user_byte = (uint8_t *)page_header + i;
		change_mask = page_index->page_data +
			      (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
		current_byte = page_index->page_data +
			       (page_index->page_len * CTL_PAGE_CURRENT) + i;

		/*
		 * Check to see whether the user set any bits in this byte
		 * that he is not allowed to set.
		 */
		if ((*user_byte & ~(*change_mask)) ==
		    (*current_byte & ~(*change_mask)))
			continue;

		/*
		 * Go through bit by bit to determine which one is illegal.
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
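	 * len_left and len_used were advanced above, so do_next_page picks
	 * up at the following page header in the parameter list.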
6435 */ 6436 if (*len_left > 0) 6437 goto do_next_page; 6438 6439 ctl_set_success(ctsio); 6440 free(ctsio->kern_data_ptr, M_CTL); 6441 ctl_done((union ctl_io *)ctsio); 6442 6443 bailout_no_done: 6444 6445 return (CTL_RETVAL_COMPLETE); 6446 6447 } 6448 6449 int 6450 ctl_mode_select(struct ctl_scsiio *ctsio) 6451 { 6452 int param_len, pf, sp; 6453 int header_size, bd_len; 6454 int len_left, len_used; 6455 struct ctl_page_index *page_index; 6456 struct ctl_lun *lun; 6457 int control_dev, page_len; 6458 union ctl_modepage_info *modepage_info; 6459 int retval; 6460 6461 pf = 0; 6462 sp = 0; 6463 page_len = 0; 6464 len_used = 0; 6465 len_left = 0; 6466 retval = 0; 6467 bd_len = 0; 6468 page_index = NULL; 6469 6470 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6471 6472 if (lun->be_lun->lun_type != T_DIRECT) 6473 control_dev = 1; 6474 else 6475 control_dev = 0; 6476 6477 switch (ctsio->cdb[0]) { 6478 case MODE_SELECT_6: { 6479 struct scsi_mode_select_6 *cdb; 6480 6481 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6482 6483 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6484 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6485 6486 param_len = cdb->length; 6487 header_size = sizeof(struct scsi_mode_header_6); 6488 break; 6489 } 6490 case MODE_SELECT_10: { 6491 struct scsi_mode_select_10 *cdb; 6492 6493 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6494 6495 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6496 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6497 6498 param_len = scsi_2btoul(cdb->length); 6499 header_size = sizeof(struct scsi_mode_header_10); 6500 break; 6501 } 6502 default: 6503 ctl_set_invalid_opcode(ctsio); 6504 ctl_done((union ctl_io *)ctsio); 6505 return (CTL_RETVAL_COMPLETE); 6506 break; /* NOTREACHED */ 6507 } 6508 6509 /* 6510 * From SPC-3: 6511 * "A parameter list length of zero indicates that the Data-Out Buffer 6512 * shall be empty. This condition shall not be considered as an error." 6513 */ 6514 if (param_len == 0) { 6515 ctl_set_success(ctsio); 6516 ctl_done((union ctl_io *)ctsio); 6517 return (CTL_RETVAL_COMPLETE); 6518 } 6519 6520 /* 6521 * Since we'll hit this the first time through, prior to 6522 * allocation, we don't need to free a data buffer here. 6523 */ 6524 if (param_len < header_size) { 6525 ctl_set_param_len_error(ctsio); 6526 ctl_done((union ctl_io *)ctsio); 6527 return (CTL_RETVAL_COMPLETE); 6528 } 6529 6530 /* 6531 * Allocate the data buffer and grab the user's data. In theory, 6532 * we shouldn't have to sanity check the parameter list length here 6533 * because the maximum size is 64K. We should be able to malloc 6534 * that much without too many problems. 
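	 * (param_len comes from a one-byte field for MODE SELECT(6) and a
	 * two-byte field for MODE SELECT(10), so it is bounded at 65535
	 * bytes.)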
6535 */ 6536 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6537 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6538 ctsio->kern_data_len = param_len; 6539 ctsio->kern_total_len = param_len; 6540 ctsio->kern_data_resid = 0; 6541 ctsio->kern_rel_offset = 0; 6542 ctsio->kern_sg_entries = 0; 6543 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6544 ctsio->be_move_done = ctl_config_move_done; 6545 ctl_datamove((union ctl_io *)ctsio); 6546 6547 return (CTL_RETVAL_COMPLETE); 6548 } 6549 6550 switch (ctsio->cdb[0]) { 6551 case MODE_SELECT_6: { 6552 struct scsi_mode_header_6 *mh6; 6553 6554 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6555 bd_len = mh6->blk_desc_len; 6556 break; 6557 } 6558 case MODE_SELECT_10: { 6559 struct scsi_mode_header_10 *mh10; 6560 6561 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6562 bd_len = scsi_2btoul(mh10->blk_desc_len); 6563 break; 6564 } 6565 default: 6566 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6567 break; 6568 } 6569 6570 if (param_len < (header_size + bd_len)) { 6571 free(ctsio->kern_data_ptr, M_CTL); 6572 ctl_set_param_len_error(ctsio); 6573 ctl_done((union ctl_io *)ctsio); 6574 return (CTL_RETVAL_COMPLETE); 6575 } 6576 6577 /* 6578 * Set the IO_CONT flag, so that if this I/O gets passed to 6579 * ctl_config_write_done(), it'll get passed back to 6580 * ctl_do_mode_select() for further processing, or completion if 6581 * we're all done. 6582 */ 6583 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6584 ctsio->io_cont = ctl_do_mode_select; 6585 6586 modepage_info = (union ctl_modepage_info *) 6587 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6588 6589 memset(modepage_info, 0, sizeof(*modepage_info)); 6590 6591 len_left = param_len - header_size - bd_len; 6592 len_used = header_size + bd_len; 6593 6594 modepage_info->header.len_left = len_left; 6595 modepage_info->header.len_used = len_used; 6596 6597 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6598 } 6599 6600 int 6601 ctl_mode_sense(struct ctl_scsiio *ctsio) 6602 { 6603 struct ctl_lun *lun; 6604 int pc, page_code, dbd, llba, subpage; 6605 int alloc_len, page_len, header_len, total_len; 6606 struct scsi_mode_block_descr *block_desc; 6607 struct ctl_page_index *page_index; 6608 int control_dev; 6609 6610 dbd = 0; 6611 llba = 0; 6612 block_desc = NULL; 6613 page_index = NULL; 6614 6615 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6616 6617 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6618 6619 if (lun->be_lun->lun_type != T_DIRECT) 6620 control_dev = 1; 6621 else 6622 control_dev = 0; 6623 6624 switch (ctsio->cdb[0]) { 6625 case MODE_SENSE_6: { 6626 struct scsi_mode_sense_6 *cdb; 6627 6628 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6629 6630 header_len = sizeof(struct scsi_mode_hdr_6); 6631 if (cdb->byte2 & SMS_DBD) 6632 dbd = 1; 6633 else 6634 header_len += sizeof(struct scsi_mode_block_descr); 6635 6636 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6637 page_code = cdb->page & SMS_PAGE_CODE; 6638 subpage = cdb->subpage; 6639 alloc_len = cdb->length; 6640 break; 6641 } 6642 case MODE_SENSE_10: { 6643 struct scsi_mode_sense_10 *cdb; 6644 6645 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6646 6647 header_len = sizeof(struct scsi_mode_hdr_10); 6648 6649 if (cdb->byte2 & SMS_DBD) 6650 dbd = 1; 6651 else 6652 header_len += sizeof(struct scsi_mode_block_descr); 6653 if (cdb->byte2 & SMS10_LLBAA) 6654 llba = 1; 6655 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6656 page_code = cdb->page & SMS_PAGE_CODE; 6657 subpage = cdb->subpage; 6658 alloc_len 
= scsi_2btoul(cdb->length); 6659 break; 6660 } 6661 default: 6662 ctl_set_invalid_opcode(ctsio); 6663 ctl_done((union ctl_io *)ctsio); 6664 return (CTL_RETVAL_COMPLETE); 6665 break; /* NOTREACHED */ 6666 } 6667 6668 /* 6669 * We have to make a first pass through to calculate the size of 6670 * the pages that match the user's query. Then we allocate enough 6671 * memory to hold it, and actually copy the data into the buffer. 6672 */ 6673 switch (page_code) { 6674 case SMS_ALL_PAGES_PAGE: { 6675 int i; 6676 6677 page_len = 0; 6678 6679 /* 6680 * At the moment, values other than 0 and 0xff here are 6681 * reserved according to SPC-3. 6682 */ 6683 if ((subpage != SMS_SUBPAGE_PAGE_0) 6684 && (subpage != SMS_SUBPAGE_ALL)) { 6685 ctl_set_invalid_field(ctsio, 6686 /*sks_valid*/ 1, 6687 /*command*/ 1, 6688 /*field*/ 3, 6689 /*bit_valid*/ 0, 6690 /*bit*/ 0); 6691 ctl_done((union ctl_io *)ctsio); 6692 return (CTL_RETVAL_COMPLETE); 6693 } 6694 6695 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6696 if ((control_dev != 0) 6697 && (lun->mode_pages.index[i].page_flags & 6698 CTL_PAGE_FLAG_DISK_ONLY)) 6699 continue; 6700 6701 /* 6702 * We don't use this subpage if the user didn't 6703 * request all subpages. 6704 */ 6705 if ((lun->mode_pages.index[i].subpage != 0) 6706 && (subpage == SMS_SUBPAGE_PAGE_0)) 6707 continue; 6708 6709 #if 0 6710 printf("found page %#x len %d\n", 6711 lun->mode_pages.index[i].page_code & 6712 SMPH_PC_MASK, 6713 lun->mode_pages.index[i].page_len); 6714 #endif 6715 page_len += lun->mode_pages.index[i].page_len; 6716 } 6717 break; 6718 } 6719 default: { 6720 int i; 6721 6722 page_len = 0; 6723 6724 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6725 /* Look for the right page code */ 6726 if ((lun->mode_pages.index[i].page_code & 6727 SMPH_PC_MASK) != page_code) 6728 continue; 6729 6730 /* Look for the right subpage or the subpage wildcard*/ 6731 if ((lun->mode_pages.index[i].subpage != subpage) 6732 && (subpage != SMS_SUBPAGE_ALL)) 6733 continue; 6734 6735 /* Make sure the page is supported for this dev type */ 6736 if ((control_dev != 0) 6737 && (lun->mode_pages.index[i].page_flags & 6738 CTL_PAGE_FLAG_DISK_ONLY)) 6739 continue; 6740 6741 #if 0 6742 printf("found page %#x len %d\n", 6743 lun->mode_pages.index[i].page_code & 6744 SMPH_PC_MASK, 6745 lun->mode_pages.index[i].page_len); 6746 #endif 6747 6748 page_len += lun->mode_pages.index[i].page_len; 6749 } 6750 6751 if (page_len == 0) { 6752 ctl_set_invalid_field(ctsio, 6753 /*sks_valid*/ 1, 6754 /*command*/ 1, 6755 /*field*/ 2, 6756 /*bit_valid*/ 1, 6757 /*bit*/ 5); 6758 ctl_done((union ctl_io *)ctsio); 6759 return (CTL_RETVAL_COMPLETE); 6760 } 6761 break; 6762 } 6763 } 6764 6765 total_len = header_len + page_len; 6766 #if 0 6767 printf("header_len = %d, page_len = %d, total_len = %d\n", 6768 header_len, page_len, total_len); 6769 #endif 6770 6771 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6772 ctsio->kern_sg_entries = 0; 6773 ctsio->kern_data_resid = 0; 6774 ctsio->kern_rel_offset = 0; 6775 if (total_len < alloc_len) { 6776 ctsio->residual = alloc_len - total_len; 6777 ctsio->kern_data_len = total_len; 6778 ctsio->kern_total_len = total_len; 6779 } else { 6780 ctsio->residual = 0; 6781 ctsio->kern_data_len = alloc_len; 6782 ctsio->kern_total_len = alloc_len; 6783 } 6784 6785 switch (ctsio->cdb[0]) { 6786 case MODE_SENSE_6: { 6787 struct scsi_mode_hdr_6 *header; 6788 6789 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6790 6791 header->datalen = ctl_min(total_len - 1, 254); 6792 if (control_dev == 0) 
{ 6793 header->dev_specific = 0x10; /* DPOFUA */ 6794 if ((lun->flags & CTL_LUN_READONLY) || 6795 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6796 .eca_and_aen & SCP_SWP) != 0) 6797 header->dev_specific |= 0x80; /* WP */ 6798 } 6799 if (dbd) 6800 header->block_descr_len = 0; 6801 else 6802 header->block_descr_len = 6803 sizeof(struct scsi_mode_block_descr); 6804 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6805 break; 6806 } 6807 case MODE_SENSE_10: { 6808 struct scsi_mode_hdr_10 *header; 6809 int datalen; 6810 6811 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6812 6813 datalen = ctl_min(total_len - 2, 65533); 6814 scsi_ulto2b(datalen, header->datalen); 6815 if (control_dev == 0) { 6816 header->dev_specific = 0x10; /* DPOFUA */ 6817 if ((lun->flags & CTL_LUN_READONLY) || 6818 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6819 .eca_and_aen & SCP_SWP) != 0) 6820 header->dev_specific |= 0x80; /* WP */ 6821 } 6822 if (dbd) 6823 scsi_ulto2b(0, header->block_descr_len); 6824 else 6825 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6826 header->block_descr_len); 6827 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6828 break; 6829 } 6830 default: 6831 panic("invalid CDB type %#x", ctsio->cdb[0]); 6832 break; /* NOTREACHED */ 6833 } 6834 6835 /* 6836 * If we've got a disk, use its blocksize in the block 6837 * descriptor. Otherwise, just set it to 0. 6838 */ 6839 if (dbd == 0) { 6840 if (control_dev == 0) 6841 scsi_ulto3b(lun->be_lun->blocksize, 6842 block_desc->block_len); 6843 else 6844 scsi_ulto3b(0, block_desc->block_len); 6845 } 6846 6847 switch (page_code) { 6848 case SMS_ALL_PAGES_PAGE: { 6849 int i, data_used; 6850 6851 data_used = header_len; 6852 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6853 struct ctl_page_index *page_index; 6854 6855 page_index = &lun->mode_pages.index[i]; 6856 6857 if ((control_dev != 0) 6858 && (page_index->page_flags & 6859 CTL_PAGE_FLAG_DISK_ONLY)) 6860 continue; 6861 6862 /* 6863 * We don't use this subpage if the user didn't 6864 * request all subpages. We already checked (above) 6865 * to make sure the user only specified a subpage 6866 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6867 */ 6868 if ((page_index->subpage != 0) 6869 && (subpage == SMS_SUBPAGE_PAGE_0)) 6870 continue; 6871 6872 /* 6873 * Call the handler, if it exists, to update the 6874 * page to the latest values. 6875 */ 6876 if (page_index->sense_handler != NULL) 6877 page_index->sense_handler(ctsio, page_index,pc); 6878 6879 memcpy(ctsio->kern_data_ptr + data_used, 6880 page_index->page_data + 6881 (page_index->page_len * pc), 6882 page_index->page_len); 6883 data_used += page_index->page_len; 6884 } 6885 break; 6886 } 6887 default: { 6888 int i, data_used; 6889 6890 data_used = header_len; 6891 6892 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6893 struct ctl_page_index *page_index; 6894 6895 page_index = &lun->mode_pages.index[i]; 6896 6897 /* Look for the right page code */ 6898 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6899 continue; 6900 6901 /* Look for the right subpage or the subpage wildcard*/ 6902 if ((page_index->subpage != subpage) 6903 && (subpage != SMS_SUBPAGE_ALL)) 6904 continue; 6905 6906 /* Make sure the page is supported for this dev type */ 6907 if ((control_dev != 0) 6908 && (page_index->page_flags & 6909 CTL_PAGE_FLAG_DISK_ONLY)) 6910 continue; 6911 6912 /* 6913 * Call the handler, if it exists, to update the 6914 * page to the latest values. 
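			 * (A handler may pull in state that isn't stored in
			 * the page data itself; the debugconf subpage
			 * handler above, for example, copies in the current
			 * ctl_time_io_secs.)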
6915 */ 6916 if (page_index->sense_handler != NULL) 6917 page_index->sense_handler(ctsio, page_index,pc); 6918 6919 memcpy(ctsio->kern_data_ptr + data_used, 6920 page_index->page_data + 6921 (page_index->page_len * pc), 6922 page_index->page_len); 6923 data_used += page_index->page_len; 6924 } 6925 break; 6926 } 6927 } 6928 6929 ctl_set_success(ctsio); 6930 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6931 ctsio->be_move_done = ctl_config_move_done; 6932 ctl_datamove((union ctl_io *)ctsio); 6933 return (CTL_RETVAL_COMPLETE); 6934 } 6935 6936 int 6937 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6938 struct ctl_page_index *page_index, 6939 int pc) 6940 { 6941 struct ctl_lun *lun; 6942 struct scsi_log_param_header *phdr; 6943 uint8_t *data; 6944 uint64_t val; 6945 6946 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6947 data = page_index->page_data; 6948 6949 if (lun->backend->lun_attr != NULL && 6950 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6951 != UINT64_MAX) { 6952 phdr = (struct scsi_log_param_header *)data; 6953 scsi_ulto2b(0x0001, phdr->param_code); 6954 phdr->param_control = SLP_LBIN | SLP_LP; 6955 phdr->param_len = 8; 6956 data = (uint8_t *)(phdr + 1); 6957 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6958 data[4] = 0x02; /* per-pool */ 6959 data += phdr->param_len; 6960 } 6961 6962 if (lun->backend->lun_attr != NULL && 6963 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6964 != UINT64_MAX) { 6965 phdr = (struct scsi_log_param_header *)data; 6966 scsi_ulto2b(0x0002, phdr->param_code); 6967 phdr->param_control = SLP_LBIN | SLP_LP; 6968 phdr->param_len = 8; 6969 data = (uint8_t *)(phdr + 1); 6970 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6971 data[4] = 0x01; /* per-LUN */ 6972 data += phdr->param_len; 6973 } 6974 6975 if (lun->backend->lun_attr != NULL && 6976 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6977 != UINT64_MAX) { 6978 phdr = (struct scsi_log_param_header *)data; 6979 scsi_ulto2b(0x00f1, phdr->param_code); 6980 phdr->param_control = SLP_LBIN | SLP_LP; 6981 phdr->param_len = 8; 6982 data = (uint8_t *)(phdr + 1); 6983 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6984 data[4] = 0x02; /* per-pool */ 6985 data += phdr->param_len; 6986 } 6987 6988 if (lun->backend->lun_attr != NULL && 6989 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6990 != UINT64_MAX) { 6991 phdr = (struct scsi_log_param_header *)data; 6992 scsi_ulto2b(0x00f2, phdr->param_code); 6993 phdr->param_control = SLP_LBIN | SLP_LP; 6994 phdr->param_len = 8; 6995 data = (uint8_t *)(phdr + 1); 6996 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6997 data[4] = 0x02; /* per-pool */ 6998 data += phdr->param_len; 6999 } 7000 7001 page_index->page_len = data - page_index->page_data; 7002 return (0); 7003 } 7004 7005 int 7006 ctl_log_sense(struct ctl_scsiio *ctsio) 7007 { 7008 struct ctl_lun *lun; 7009 int i, pc, page_code, subpage; 7010 int alloc_len, total_len; 7011 struct ctl_page_index *page_index; 7012 struct scsi_log_sense *cdb; 7013 struct scsi_log_header *header; 7014 7015 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 7016 7017 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7018 cdb = (struct scsi_log_sense *)ctsio->cdb; 7019 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 7020 page_code = cdb->page & SLS_PAGE_CODE; 7021 subpage = cdb->subpage; 7022 alloc_len = scsi_2btoul(cdb->length); 7023 7024 page_index = NULL; 7025 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 7026 page_index = 
&lun->log_pages.index[i]; 7027 7028 /* Look for the right page code */ 7029 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 7030 continue; 7031 7032 /* Look for the right subpage or the subpage wildcard*/ 7033 if (page_index->subpage != subpage) 7034 continue; 7035 7036 break; 7037 } 7038 if (i >= CTL_NUM_LOG_PAGES) { 7039 ctl_set_invalid_field(ctsio, 7040 /*sks_valid*/ 1, 7041 /*command*/ 1, 7042 /*field*/ 2, 7043 /*bit_valid*/ 0, 7044 /*bit*/ 0); 7045 ctl_done((union ctl_io *)ctsio); 7046 return (CTL_RETVAL_COMPLETE); 7047 } 7048 7049 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 7050 7051 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7052 ctsio->kern_sg_entries = 0; 7053 ctsio->kern_data_resid = 0; 7054 ctsio->kern_rel_offset = 0; 7055 if (total_len < alloc_len) { 7056 ctsio->residual = alloc_len - total_len; 7057 ctsio->kern_data_len = total_len; 7058 ctsio->kern_total_len = total_len; 7059 } else { 7060 ctsio->residual = 0; 7061 ctsio->kern_data_len = alloc_len; 7062 ctsio->kern_total_len = alloc_len; 7063 } 7064 7065 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 7066 header->page = page_index->page_code; 7067 if (page_index->subpage) { 7068 header->page |= SL_SPF; 7069 header->subpage = page_index->subpage; 7070 } 7071 scsi_ulto2b(page_index->page_len, header->datalen); 7072 7073 /* 7074 * Call the handler, if it exists, to update the 7075 * page to the latest values. 7076 */ 7077 if (page_index->sense_handler != NULL) 7078 page_index->sense_handler(ctsio, page_index, pc); 7079 7080 memcpy(header + 1, page_index->page_data, page_index->page_len); 7081 7082 ctl_set_success(ctsio); 7083 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7084 ctsio->be_move_done = ctl_config_move_done; 7085 ctl_datamove((union ctl_io *)ctsio); 7086 return (CTL_RETVAL_COMPLETE); 7087 } 7088 7089 int 7090 ctl_read_capacity(struct ctl_scsiio *ctsio) 7091 { 7092 struct scsi_read_capacity *cdb; 7093 struct scsi_read_capacity_data *data; 7094 struct ctl_lun *lun; 7095 uint32_t lba; 7096 7097 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7098 7099 cdb = (struct scsi_read_capacity *)ctsio->cdb; 7100 7101 lba = scsi_4btoul(cdb->addr); 7102 if (((cdb->pmi & SRC_PMI) == 0) 7103 && (lba != 0)) { 7104 ctl_set_invalid_field(/*ctsio*/ ctsio, 7105 /*sks_valid*/ 1, 7106 /*command*/ 1, 7107 /*field*/ 2, 7108 /*bit_valid*/ 0, 7109 /*bit*/ 0); 7110 ctl_done((union ctl_io *)ctsio); 7111 return (CTL_RETVAL_COMPLETE); 7112 } 7113 7114 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7115 7116 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7117 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7118 ctsio->residual = 0; 7119 ctsio->kern_data_len = sizeof(*data); 7120 ctsio->kern_total_len = sizeof(*data); 7121 ctsio->kern_data_resid = 0; 7122 ctsio->kern_rel_offset = 0; 7123 ctsio->kern_sg_entries = 0; 7124 7125 /* 7126 * If the maximum LBA is greater than 0xfffffffe, the user must 7127 * issue a SERVICE ACTION IN (16) command, with the read capacity 7128 * serivce action set. 7129 */ 7130 if (lun->be_lun->maxlba > 0xfffffffe) 7131 scsi_ulto4b(0xffffffff, data->addr); 7132 else 7133 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7134 7135 /* 7136 * XXX KDM this may not be 512 bytes... 
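	 * The length reported below is the backend LUN's logical block
	 * size, whatever that was configured to be.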
7137 */ 7138 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7139 7140 ctl_set_success(ctsio); 7141 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7142 ctsio->be_move_done = ctl_config_move_done; 7143 ctl_datamove((union ctl_io *)ctsio); 7144 return (CTL_RETVAL_COMPLETE); 7145 } 7146 7147 int 7148 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7149 { 7150 struct scsi_read_capacity_16 *cdb; 7151 struct scsi_read_capacity_data_long *data; 7152 struct ctl_lun *lun; 7153 uint64_t lba; 7154 uint32_t alloc_len; 7155 7156 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7157 7158 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7159 7160 alloc_len = scsi_4btoul(cdb->alloc_len); 7161 lba = scsi_8btou64(cdb->addr); 7162 7163 if ((cdb->reladr & SRC16_PMI) 7164 && (lba != 0)) { 7165 ctl_set_invalid_field(/*ctsio*/ ctsio, 7166 /*sks_valid*/ 1, 7167 /*command*/ 1, 7168 /*field*/ 2, 7169 /*bit_valid*/ 0, 7170 /*bit*/ 0); 7171 ctl_done((union ctl_io *)ctsio); 7172 return (CTL_RETVAL_COMPLETE); 7173 } 7174 7175 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7176 7177 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7178 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7179 7180 if (sizeof(*data) < alloc_len) { 7181 ctsio->residual = alloc_len - sizeof(*data); 7182 ctsio->kern_data_len = sizeof(*data); 7183 ctsio->kern_total_len = sizeof(*data); 7184 } else { 7185 ctsio->residual = 0; 7186 ctsio->kern_data_len = alloc_len; 7187 ctsio->kern_total_len = alloc_len; 7188 } 7189 ctsio->kern_data_resid = 0; 7190 ctsio->kern_rel_offset = 0; 7191 ctsio->kern_sg_entries = 0; 7192 7193 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7194 /* XXX KDM this may not be 512 bytes... */ 7195 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7196 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7197 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7198 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7199 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7200 7201 ctl_set_success(ctsio); 7202 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7203 ctsio->be_move_done = ctl_config_move_done; 7204 ctl_datamove((union ctl_io *)ctsio); 7205 return (CTL_RETVAL_COMPLETE); 7206 } 7207 7208 int 7209 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7210 { 7211 struct scsi_get_lba_status *cdb; 7212 struct scsi_get_lba_status_data *data; 7213 struct ctl_lun *lun; 7214 struct ctl_lba_len_flags *lbalen; 7215 uint64_t lba; 7216 uint32_t alloc_len, total_len; 7217 int retval; 7218 7219 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7220 7221 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7222 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7223 lba = scsi_8btou64(cdb->addr); 7224 alloc_len = scsi_4btoul(cdb->alloc_len); 7225 7226 if (lba > lun->be_lun->maxlba) { 7227 ctl_set_lba_out_of_range(ctsio); 7228 ctl_done((union ctl_io *)ctsio); 7229 return (CTL_RETVAL_COMPLETE); 7230 } 7231 7232 total_len = sizeof(*data) + sizeof(data->descr[0]); 7233 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7234 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7235 7236 if (total_len < alloc_len) { 7237 ctsio->residual = alloc_len - total_len; 7238 ctsio->kern_data_len = total_len; 7239 ctsio->kern_total_len = total_len; 7240 } else { 7241 ctsio->residual = 0; 7242 ctsio->kern_data_len = alloc_len; 7243 ctsio->kern_total_len = alloc_len; 7244 } 7245 ctsio->kern_data_resid = 0; 7246 ctsio->kern_rel_offset = 0; 7247 
ctsio->kern_sg_entries = 0; 7248 7249 /* Fill dummy data in case backend can't tell anything. */ 7250 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7251 scsi_u64to8b(lba, data->descr[0].addr); 7252 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7253 data->descr[0].length); 7254 data->descr[0].status = 0; /* Mapped or unknown. */ 7255 7256 ctl_set_success(ctsio); 7257 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7258 ctsio->be_move_done = ctl_config_move_done; 7259 7260 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7261 lbalen->lba = lba; 7262 lbalen->len = total_len; 7263 lbalen->flags = 0; 7264 retval = lun->backend->config_read((union ctl_io *)ctsio); 7265 return (CTL_RETVAL_COMPLETE); 7266 } 7267 7268 int 7269 ctl_read_defect(struct ctl_scsiio *ctsio) 7270 { 7271 struct scsi_read_defect_data_10 *ccb10; 7272 struct scsi_read_defect_data_12 *ccb12; 7273 struct scsi_read_defect_data_hdr_10 *data10; 7274 struct scsi_read_defect_data_hdr_12 *data12; 7275 uint32_t alloc_len, data_len; 7276 uint8_t format; 7277 7278 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7279 7280 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7281 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7282 format = ccb10->format; 7283 alloc_len = scsi_2btoul(ccb10->alloc_length); 7284 data_len = sizeof(*data10); 7285 } else { 7286 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7287 format = ccb12->format; 7288 alloc_len = scsi_4btoul(ccb12->alloc_length); 7289 data_len = sizeof(*data12); 7290 } 7291 if (alloc_len == 0) { 7292 ctl_set_success(ctsio); 7293 ctl_done((union ctl_io *)ctsio); 7294 return (CTL_RETVAL_COMPLETE); 7295 } 7296 7297 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7298 if (data_len < alloc_len) { 7299 ctsio->residual = alloc_len - data_len; 7300 ctsio->kern_data_len = data_len; 7301 ctsio->kern_total_len = data_len; 7302 } else { 7303 ctsio->residual = 0; 7304 ctsio->kern_data_len = alloc_len; 7305 ctsio->kern_total_len = alloc_len; 7306 } 7307 ctsio->kern_data_resid = 0; 7308 ctsio->kern_rel_offset = 0; 7309 ctsio->kern_sg_entries = 0; 7310 7311 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7312 data10 = (struct scsi_read_defect_data_hdr_10 *) 7313 ctsio->kern_data_ptr; 7314 data10->format = format; 7315 scsi_ulto2b(0, data10->length); 7316 } else { 7317 data12 = (struct scsi_read_defect_data_hdr_12 *) 7318 ctsio->kern_data_ptr; 7319 data12->format = format; 7320 scsi_ulto2b(0, data12->generation); 7321 scsi_ulto4b(0, data12->length); 7322 } 7323 7324 ctl_set_success(ctsio); 7325 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7326 ctsio->be_move_done = ctl_config_move_done; 7327 ctl_datamove((union ctl_io *)ctsio); 7328 return (CTL_RETVAL_COMPLETE); 7329 } 7330 7331 int 7332 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7333 { 7334 struct scsi_maintenance_in *cdb; 7335 int retval; 7336 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; 7337 int num_target_port_groups, num_target_ports; 7338 struct ctl_lun *lun; 7339 struct ctl_softc *softc; 7340 struct ctl_port *port; 7341 struct scsi_target_group_data *rtg_ptr; 7342 struct scsi_target_group_data_extended *rtg_ext_ptr; 7343 struct scsi_target_port_group_descriptor *tpg_desc; 7344 7345 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7346 7347 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7348 softc = control_softc; 7349 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7350 7351 retval = CTL_RETVAL_COMPLETE; 7352 7353 switch (cdb->byte2 
& STG_PDF_MASK) { 7354 case STG_PDF_LENGTH: 7355 ext = 0; 7356 break; 7357 case STG_PDF_EXTENDED: 7358 ext = 1; 7359 break; 7360 default: 7361 ctl_set_invalid_field(/*ctsio*/ ctsio, 7362 /*sks_valid*/ 1, 7363 /*command*/ 1, 7364 /*field*/ 2, 7365 /*bit_valid*/ 1, 7366 /*bit*/ 5); 7367 ctl_done((union ctl_io *)ctsio); 7368 return(retval); 7369 } 7370 7371 if (softc->is_single) 7372 num_target_port_groups = 1; 7373 else 7374 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7375 num_target_ports = 0; 7376 mtx_lock(&softc->ctl_lock); 7377 STAILQ_FOREACH(port, &softc->port_list, links) { 7378 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7379 continue; 7380 if (ctl_map_lun_back(port->targ_port, lun->lun) >= CTL_MAX_LUNS) 7381 continue; 7382 num_target_ports++; 7383 } 7384 mtx_unlock(&softc->ctl_lock); 7385 7386 if (ext) 7387 total_len = sizeof(struct scsi_target_group_data_extended); 7388 else 7389 total_len = sizeof(struct scsi_target_group_data); 7390 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7391 num_target_port_groups + 7392 sizeof(struct scsi_target_port_descriptor) * 7393 num_target_ports * num_target_port_groups; 7394 7395 alloc_len = scsi_4btoul(cdb->length); 7396 7397 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7398 7399 ctsio->kern_sg_entries = 0; 7400 7401 if (total_len < alloc_len) { 7402 ctsio->residual = alloc_len - total_len; 7403 ctsio->kern_data_len = total_len; 7404 ctsio->kern_total_len = total_len; 7405 } else { 7406 ctsio->residual = 0; 7407 ctsio->kern_data_len = alloc_len; 7408 ctsio->kern_total_len = alloc_len; 7409 } 7410 ctsio->kern_data_resid = 0; 7411 ctsio->kern_rel_offset = 0; 7412 7413 if (ext) { 7414 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7415 ctsio->kern_data_ptr; 7416 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7417 rtg_ext_ptr->format_type = 0x10; 7418 rtg_ext_ptr->implicit_transition_time = 0; 7419 tpg_desc = &rtg_ext_ptr->groups[0]; 7420 } else { 7421 rtg_ptr = (struct scsi_target_group_data *) 7422 ctsio->kern_data_ptr; 7423 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7424 tpg_desc = &rtg_ptr->groups[0]; 7425 } 7426 7427 mtx_lock(&softc->ctl_lock); 7428 pg = softc->port_offset / CTL_MAX_PORTS; 7429 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { 7430 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { 7431 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7432 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7433 } else if (lun->flags & CTL_LUN_PRIMARY_SC) { 7434 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7435 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7436 } else { 7437 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7438 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7439 } 7440 } else { 7441 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 7442 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7443 } 7444 for (g = 0; g < num_target_port_groups; g++) { 7445 tpg_desc->pref_state = (g == pg) ? gs : os; 7446 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; 7447 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7448 tpg_desc->status = TPG_IMPLICIT; 7449 pc = 0; 7450 STAILQ_FOREACH(port, &softc->port_list, links) { 7451 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7452 continue; 7453 if (ctl_map_lun_back(port->targ_port, lun->lun) >= 7454 CTL_MAX_LUNS) 7455 continue; 7456 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 7457 scsi_ulto2b(p, tpg_desc->descriptors[pc]. 
7458 relative_target_port_identifier); 7459 pc++; 7460 } 7461 tpg_desc->target_port_count = pc; 7462 tpg_desc = (struct scsi_target_port_group_descriptor *) 7463 &tpg_desc->descriptors[pc]; 7464 } 7465 mtx_unlock(&softc->ctl_lock); 7466 7467 ctl_set_success(ctsio); 7468 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7469 ctsio->be_move_done = ctl_config_move_done; 7470 ctl_datamove((union ctl_io *)ctsio); 7471 return(retval); 7472 } 7473 7474 int 7475 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7476 { 7477 struct ctl_lun *lun; 7478 struct scsi_report_supported_opcodes *cdb; 7479 const struct ctl_cmd_entry *entry, *sentry; 7480 struct scsi_report_supported_opcodes_all *all; 7481 struct scsi_report_supported_opcodes_descr *descr; 7482 struct scsi_report_supported_opcodes_one *one; 7483 int retval; 7484 int alloc_len, total_len; 7485 int opcode, service_action, i, j, num; 7486 7487 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7488 7489 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7490 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7491 7492 retval = CTL_RETVAL_COMPLETE; 7493 7494 opcode = cdb->requested_opcode; 7495 service_action = scsi_2btoul(cdb->requested_service_action); 7496 switch (cdb->options & RSO_OPTIONS_MASK) { 7497 case RSO_OPTIONS_ALL: 7498 num = 0; 7499 for (i = 0; i < 256; i++) { 7500 entry = &ctl_cmd_table[i]; 7501 if (entry->flags & CTL_CMD_FLAG_SA5) { 7502 for (j = 0; j < 32; j++) { 7503 sentry = &((const struct ctl_cmd_entry *) 7504 entry->execute)[j]; 7505 if (ctl_cmd_applicable( 7506 lun->be_lun->lun_type, sentry)) 7507 num++; 7508 } 7509 } else { 7510 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7511 entry)) 7512 num++; 7513 } 7514 } 7515 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7516 num * sizeof(struct scsi_report_supported_opcodes_descr); 7517 break; 7518 case RSO_OPTIONS_OC: 7519 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7520 ctl_set_invalid_field(/*ctsio*/ ctsio, 7521 /*sks_valid*/ 1, 7522 /*command*/ 1, 7523 /*field*/ 2, 7524 /*bit_valid*/ 1, 7525 /*bit*/ 2); 7526 ctl_done((union ctl_io *)ctsio); 7527 return (CTL_RETVAL_COMPLETE); 7528 } 7529 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7530 break; 7531 case RSO_OPTIONS_OC_SA: 7532 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7533 service_action >= 32) { 7534 ctl_set_invalid_field(/*ctsio*/ ctsio, 7535 /*sks_valid*/ 1, 7536 /*command*/ 1, 7537 /*field*/ 2, 7538 /*bit_valid*/ 1, 7539 /*bit*/ 2); 7540 ctl_done((union ctl_io *)ctsio); 7541 return (CTL_RETVAL_COMPLETE); 7542 } 7543 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7544 break; 7545 default: 7546 ctl_set_invalid_field(/*ctsio*/ ctsio, 7547 /*sks_valid*/ 1, 7548 /*command*/ 1, 7549 /*field*/ 2, 7550 /*bit_valid*/ 1, 7551 /*bit*/ 2); 7552 ctl_done((union ctl_io *)ctsio); 7553 return (CTL_RETVAL_COMPLETE); 7554 } 7555 7556 alloc_len = scsi_4btoul(cdb->length); 7557 7558 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7559 7560 ctsio->kern_sg_entries = 0; 7561 7562 if (total_len < alloc_len) { 7563 ctsio->residual = alloc_len - total_len; 7564 ctsio->kern_data_len = total_len; 7565 ctsio->kern_total_len = total_len; 7566 } else { 7567 ctsio->residual = 0; 7568 ctsio->kern_data_len = alloc_len; 7569 ctsio->kern_total_len = alloc_len; 7570 } 7571 ctsio->kern_data_resid = 0; 7572 ctsio->kern_rel_offset = 0; 7573 7574 switch (cdb->options & RSO_OPTIONS_MASK) { 7575 case RSO_OPTIONS_ALL: 7576 all = (struct 
scsi_report_supported_opcodes_all *) 7577 ctsio->kern_data_ptr; 7578 num = 0; 7579 for (i = 0; i < 256; i++) { 7580 entry = &ctl_cmd_table[i]; 7581 if (entry->flags & CTL_CMD_FLAG_SA5) { 7582 for (j = 0; j < 32; j++) { 7583 sentry = &((const struct ctl_cmd_entry *) 7584 entry->execute)[j]; 7585 if (!ctl_cmd_applicable( 7586 lun->be_lun->lun_type, sentry)) 7587 continue; 7588 descr = &all->descr[num++]; 7589 descr->opcode = i; 7590 scsi_ulto2b(j, descr->service_action); 7591 descr->flags = RSO_SERVACTV; 7592 scsi_ulto2b(sentry->length, 7593 descr->cdb_length); 7594 } 7595 } else { 7596 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7597 entry)) 7598 continue; 7599 descr = &all->descr[num++]; 7600 descr->opcode = i; 7601 scsi_ulto2b(0, descr->service_action); 7602 descr->flags = 0; 7603 scsi_ulto2b(entry->length, descr->cdb_length); 7604 } 7605 } 7606 scsi_ulto4b( 7607 num * sizeof(struct scsi_report_supported_opcodes_descr), 7608 all->length); 7609 break; 7610 case RSO_OPTIONS_OC: 7611 one = (struct scsi_report_supported_opcodes_one *) 7612 ctsio->kern_data_ptr; 7613 entry = &ctl_cmd_table[opcode]; 7614 goto fill_one; 7615 case RSO_OPTIONS_OC_SA: 7616 one = (struct scsi_report_supported_opcodes_one *) 7617 ctsio->kern_data_ptr; 7618 entry = &ctl_cmd_table[opcode]; 7619 entry = &((const struct ctl_cmd_entry *) 7620 entry->execute)[service_action]; 7621 fill_one: 7622 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7623 one->support = 3; 7624 scsi_ulto2b(entry->length, one->cdb_length); 7625 one->cdb_usage[0] = opcode; 7626 memcpy(&one->cdb_usage[1], entry->usage, 7627 entry->length - 1); 7628 } else 7629 one->support = 1; 7630 break; 7631 } 7632 7633 ctl_set_success(ctsio); 7634 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7635 ctsio->be_move_done = ctl_config_move_done; 7636 ctl_datamove((union ctl_io *)ctsio); 7637 return(retval); 7638 } 7639 7640 int 7641 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7642 { 7643 struct scsi_report_supported_tmf *cdb; 7644 struct scsi_report_supported_tmf_data *data; 7645 int retval; 7646 int alloc_len, total_len; 7647 7648 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7649 7650 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7651 7652 retval = CTL_RETVAL_COMPLETE; 7653 7654 total_len = sizeof(struct scsi_report_supported_tmf_data); 7655 alloc_len = scsi_4btoul(cdb->length); 7656 7657 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7658 7659 ctsio->kern_sg_entries = 0; 7660 7661 if (total_len < alloc_len) { 7662 ctsio->residual = alloc_len - total_len; 7663 ctsio->kern_data_len = total_len; 7664 ctsio->kern_total_len = total_len; 7665 } else { 7666 ctsio->residual = 0; 7667 ctsio->kern_data_len = alloc_len; 7668 ctsio->kern_total_len = alloc_len; 7669 } 7670 ctsio->kern_data_resid = 0; 7671 ctsio->kern_rel_offset = 0; 7672 7673 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7674 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 7675 data->byte2 |= RST_ITNRS; 7676 7677 ctl_set_success(ctsio); 7678 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7679 ctsio->be_move_done = ctl_config_move_done; 7680 ctl_datamove((union ctl_io *)ctsio); 7681 return (retval); 7682 } 7683 7684 int 7685 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7686 { 7687 struct scsi_report_timestamp *cdb; 7688 struct scsi_report_timestamp_data *data; 7689 struct timeval tv; 7690 int64_t timestamp; 7691 int retval; 7692 int alloc_len, total_len; 7693 7694 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7695 7696 cdb = 
(struct scsi_report_timestamp *)ctsio->cdb; 7697 7698 retval = CTL_RETVAL_COMPLETE; 7699 7700 total_len = sizeof(struct scsi_report_timestamp_data); 7701 alloc_len = scsi_4btoul(cdb->length); 7702 7703 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7704 7705 ctsio->kern_sg_entries = 0; 7706 7707 if (total_len < alloc_len) { 7708 ctsio->residual = alloc_len - total_len; 7709 ctsio->kern_data_len = total_len; 7710 ctsio->kern_total_len = total_len; 7711 } else { 7712 ctsio->residual = 0; 7713 ctsio->kern_data_len = alloc_len; 7714 ctsio->kern_total_len = alloc_len; 7715 } 7716 ctsio->kern_data_resid = 0; 7717 ctsio->kern_rel_offset = 0; 7718 7719 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7720 scsi_ulto2b(sizeof(*data) - 2, data->length); 7721 data->origin = RTS_ORIG_OUTSIDE; 7722 getmicrotime(&tv); 7723 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7724 scsi_ulto4b(timestamp >> 16, data->timestamp); 7725 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7726 7727 ctl_set_success(ctsio); 7728 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7729 ctsio->be_move_done = ctl_config_move_done; 7730 ctl_datamove((union ctl_io *)ctsio); 7731 return (retval); 7732 } 7733 7734 int 7735 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7736 { 7737 struct scsi_per_res_in *cdb; 7738 int alloc_len, total_len = 0; 7739 /* struct scsi_per_res_in_rsrv in_data; */ 7740 struct ctl_lun *lun; 7741 struct ctl_softc *softc; 7742 uint64_t key; 7743 7744 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7745 7746 softc = control_softc; 7747 7748 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7749 7750 alloc_len = scsi_2btoul(cdb->length); 7751 7752 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7753 7754 retry: 7755 mtx_lock(&lun->lun_lock); 7756 switch (cdb->action) { 7757 case SPRI_RK: /* read keys */ 7758 total_len = sizeof(struct scsi_per_res_in_keys) + 7759 lun->pr_key_count * 7760 sizeof(struct scsi_per_res_key); 7761 break; 7762 case SPRI_RR: /* read reservation */ 7763 if (lun->flags & CTL_LUN_PR_RESERVED) 7764 total_len = sizeof(struct scsi_per_res_in_rsrv); 7765 else 7766 total_len = sizeof(struct scsi_per_res_in_header); 7767 break; 7768 case SPRI_RC: /* report capabilities */ 7769 total_len = sizeof(struct scsi_per_res_cap); 7770 break; 7771 case SPRI_RS: /* read full status */ 7772 total_len = sizeof(struct scsi_per_res_in_header) + 7773 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7774 lun->pr_key_count; 7775 break; 7776 default: 7777 panic("Invalid PR type %x", cdb->action); 7778 } 7779 mtx_unlock(&lun->lun_lock); 7780 7781 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7782 7783 if (total_len < alloc_len) { 7784 ctsio->residual = alloc_len - total_len; 7785 ctsio->kern_data_len = total_len; 7786 ctsio->kern_total_len = total_len; 7787 } else { 7788 ctsio->residual = 0; 7789 ctsio->kern_data_len = alloc_len; 7790 ctsio->kern_total_len = alloc_len; 7791 } 7792 7793 ctsio->kern_data_resid = 0; 7794 ctsio->kern_rel_offset = 0; 7795 ctsio->kern_sg_entries = 0; 7796 7797 mtx_lock(&lun->lun_lock); 7798 switch (cdb->action) { 7799 case SPRI_RK: { // read keys 7800 struct scsi_per_res_in_keys *res_keys; 7801 int i, key_count; 7802 7803 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7804 7805 /* 7806 * We had to drop the lock to allocate our buffer, which 7807 * leaves time for someone to come in with another 7808 * persistent reservation. 
(That is unlikely, though, 7809 * since this should be the only persistent reservation 7810 * command active right now.) 7811 */ 7812 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7813 (lun->pr_key_count * 7814 sizeof(struct scsi_per_res_key)))){ 7815 mtx_unlock(&lun->lun_lock); 7816 free(ctsio->kern_data_ptr, M_CTL); 7817 printf("%s: reservation length changed, retrying\n", 7818 __func__); 7819 goto retry; 7820 } 7821 7822 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7823 7824 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7825 lun->pr_key_count, res_keys->header.length); 7826 7827 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7828 if ((key = ctl_get_prkey(lun, i)) == 0) 7829 continue; 7830 7831 /* 7832 * We used lun->pr_key_count to calculate the 7833 * size to allocate. If it turns out the number of 7834 * initiators with the registered flag set is 7835 * larger than that (i.e. they haven't been kept in 7836 * sync), we've got a problem. 7837 */ 7838 if (key_count >= lun->pr_key_count) { 7839 #ifdef NEEDTOPORT 7840 csevent_log(CSC_CTL | CSC_SHELF_SW | 7841 CTL_PR_ERROR, 7842 csevent_LogType_Fault, 7843 csevent_AlertLevel_Yellow, 7844 csevent_FRU_ShelfController, 7845 csevent_FRU_Firmware, 7846 csevent_FRU_Unknown, 7847 "registered keys %d >= key " 7848 "count %d", key_count, 7849 lun->pr_key_count); 7850 #endif 7851 key_count++; 7852 continue; 7853 } 7854 scsi_u64to8b(key, res_keys->keys[key_count].key); 7855 key_count++; 7856 } 7857 break; 7858 } 7859 case SPRI_RR: { // read reservation 7860 struct scsi_per_res_in_rsrv *res; 7861 int tmp_len, header_only; 7862 7863 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7864 7865 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7866 7867 if (lun->flags & CTL_LUN_PR_RESERVED) 7868 { 7869 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7870 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7871 res->header.length); 7872 header_only = 0; 7873 } else { 7874 tmp_len = sizeof(struct scsi_per_res_in_header); 7875 scsi_ulto4b(0, res->header.length); 7876 header_only = 1; 7877 } 7878 7879 /* 7880 * We had to drop the lock to allocate our buffer, which 7881 * leaves time for someone to come in with another 7882 * persistent reservation. (That is unlikely, though, 7883 * since this should be the only persistent reservation 7884 * command active right now.) 7885 */ 7886 if (tmp_len != total_len) { 7887 mtx_unlock(&lun->lun_lock); 7888 free(ctsio->kern_data_ptr, M_CTL); 7889 printf("%s: reservation status changed, retrying\n", 7890 __func__); 7891 goto retry; 7892 } 7893 7894 /* 7895 * No reservation held, so we're done. 7896 */ 7897 if (header_only != 0) 7898 break; 7899 7900 /* 7901 * If the registration is an All Registrants type, the key 7902 * is 0, since it doesn't really matter. 
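		 * (pr_res_idx holds the CTL_PR_ALL_REGISTRANTS sentinel in
		 * that case rather than a registrant index, so there is no
		 * single key to report.)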
7903 */ 7904 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7905 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7906 res->data.reservation); 7907 } 7908 res->data.scopetype = lun->res_type; 7909 break; 7910 } 7911 case SPRI_RC: //report capabilities 7912 { 7913 struct scsi_per_res_cap *res_cap; 7914 uint16_t type_mask; 7915 7916 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7917 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7918 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7919 type_mask = SPRI_TM_WR_EX_AR | 7920 SPRI_TM_EX_AC_RO | 7921 SPRI_TM_WR_EX_RO | 7922 SPRI_TM_EX_AC | 7923 SPRI_TM_WR_EX | 7924 SPRI_TM_EX_AC_AR; 7925 scsi_ulto2b(type_mask, res_cap->type_mask); 7926 break; 7927 } 7928 case SPRI_RS: { // read full status 7929 struct scsi_per_res_in_full *res_status; 7930 struct scsi_per_res_in_full_desc *res_desc; 7931 struct ctl_port *port; 7932 int i, len; 7933 7934 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7935 7936 /* 7937 * We had to drop the lock to allocate our buffer, which 7938 * leaves time for someone to come in with another 7939 * persistent reservation. (That is unlikely, though, 7940 * since this should be the only persistent reservation 7941 * command active right now.) 7942 */ 7943 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7944 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7945 lun->pr_key_count)){ 7946 mtx_unlock(&lun->lun_lock); 7947 free(ctsio->kern_data_ptr, M_CTL); 7948 printf("%s: reservation length changed, retrying\n", 7949 __func__); 7950 goto retry; 7951 } 7952 7953 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7954 7955 res_desc = &res_status->desc[0]; 7956 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7957 if ((key = ctl_get_prkey(lun, i)) == 0) 7958 continue; 7959 7960 scsi_u64to8b(key, res_desc->res_key.key); 7961 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7962 (lun->pr_res_idx == i || 7963 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7964 res_desc->flags = SPRI_FULL_R_HOLDER; 7965 res_desc->scopetype = lun->res_type; 7966 } 7967 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7968 res_desc->rel_trgt_port_id); 7969 len = 0; 7970 port = softc->ctl_ports[ 7971 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; 7972 if (port != NULL) 7973 len = ctl_create_iid(port, 7974 i % CTL_MAX_INIT_PER_PORT, 7975 res_desc->transport_id); 7976 scsi_ulto4b(len, res_desc->additional_length); 7977 res_desc = (struct scsi_per_res_in_full_desc *) 7978 &res_desc->transport_id[len]; 7979 } 7980 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7981 res_status->header.length); 7982 break; 7983 } 7984 default: 7985 /* 7986 * This is a bug, because we just checked for this above, 7987 * and should have returned an error. 7988 */ 7989 panic("Invalid PR type %x", cdb->action); 7990 break; /* NOTREACHED */ 7991 } 7992 mtx_unlock(&lun->lun_lock); 7993 7994 ctl_set_success(ctsio); 7995 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7996 ctsio->be_move_done = ctl_config_move_done; 7997 ctl_datamove((union ctl_io *)ctsio); 7998 return (CTL_RETVAL_COMPLETE); 7999 } 8000 8001 static void 8002 ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 8003 { 8004 int off = lun->ctl_softc->persis_offset; 8005 8006 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 8007 ctl_est_ua(lun, residx - off, ua); 8008 } 8009 8010 /* 8011 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 8012 * it should return. 
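 * In the non-zero case the sense data has already been set and ctl_done()
 * has already been called, so the caller must not touch the I/O again.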
8013 */ 8014 static int 8015 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 8016 uint64_t sa_res_key, uint8_t type, uint32_t residx, 8017 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 8018 struct scsi_per_res_out_parms* param) 8019 { 8020 union ctl_ha_msg persis_io; 8021 int retval, i; 8022 int isc_retval; 8023 8024 retval = 0; 8025 8026 mtx_lock(&lun->lun_lock); 8027 if (sa_res_key == 0) { 8028 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8029 /* validate scope and type */ 8030 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8031 SPR_LU_SCOPE) { 8032 mtx_unlock(&lun->lun_lock); 8033 ctl_set_invalid_field(/*ctsio*/ ctsio, 8034 /*sks_valid*/ 1, 8035 /*command*/ 1, 8036 /*field*/ 2, 8037 /*bit_valid*/ 1, 8038 /*bit*/ 4); 8039 ctl_done((union ctl_io *)ctsio); 8040 return (1); 8041 } 8042 8043 if (type>8 || type==2 || type==4 || type==0) { 8044 mtx_unlock(&lun->lun_lock); 8045 ctl_set_invalid_field(/*ctsio*/ ctsio, 8046 /*sks_valid*/ 1, 8047 /*command*/ 1, 8048 /*field*/ 2, 8049 /*bit_valid*/ 1, 8050 /*bit*/ 0); 8051 ctl_done((union ctl_io *)ctsio); 8052 return (1); 8053 } 8054 8055 /* 8056 * Unregister everybody else and build UA for 8057 * them 8058 */ 8059 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8060 if (i == residx || ctl_get_prkey(lun, i) == 0) 8061 continue; 8062 8063 ctl_clr_prkey(lun, i); 8064 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8065 } 8066 lun->pr_key_count = 1; 8067 lun->res_type = type; 8068 if (lun->res_type != SPR_TYPE_WR_EX_AR 8069 && lun->res_type != SPR_TYPE_EX_AC_AR) 8070 lun->pr_res_idx = residx; 8071 8072 /* send msg to other side */ 8073 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8074 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8075 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8076 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8077 persis_io.pr.pr_info.res_type = type; 8078 memcpy(persis_io.pr.pr_info.sa_res_key, 8079 param->serv_act_res_key, 8080 sizeof(param->serv_act_res_key)); 8081 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8082 &persis_io, sizeof(persis_io), 0)) > 8083 CTL_HA_STATUS_SUCCESS) { 8084 printf("CTL:Persis Out error returned " 8085 "from ctl_ha_msg_send %d\n", 8086 isc_retval); 8087 } 8088 } else { 8089 /* not all registrants */ 8090 mtx_unlock(&lun->lun_lock); 8091 free(ctsio->kern_data_ptr, M_CTL); 8092 ctl_set_invalid_field(ctsio, 8093 /*sks_valid*/ 1, 8094 /*command*/ 0, 8095 /*field*/ 8, 8096 /*bit_valid*/ 0, 8097 /*bit*/ 0); 8098 ctl_done((union ctl_io *)ctsio); 8099 return (1); 8100 } 8101 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8102 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8103 int found = 0; 8104 8105 if (res_key == sa_res_key) { 8106 /* special case */ 8107 /* 8108 * The spec implies this is not good but doesn't 8109 * say what to do. There are two choices either 8110 * generate a res conflict or check condition 8111 * with illegal field in parameter data. Since 8112 * that is what is done when the sa_res_key is 8113 * zero I'll take that approach since this has 8114 * to do with the sa_res_key. 
8115 */ 8116 mtx_unlock(&lun->lun_lock); 8117 free(ctsio->kern_data_ptr, M_CTL); 8118 ctl_set_invalid_field(ctsio, 8119 /*sks_valid*/ 1, 8120 /*command*/ 0, 8121 /*field*/ 8, 8122 /*bit_valid*/ 0, 8123 /*bit*/ 0); 8124 ctl_done((union ctl_io *)ctsio); 8125 return (1); 8126 } 8127 8128 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8129 if (ctl_get_prkey(lun, i) != sa_res_key) 8130 continue; 8131 8132 found = 1; 8133 ctl_clr_prkey(lun, i); 8134 lun->pr_key_count--; 8135 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8136 } 8137 if (!found) { 8138 mtx_unlock(&lun->lun_lock); 8139 free(ctsio->kern_data_ptr, M_CTL); 8140 ctl_set_reservation_conflict(ctsio); 8141 ctl_done((union ctl_io *)ctsio); 8142 return (CTL_RETVAL_COMPLETE); 8143 } 8144 /* send msg to other side */ 8145 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8146 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8147 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8148 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8149 persis_io.pr.pr_info.res_type = type; 8150 memcpy(persis_io.pr.pr_info.sa_res_key, 8151 param->serv_act_res_key, 8152 sizeof(param->serv_act_res_key)); 8153 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8154 &persis_io, sizeof(persis_io), 0)) > 8155 CTL_HA_STATUS_SUCCESS) { 8156 printf("CTL:Persis Out error returned from " 8157 "ctl_ha_msg_send %d\n", isc_retval); 8158 } 8159 } else { 8160 /* Reserved but not all registrants */ 8161 /* sa_res_key is res holder */ 8162 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8163 /* validate scope and type */ 8164 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8165 SPR_LU_SCOPE) { 8166 mtx_unlock(&lun->lun_lock); 8167 ctl_set_invalid_field(/*ctsio*/ ctsio, 8168 /*sks_valid*/ 1, 8169 /*command*/ 1, 8170 /*field*/ 2, 8171 /*bit_valid*/ 1, 8172 /*bit*/ 4); 8173 ctl_done((union ctl_io *)ctsio); 8174 return (1); 8175 } 8176 8177 if (type>8 || type==2 || type==4 || type==0) { 8178 mtx_unlock(&lun->lun_lock); 8179 ctl_set_invalid_field(/*ctsio*/ ctsio, 8180 /*sks_valid*/ 1, 8181 /*command*/ 1, 8182 /*field*/ 2, 8183 /*bit_valid*/ 1, 8184 /*bit*/ 0); 8185 ctl_done((union ctl_io *)ctsio); 8186 return (1); 8187 } 8188 8189 /* 8190 * Do the following: 8191 * if sa_res_key != res_key remove all 8192 * registrants w/sa_res_key and generate UA 8193 * for these registrants(Registrations 8194 * Preempted) if it wasn't an exclusive 8195 * reservation generate UA(Reservations 8196 * Preempted) for all other registered nexuses 8197 * if the type has changed. Establish the new 8198 * reservation and holder. If res_key and 8199 * sa_res_key are the same do the above 8200 * except don't unregister the res holder. 
8201 */ 8202 8203 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8204 if (i == residx || ctl_get_prkey(lun, i) == 0) 8205 continue; 8206 8207 if (sa_res_key == ctl_get_prkey(lun, i)) { 8208 ctl_clr_prkey(lun, i); 8209 lun->pr_key_count--; 8210 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8211 } else if (type != lun->res_type 8212 && (lun->res_type == SPR_TYPE_WR_EX_RO 8213 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 8214 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 8215 } 8216 } 8217 lun->res_type = type; 8218 if (lun->res_type != SPR_TYPE_WR_EX_AR 8219 && lun->res_type != SPR_TYPE_EX_AC_AR) 8220 lun->pr_res_idx = residx; 8221 else 8222 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8223 8224 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8225 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8226 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8227 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8228 persis_io.pr.pr_info.res_type = type; 8229 memcpy(persis_io.pr.pr_info.sa_res_key, 8230 param->serv_act_res_key, 8231 sizeof(param->serv_act_res_key)); 8232 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8233 &persis_io, sizeof(persis_io), 0)) > 8234 CTL_HA_STATUS_SUCCESS) { 8235 printf("CTL:Persis Out error returned " 8236 "from ctl_ha_msg_send %d\n", 8237 isc_retval); 8238 } 8239 } else { 8240 /* 8241 * sa_res_key is not the res holder just 8242 * remove registrants 8243 */ 8244 int found=0; 8245 8246 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8247 if (sa_res_key != ctl_get_prkey(lun, i)) 8248 continue; 8249 8250 found = 1; 8251 ctl_clr_prkey(lun, i); 8252 lun->pr_key_count--; 8253 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8254 } 8255 8256 if (!found) { 8257 mtx_unlock(&lun->lun_lock); 8258 free(ctsio->kern_data_ptr, M_CTL); 8259 ctl_set_reservation_conflict(ctsio); 8260 ctl_done((union ctl_io *)ctsio); 8261 return (1); 8262 } 8263 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8264 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8265 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8266 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8267 persis_io.pr.pr_info.res_type = type; 8268 memcpy(persis_io.pr.pr_info.sa_res_key, 8269 param->serv_act_res_key, 8270 sizeof(param->serv_act_res_key)); 8271 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8272 &persis_io, sizeof(persis_io), 0)) > 8273 CTL_HA_STATUS_SUCCESS) { 8274 printf("CTL:Persis Out error returned " 8275 "from ctl_ha_msg_send %d\n", 8276 isc_retval); 8277 } 8278 } 8279 } 8280 8281 lun->PRGeneration++; 8282 mtx_unlock(&lun->lun_lock); 8283 8284 return (retval); 8285 } 8286 8287 static void 8288 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8289 { 8290 uint64_t sa_res_key; 8291 int i; 8292 8293 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8294 8295 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8296 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8297 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8298 if (sa_res_key == 0) { 8299 /* 8300 * Unregister everybody else and build UA for 8301 * them 8302 */ 8303 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8304 if (i == msg->pr.pr_info.residx || 8305 ctl_get_prkey(lun, i) == 0) 8306 continue; 8307 8308 ctl_clr_prkey(lun, i); 8309 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8310 } 8311 8312 lun->pr_key_count = 1; 8313 lun->res_type = msg->pr.pr_info.res_type; 8314 if (lun->res_type != SPR_TYPE_WR_EX_AR 8315 && lun->res_type != SPR_TYPE_EX_AC_AR) 8316 lun->pr_res_idx = msg->pr.pr_info.residx; 8317 } else { 8318 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8319 if (sa_res_key == ctl_get_prkey(lun, 
i)) 8320 continue; 8321 8322 ctl_clr_prkey(lun, i); 8323 lun->pr_key_count--; 8324 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8325 } 8326 } 8327 } else { 8328 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8329 if (i == msg->pr.pr_info.residx || 8330 ctl_get_prkey(lun, i) == 0) 8331 continue; 8332 8333 if (sa_res_key == ctl_get_prkey(lun, i)) { 8334 ctl_clr_prkey(lun, i); 8335 lun->pr_key_count--; 8336 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8337 } else if (msg->pr.pr_info.res_type != lun->res_type 8338 && (lun->res_type == SPR_TYPE_WR_EX_RO 8339 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8340 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 8341 } 8342 } 8343 lun->res_type = msg->pr.pr_info.res_type; 8344 if (lun->res_type != SPR_TYPE_WR_EX_AR 8345 && lun->res_type != SPR_TYPE_EX_AC_AR) 8346 lun->pr_res_idx = msg->pr.pr_info.residx; 8347 else 8348 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8349 } 8350 lun->PRGeneration++; 8351 8352 } 8353 8354 8355 int 8356 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8357 { 8358 int retval; 8359 int isc_retval; 8360 u_int32_t param_len; 8361 struct scsi_per_res_out *cdb; 8362 struct ctl_lun *lun; 8363 struct scsi_per_res_out_parms* param; 8364 struct ctl_softc *softc; 8365 uint32_t residx; 8366 uint64_t res_key, sa_res_key, key; 8367 uint8_t type; 8368 union ctl_ha_msg persis_io; 8369 int i; 8370 8371 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8372 8373 retval = CTL_RETVAL_COMPLETE; 8374 8375 softc = control_softc; 8376 8377 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8378 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8379 8380 /* 8381 * We only support whole-LUN scope. The scope & type are ignored for 8382 * register, register and ignore existing key and clear. 8383 * We sometimes ignore scope and type on preempts too!! 8384 * Verify reservation type here as well. 
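 * (The "type>8 || type==2 || type==4 || type==0" checks below reject
 * persistent reservation type codes that SPC-3 lists as obsolete or
 * reserved; only Write Exclusive (1h), Exclusive Access (3h) and their
 * Registrants Only / All Registrants variants (5h-8h) are accepted.)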
8385 */ 8386 type = cdb->scope_type & SPR_TYPE_MASK; 8387 if ((cdb->action == SPRO_RESERVE) 8388 || (cdb->action == SPRO_RELEASE)) { 8389 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8390 ctl_set_invalid_field(/*ctsio*/ ctsio, 8391 /*sks_valid*/ 1, 8392 /*command*/ 1, 8393 /*field*/ 2, 8394 /*bit_valid*/ 1, 8395 /*bit*/ 4); 8396 ctl_done((union ctl_io *)ctsio); 8397 return (CTL_RETVAL_COMPLETE); 8398 } 8399 8400 if (type>8 || type==2 || type==4 || type==0) { 8401 ctl_set_invalid_field(/*ctsio*/ ctsio, 8402 /*sks_valid*/ 1, 8403 /*command*/ 1, 8404 /*field*/ 2, 8405 /*bit_valid*/ 1, 8406 /*bit*/ 0); 8407 ctl_done((union ctl_io *)ctsio); 8408 return (CTL_RETVAL_COMPLETE); 8409 } 8410 } 8411 8412 param_len = scsi_4btoul(cdb->length); 8413 8414 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8415 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8416 ctsio->kern_data_len = param_len; 8417 ctsio->kern_total_len = param_len; 8418 ctsio->kern_data_resid = 0; 8419 ctsio->kern_rel_offset = 0; 8420 ctsio->kern_sg_entries = 0; 8421 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8422 ctsio->be_move_done = ctl_config_move_done; 8423 ctl_datamove((union ctl_io *)ctsio); 8424 8425 return (CTL_RETVAL_COMPLETE); 8426 } 8427 8428 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8429 8430 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8431 res_key = scsi_8btou64(param->res_key.key); 8432 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8433 8434 /* 8435 * Validate the reservation key here except for SPRO_REG_IGNO 8436 * This must be done for all other service actions 8437 */ 8438 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8439 mtx_lock(&lun->lun_lock); 8440 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8441 if (res_key != key) { 8442 /* 8443 * The current key passed in doesn't match 8444 * the one the initiator previously 8445 * registered. 8446 */ 8447 mtx_unlock(&lun->lun_lock); 8448 free(ctsio->kern_data_ptr, M_CTL); 8449 ctl_set_reservation_conflict(ctsio); 8450 ctl_done((union ctl_io *)ctsio); 8451 return (CTL_RETVAL_COMPLETE); 8452 } 8453 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8454 /* 8455 * We are not registered 8456 */ 8457 mtx_unlock(&lun->lun_lock); 8458 free(ctsio->kern_data_ptr, M_CTL); 8459 ctl_set_reservation_conflict(ctsio); 8460 ctl_done((union ctl_io *)ctsio); 8461 return (CTL_RETVAL_COMPLETE); 8462 } else if (res_key != 0) { 8463 /* 8464 * We are not registered and trying to register but 8465 * the register key isn't zero. 8466 */ 8467 mtx_unlock(&lun->lun_lock); 8468 free(ctsio->kern_data_ptr, M_CTL); 8469 ctl_set_reservation_conflict(ctsio); 8470 ctl_done((union ctl_io *)ctsio); 8471 return (CTL_RETVAL_COMPLETE); 8472 } 8473 mtx_unlock(&lun->lun_lock); 8474 } 8475 8476 switch (cdb->action & SPRO_ACTION_MASK) { 8477 case SPRO_REGISTER: 8478 case SPRO_REG_IGNO: { 8479 8480 #if 0 8481 printf("Registration received\n"); 8482 #endif 8483 8484 /* 8485 * We don't support any of these options, as we report in 8486 * the read capabilities request (see 8487 * ctl_persistent_reserve_in(), above). 
8488 */ 8489 if ((param->flags & SPR_SPEC_I_PT) 8490 || (param->flags & SPR_ALL_TG_PT) 8491 || (param->flags & SPR_APTPL)) { 8492 int bit_ptr; 8493 8494 if (param->flags & SPR_APTPL) 8495 bit_ptr = 0; 8496 else if (param->flags & SPR_ALL_TG_PT) 8497 bit_ptr = 2; 8498 else /* SPR_SPEC_I_PT */ 8499 bit_ptr = 3; 8500 8501 free(ctsio->kern_data_ptr, M_CTL); 8502 ctl_set_invalid_field(ctsio, 8503 /*sks_valid*/ 1, 8504 /*command*/ 0, 8505 /*field*/ 20, 8506 /*bit_valid*/ 1, 8507 /*bit*/ bit_ptr); 8508 ctl_done((union ctl_io *)ctsio); 8509 return (CTL_RETVAL_COMPLETE); 8510 } 8511 8512 mtx_lock(&lun->lun_lock); 8513 8514 /* 8515 * The initiator wants to clear the 8516 * key/unregister. 8517 */ 8518 if (sa_res_key == 0) { 8519 if ((res_key == 0 8520 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8521 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8522 && ctl_get_prkey(lun, residx) == 0)) { 8523 mtx_unlock(&lun->lun_lock); 8524 goto done; 8525 } 8526 8527 ctl_clr_prkey(lun, residx); 8528 lun->pr_key_count--; 8529 8530 if (residx == lun->pr_res_idx) { 8531 lun->flags &= ~CTL_LUN_PR_RESERVED; 8532 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8533 8534 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8535 || lun->res_type == SPR_TYPE_EX_AC_RO) 8536 && lun->pr_key_count) { 8537 /* 8538 * If the reservation is a registrants 8539 * only type we need to generate a UA 8540 * for other registered inits. The 8541 * sense code should be RESERVATIONS 8542 * RELEASED 8543 */ 8544 8545 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8546 if (ctl_get_prkey(lun, i + 8547 softc->persis_offset) == 0) 8548 continue; 8549 ctl_est_ua(lun, i, 8550 CTL_UA_RES_RELEASE); 8551 } 8552 } 8553 lun->res_type = 0; 8554 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8555 if (lun->pr_key_count==0) { 8556 lun->flags &= ~CTL_LUN_PR_RESERVED; 8557 lun->res_type = 0; 8558 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8559 } 8560 } 8561 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8562 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8563 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8564 persis_io.pr.pr_info.residx = residx; 8565 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8566 &persis_io, sizeof(persis_io), 0 )) > 8567 CTL_HA_STATUS_SUCCESS) { 8568 printf("CTL:Persis Out error returned from " 8569 "ctl_ha_msg_send %d\n", isc_retval); 8570 } 8571 } else /* sa_res_key != 0 */ { 8572 8573 /* 8574 * If we aren't registered currently then increment 8575 * the key count and set the registered flag. 
8576 */ 8577 ctl_alloc_prkey(lun, residx); 8578 if (ctl_get_prkey(lun, residx) == 0) 8579 lun->pr_key_count++; 8580 ctl_set_prkey(lun, residx, sa_res_key); 8581 8582 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8583 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8584 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8585 persis_io.pr.pr_info.residx = residx; 8586 memcpy(persis_io.pr.pr_info.sa_res_key, 8587 param->serv_act_res_key, 8588 sizeof(param->serv_act_res_key)); 8589 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8590 &persis_io, sizeof(persis_io), 0)) > 8591 CTL_HA_STATUS_SUCCESS) { 8592 printf("CTL:Persis Out error returned from " 8593 "ctl_ha_msg_send %d\n", isc_retval); 8594 } 8595 } 8596 lun->PRGeneration++; 8597 mtx_unlock(&lun->lun_lock); 8598 8599 break; 8600 } 8601 case SPRO_RESERVE: 8602 #if 0 8603 printf("Reserve executed type %d\n", type); 8604 #endif 8605 mtx_lock(&lun->lun_lock); 8606 if (lun->flags & CTL_LUN_PR_RESERVED) { 8607 /* 8608 * if this isn't the reservation holder and it's 8609 * not a "all registrants" type or if the type is 8610 * different then we have a conflict 8611 */ 8612 if ((lun->pr_res_idx != residx 8613 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8614 || lun->res_type != type) { 8615 mtx_unlock(&lun->lun_lock); 8616 free(ctsio->kern_data_ptr, M_CTL); 8617 ctl_set_reservation_conflict(ctsio); 8618 ctl_done((union ctl_io *)ctsio); 8619 return (CTL_RETVAL_COMPLETE); 8620 } 8621 mtx_unlock(&lun->lun_lock); 8622 } else /* create a reservation */ { 8623 /* 8624 * If it's not an "all registrants" type record 8625 * reservation holder 8626 */ 8627 if (type != SPR_TYPE_WR_EX_AR 8628 && type != SPR_TYPE_EX_AC_AR) 8629 lun->pr_res_idx = residx; /* Res holder */ 8630 else 8631 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8632 8633 lun->flags |= CTL_LUN_PR_RESERVED; 8634 lun->res_type = type; 8635 8636 mtx_unlock(&lun->lun_lock); 8637 8638 /* send msg to other side */ 8639 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8640 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8641 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8642 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8643 persis_io.pr.pr_info.res_type = type; 8644 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8645 &persis_io, sizeof(persis_io), 0)) > 8646 CTL_HA_STATUS_SUCCESS) { 8647 printf("CTL:Persis Out error returned from " 8648 "ctl_ha_msg_send %d\n", isc_retval); 8649 } 8650 } 8651 break; 8652 8653 case SPRO_RELEASE: 8654 mtx_lock(&lun->lun_lock); 8655 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8656 /* No reservation exists return good status */ 8657 mtx_unlock(&lun->lun_lock); 8658 goto done; 8659 } 8660 /* 8661 * Is this nexus a reservation holder? 8662 */ 8663 if (lun->pr_res_idx != residx 8664 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8665 /* 8666 * not a res holder return good status but 8667 * do nothing 8668 */ 8669 mtx_unlock(&lun->lun_lock); 8670 goto done; 8671 } 8672 8673 if (lun->res_type != type) { 8674 mtx_unlock(&lun->lun_lock); 8675 free(ctsio->kern_data_ptr, M_CTL); 8676 ctl_set_illegal_pr_release(ctsio); 8677 ctl_done((union ctl_io *)ctsio); 8678 return (CTL_RETVAL_COMPLETE); 8679 } 8680 8681 /* okay to release */ 8682 lun->flags &= ~CTL_LUN_PR_RESERVED; 8683 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8684 lun->res_type = 0; 8685 8686 /* 8687 * if this isn't an exclusive access 8688 * res generate UA for all other 8689 * registrants. 
8690 */ 8691 if (type != SPR_TYPE_EX_AC 8692 && type != SPR_TYPE_WR_EX) { 8693 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8694 if (i == residx || 8695 ctl_get_prkey(lun, 8696 i + softc->persis_offset) == 0) 8697 continue; 8698 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8699 } 8700 } 8701 mtx_unlock(&lun->lun_lock); 8702 /* Send msg to other side */ 8703 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8704 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8705 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8706 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8707 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8708 printf("CTL:Persis Out error returned from " 8709 "ctl_ha_msg_send %d\n", isc_retval); 8710 } 8711 break; 8712 8713 case SPRO_CLEAR: 8714 /* send msg to other side */ 8715 8716 mtx_lock(&lun->lun_lock); 8717 lun->flags &= ~CTL_LUN_PR_RESERVED; 8718 lun->res_type = 0; 8719 lun->pr_key_count = 0; 8720 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8721 8722 ctl_clr_prkey(lun, residx); 8723 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8724 if (ctl_get_prkey(lun, i) != 0) { 8725 ctl_clr_prkey(lun, i); 8726 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8727 } 8728 lun->PRGeneration++; 8729 mtx_unlock(&lun->lun_lock); 8730 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8731 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8732 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8733 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8734 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8735 printf("CTL:Persis Out error returned from " 8736 "ctl_ha_msg_send %d\n", isc_retval); 8737 } 8738 break; 8739 8740 case SPRO_PREEMPT: 8741 case SPRO_PRE_ABO: { 8742 int nretval; 8743 8744 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8745 residx, ctsio, cdb, param); 8746 if (nretval != 0) 8747 return (CTL_RETVAL_COMPLETE); 8748 break; 8749 } 8750 default: 8751 panic("Invalid PR type %x", cdb->action); 8752 } 8753 8754 done: 8755 free(ctsio->kern_data_ptr, M_CTL); 8756 ctl_set_success(ctsio); 8757 ctl_done((union ctl_io *)ctsio); 8758 8759 return (retval); 8760 } 8761 8762 /* 8763 * This routine is for handling a message from the other SC pertaining to 8764 * persistent reserve out. All the error checking will have been done 8765 * so only perorming the action need be done here to keep the two 8766 * in sync. 8767 */ 8768 static void 8769 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8770 { 8771 struct ctl_lun *lun; 8772 struct ctl_softc *softc; 8773 int i; 8774 uint32_t targ_lun; 8775 8776 softc = control_softc; 8777 8778 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8779 lun = softc->ctl_luns[targ_lun]; 8780 mtx_lock(&lun->lun_lock); 8781 switch(msg->pr.pr_info.action) { 8782 case CTL_PR_REG_KEY: 8783 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8784 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8785 lun->pr_key_count++; 8786 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8787 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8788 lun->PRGeneration++; 8789 break; 8790 8791 case CTL_PR_UNREG_KEY: 8792 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8793 lun->pr_key_count--; 8794 8795 /* XXX Need to see if the reservation has been released */ 8796 /* if so do we need to generate UA? 
*/ 8797 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8798 lun->flags &= ~CTL_LUN_PR_RESERVED; 8799 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8800 8801 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8802 || lun->res_type == SPR_TYPE_EX_AC_RO) 8803 && lun->pr_key_count) { 8804 /* 8805 * If the reservation is a registrants 8806 * only type we need to generate a UA 8807 * for other registered inits. The 8808 * sense code should be RESERVATIONS 8809 * RELEASED 8810 */ 8811 8812 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8813 if (ctl_get_prkey(lun, i + 8814 softc->persis_offset) == 0) 8815 continue; 8816 8817 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8818 } 8819 } 8820 lun->res_type = 0; 8821 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8822 if (lun->pr_key_count==0) { 8823 lun->flags &= ~CTL_LUN_PR_RESERVED; 8824 lun->res_type = 0; 8825 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8826 } 8827 } 8828 lun->PRGeneration++; 8829 break; 8830 8831 case CTL_PR_RESERVE: 8832 lun->flags |= CTL_LUN_PR_RESERVED; 8833 lun->res_type = msg->pr.pr_info.res_type; 8834 lun->pr_res_idx = msg->pr.pr_info.residx; 8835 8836 break; 8837 8838 case CTL_PR_RELEASE: 8839 /* 8840 * if this isn't an exclusive access res generate UA for all 8841 * other registrants. 8842 */ 8843 if (lun->res_type != SPR_TYPE_EX_AC 8844 && lun->res_type != SPR_TYPE_WR_EX) { 8845 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8846 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) 8847 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8848 } 8849 8850 lun->flags &= ~CTL_LUN_PR_RESERVED; 8851 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8852 lun->res_type = 0; 8853 break; 8854 8855 case CTL_PR_PREEMPT: 8856 ctl_pro_preempt_other(lun, msg); 8857 break; 8858 case CTL_PR_CLEAR: 8859 lun->flags &= ~CTL_LUN_PR_RESERVED; 8860 lun->res_type = 0; 8861 lun->pr_key_count = 0; 8862 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8863 8864 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8865 if (ctl_get_prkey(lun, i) == 0) 8866 continue; 8867 ctl_clr_prkey(lun, i); 8868 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8869 } 8870 lun->PRGeneration++; 8871 break; 8872 } 8873 8874 mtx_unlock(&lun->lun_lock); 8875 } 8876 8877 int 8878 ctl_read_write(struct ctl_scsiio *ctsio) 8879 { 8880 struct ctl_lun *lun; 8881 struct ctl_lba_len_flags *lbalen; 8882 uint64_t lba; 8883 uint32_t num_blocks; 8884 int flags, retval; 8885 int isread; 8886 8887 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8888 8889 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8890 8891 flags = 0; 8892 retval = CTL_RETVAL_COMPLETE; 8893 8894 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8895 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8896 switch (ctsio->cdb[0]) { 8897 case READ_6: 8898 case WRITE_6: { 8899 struct scsi_rw_6 *cdb; 8900 8901 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8902 8903 lba = scsi_3btoul(cdb->addr); 8904 /* only 5 bits are valid in the most significant address byte */ 8905 lba &= 0x1fffff; 8906 num_blocks = cdb->length; 8907 /* 8908 * This is correct according to SBC-2. 
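		 * (SBC defines a TRANSFER LENGTH of zero in the READ(6)/WRITE(6)
		 * CDB to mean 256 blocks rather than zero, hence the substitution
		 * below.)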
8909 */ 8910 if (num_blocks == 0) 8911 num_blocks = 256; 8912 break; 8913 } 8914 case READ_10: 8915 case WRITE_10: { 8916 struct scsi_rw_10 *cdb; 8917 8918 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8919 if (cdb->byte2 & SRW10_FUA) 8920 flags |= CTL_LLF_FUA; 8921 if (cdb->byte2 & SRW10_DPO) 8922 flags |= CTL_LLF_DPO; 8923 lba = scsi_4btoul(cdb->addr); 8924 num_blocks = scsi_2btoul(cdb->length); 8925 break; 8926 } 8927 case WRITE_VERIFY_10: { 8928 struct scsi_write_verify_10 *cdb; 8929 8930 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8931 flags |= CTL_LLF_FUA; 8932 if (cdb->byte2 & SWV_DPO) 8933 flags |= CTL_LLF_DPO; 8934 lba = scsi_4btoul(cdb->addr); 8935 num_blocks = scsi_2btoul(cdb->length); 8936 break; 8937 } 8938 case READ_12: 8939 case WRITE_12: { 8940 struct scsi_rw_12 *cdb; 8941 8942 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8943 if (cdb->byte2 & SRW12_FUA) 8944 flags |= CTL_LLF_FUA; 8945 if (cdb->byte2 & SRW12_DPO) 8946 flags |= CTL_LLF_DPO; 8947 lba = scsi_4btoul(cdb->addr); 8948 num_blocks = scsi_4btoul(cdb->length); 8949 break; 8950 } 8951 case WRITE_VERIFY_12: { 8952 struct scsi_write_verify_12 *cdb; 8953 8954 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8955 flags |= CTL_LLF_FUA; 8956 if (cdb->byte2 & SWV_DPO) 8957 flags |= CTL_LLF_DPO; 8958 lba = scsi_4btoul(cdb->addr); 8959 num_blocks = scsi_4btoul(cdb->length); 8960 break; 8961 } 8962 case READ_16: 8963 case WRITE_16: { 8964 struct scsi_rw_16 *cdb; 8965 8966 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8967 if (cdb->byte2 & SRW12_FUA) 8968 flags |= CTL_LLF_FUA; 8969 if (cdb->byte2 & SRW12_DPO) 8970 flags |= CTL_LLF_DPO; 8971 lba = scsi_8btou64(cdb->addr); 8972 num_blocks = scsi_4btoul(cdb->length); 8973 break; 8974 } 8975 case WRITE_ATOMIC_16: { 8976 struct scsi_rw_16 *cdb; 8977 8978 if (lun->be_lun->atomicblock == 0) { 8979 ctl_set_invalid_opcode(ctsio); 8980 ctl_done((union ctl_io *)ctsio); 8981 return (CTL_RETVAL_COMPLETE); 8982 } 8983 8984 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8985 if (cdb->byte2 & SRW12_FUA) 8986 flags |= CTL_LLF_FUA; 8987 if (cdb->byte2 & SRW12_DPO) 8988 flags |= CTL_LLF_DPO; 8989 lba = scsi_8btou64(cdb->addr); 8990 num_blocks = scsi_4btoul(cdb->length); 8991 if (num_blocks > lun->be_lun->atomicblock) { 8992 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8993 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8994 /*bit*/ 0); 8995 ctl_done((union ctl_io *)ctsio); 8996 return (CTL_RETVAL_COMPLETE); 8997 } 8998 break; 8999 } 9000 case WRITE_VERIFY_16: { 9001 struct scsi_write_verify_16 *cdb; 9002 9003 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 9004 flags |= CTL_LLF_FUA; 9005 if (cdb->byte2 & SWV_DPO) 9006 flags |= CTL_LLF_DPO; 9007 lba = scsi_8btou64(cdb->addr); 9008 num_blocks = scsi_4btoul(cdb->length); 9009 break; 9010 } 9011 default: 9012 /* 9013 * We got a command we don't support. This shouldn't 9014 * happen, commands should be filtered out above us. 9015 */ 9016 ctl_set_invalid_opcode(ctsio); 9017 ctl_done((union ctl_io *)ctsio); 9018 9019 return (CTL_RETVAL_COMPLETE); 9020 break; /* NOTREACHED */ 9021 } 9022 9023 /* 9024 * The first check is to make sure we're in bounds, the second 9025 * check is to catch wrap-around problems. If the lba + num blocks 9026 * is less than the lba, then we've wrapped around and the block 9027 * range is invalid anyway. 
9028 */ 9029 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9030 || ((lba + num_blocks) < lba)) { 9031 ctl_set_lba_out_of_range(ctsio); 9032 ctl_done((union ctl_io *)ctsio); 9033 return (CTL_RETVAL_COMPLETE); 9034 } 9035 9036 /* 9037 * According to SBC-3, a transfer length of 0 is not an error. 9038 * Note that this cannot happen with WRITE(6) or READ(6), since 0 9039 * translates to 256 blocks for those commands. 9040 */ 9041 if (num_blocks == 0) { 9042 ctl_set_success(ctsio); 9043 ctl_done((union ctl_io *)ctsio); 9044 return (CTL_RETVAL_COMPLETE); 9045 } 9046 9047 /* Set FUA and/or DPO if caches are disabled. */ 9048 if (isread) { 9049 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9050 SCP_RCD) != 0) 9051 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 9052 } else { 9053 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9054 SCP_WCE) == 0) 9055 flags |= CTL_LLF_FUA; 9056 } 9057 9058 lbalen = (struct ctl_lba_len_flags *) 9059 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9060 lbalen->lba = lba; 9061 lbalen->len = num_blocks; 9062 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 9063 9064 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9065 ctsio->kern_rel_offset = 0; 9066 9067 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 9068 9069 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9070 9071 return (retval); 9072 } 9073 9074 static int 9075 ctl_cnw_cont(union ctl_io *io) 9076 { 9077 struct ctl_scsiio *ctsio; 9078 struct ctl_lun *lun; 9079 struct ctl_lba_len_flags *lbalen; 9080 int retval; 9081 9082 ctsio = &io->scsiio; 9083 ctsio->io_hdr.status = CTL_STATUS_NONE; 9084 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 9085 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9086 lbalen = (struct ctl_lba_len_flags *) 9087 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9088 lbalen->flags &= ~CTL_LLF_COMPARE; 9089 lbalen->flags |= CTL_LLF_WRITE; 9090 9091 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 9092 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9093 return (retval); 9094 } 9095 9096 int 9097 ctl_cnw(struct ctl_scsiio *ctsio) 9098 { 9099 struct ctl_lun *lun; 9100 struct ctl_lba_len_flags *lbalen; 9101 uint64_t lba; 9102 uint32_t num_blocks; 9103 int flags, retval; 9104 9105 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9106 9107 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 9108 9109 flags = 0; 9110 retval = CTL_RETVAL_COMPLETE; 9111 9112 switch (ctsio->cdb[0]) { 9113 case COMPARE_AND_WRITE: { 9114 struct scsi_compare_and_write *cdb; 9115 9116 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 9117 if (cdb->byte2 & SRW10_FUA) 9118 flags |= CTL_LLF_FUA; 9119 if (cdb->byte2 & SRW10_DPO) 9120 flags |= CTL_LLF_DPO; 9121 lba = scsi_8btou64(cdb->addr); 9122 num_blocks = cdb->length; 9123 break; 9124 } 9125 default: 9126 /* 9127 * We got a command we don't support. This shouldn't 9128 * happen, commands should be filtered out above us. 9129 */ 9130 ctl_set_invalid_opcode(ctsio); 9131 ctl_done((union ctl_io *)ctsio); 9132 9133 return (CTL_RETVAL_COMPLETE); 9134 break; /* NOTREACHED */ 9135 } 9136 9137 /* 9138 * The first check is to make sure we're in bounds, the second 9139 * check is to catch wrap-around problems. If the lba + num blocks 9140 * is less than the lba, then we've wrapped around and the block 9141 * range is invalid anyway. 
9142 */ 9143 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9144 || ((lba + num_blocks) < lba)) { 9145 ctl_set_lba_out_of_range(ctsio); 9146 ctl_done((union ctl_io *)ctsio); 9147 return (CTL_RETVAL_COMPLETE); 9148 } 9149 9150 /* 9151 * According to SBC-3, a transfer length of 0 is not an error. 9152 */ 9153 if (num_blocks == 0) { 9154 ctl_set_success(ctsio); 9155 ctl_done((union ctl_io *)ctsio); 9156 return (CTL_RETVAL_COMPLETE); 9157 } 9158 9159 /* Set FUA if write cache is disabled. */ 9160 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9161 SCP_WCE) == 0) 9162 flags |= CTL_LLF_FUA; 9163 9164 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 9165 ctsio->kern_rel_offset = 0; 9166 9167 /* 9168 * Set the IO_CONT flag, so that if this I/O gets passed to 9169 * ctl_data_submit_done(), it'll get passed back to 9170 * ctl_ctl_cnw_cont() for further processing. 9171 */ 9172 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 9173 ctsio->io_cont = ctl_cnw_cont; 9174 9175 lbalen = (struct ctl_lba_len_flags *) 9176 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9177 lbalen->lba = lba; 9178 lbalen->len = num_blocks; 9179 lbalen->flags = CTL_LLF_COMPARE | flags; 9180 9181 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 9182 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9183 return (retval); 9184 } 9185 9186 int 9187 ctl_verify(struct ctl_scsiio *ctsio) 9188 { 9189 struct ctl_lun *lun; 9190 struct ctl_lba_len_flags *lbalen; 9191 uint64_t lba; 9192 uint32_t num_blocks; 9193 int bytchk, flags; 9194 int retval; 9195 9196 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9197 9198 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9199 9200 bytchk = 0; 9201 flags = CTL_LLF_FUA; 9202 retval = CTL_RETVAL_COMPLETE; 9203 9204 switch (ctsio->cdb[0]) { 9205 case VERIFY_10: { 9206 struct scsi_verify_10 *cdb; 9207 9208 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9209 if (cdb->byte2 & SVFY_BYTCHK) 9210 bytchk = 1; 9211 if (cdb->byte2 & SVFY_DPO) 9212 flags |= CTL_LLF_DPO; 9213 lba = scsi_4btoul(cdb->addr); 9214 num_blocks = scsi_2btoul(cdb->length); 9215 break; 9216 } 9217 case VERIFY_12: { 9218 struct scsi_verify_12 *cdb; 9219 9220 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9221 if (cdb->byte2 & SVFY_BYTCHK) 9222 bytchk = 1; 9223 if (cdb->byte2 & SVFY_DPO) 9224 flags |= CTL_LLF_DPO; 9225 lba = scsi_4btoul(cdb->addr); 9226 num_blocks = scsi_4btoul(cdb->length); 9227 break; 9228 } 9229 case VERIFY_16: { 9230 struct scsi_rw_16 *cdb; 9231 9232 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9233 if (cdb->byte2 & SVFY_BYTCHK) 9234 bytchk = 1; 9235 if (cdb->byte2 & SVFY_DPO) 9236 flags |= CTL_LLF_DPO; 9237 lba = scsi_8btou64(cdb->addr); 9238 num_blocks = scsi_4btoul(cdb->length); 9239 break; 9240 } 9241 default: 9242 /* 9243 * We got a command we don't support. This shouldn't 9244 * happen, commands should be filtered out above us. 9245 */ 9246 ctl_set_invalid_opcode(ctsio); 9247 ctl_done((union ctl_io *)ctsio); 9248 return (CTL_RETVAL_COMPLETE); 9249 } 9250 9251 /* 9252 * The first check is to make sure we're in bounds, the second 9253 * check is to catch wrap-around problems. If the lba + num blocks 9254 * is less than the lba, then we've wrapped around and the block 9255 * range is invalid anyway. 
9256 */ 9257 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9258 || ((lba + num_blocks) < lba)) { 9259 ctl_set_lba_out_of_range(ctsio); 9260 ctl_done((union ctl_io *)ctsio); 9261 return (CTL_RETVAL_COMPLETE); 9262 } 9263 9264 /* 9265 * According to SBC-3, a transfer length of 0 is not an error. 9266 */ 9267 if (num_blocks == 0) { 9268 ctl_set_success(ctsio); 9269 ctl_done((union ctl_io *)ctsio); 9270 return (CTL_RETVAL_COMPLETE); 9271 } 9272 9273 lbalen = (struct ctl_lba_len_flags *) 9274 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9275 lbalen->lba = lba; 9276 lbalen->len = num_blocks; 9277 if (bytchk) { 9278 lbalen->flags = CTL_LLF_COMPARE | flags; 9279 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9280 } else { 9281 lbalen->flags = CTL_LLF_VERIFY | flags; 9282 ctsio->kern_total_len = 0; 9283 } 9284 ctsio->kern_rel_offset = 0; 9285 9286 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9287 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9288 return (retval); 9289 } 9290 9291 int 9292 ctl_report_luns(struct ctl_scsiio *ctsio) 9293 { 9294 struct scsi_report_luns *cdb; 9295 struct scsi_report_luns_data *lun_data; 9296 struct ctl_lun *lun, *request_lun; 9297 int num_luns, retval; 9298 uint32_t alloc_len, lun_datalen; 9299 int num_filled, well_known; 9300 uint32_t initidx, targ_lun_id, lun_id; 9301 9302 retval = CTL_RETVAL_COMPLETE; 9303 well_known = 0; 9304 9305 cdb = (struct scsi_report_luns *)ctsio->cdb; 9306 9307 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9308 9309 mtx_lock(&control_softc->ctl_lock); 9310 num_luns = control_softc->num_luns; 9311 mtx_unlock(&control_softc->ctl_lock); 9312 9313 switch (cdb->select_report) { 9314 case RPL_REPORT_DEFAULT: 9315 case RPL_REPORT_ALL: 9316 break; 9317 case RPL_REPORT_WELLKNOWN: 9318 well_known = 1; 9319 num_luns = 0; 9320 break; 9321 default: 9322 ctl_set_invalid_field(ctsio, 9323 /*sks_valid*/ 1, 9324 /*command*/ 1, 9325 /*field*/ 2, 9326 /*bit_valid*/ 0, 9327 /*bit*/ 0); 9328 ctl_done((union ctl_io *)ctsio); 9329 return (retval); 9330 break; /* NOTREACHED */ 9331 } 9332 9333 alloc_len = scsi_4btoul(cdb->length); 9334 /* 9335 * The initiator has to allocate at least 16 bytes for this request, 9336 * so he can at least get the header and the first LUN. Otherwise 9337 * we reject the request (per SPC-3 rev 14, section 6.21). 
9338 */ 9339 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9340 sizeof(struct scsi_report_luns_lundata))) { 9341 ctl_set_invalid_field(ctsio, 9342 /*sks_valid*/ 1, 9343 /*command*/ 1, 9344 /*field*/ 6, 9345 /*bit_valid*/ 0, 9346 /*bit*/ 0); 9347 ctl_done((union ctl_io *)ctsio); 9348 return (retval); 9349 } 9350 9351 request_lun = (struct ctl_lun *) 9352 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9353 9354 lun_datalen = sizeof(*lun_data) + 9355 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9356 9357 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9358 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9359 ctsio->kern_sg_entries = 0; 9360 9361 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9362 9363 mtx_lock(&control_softc->ctl_lock); 9364 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9365 lun_id = ctl_map_lun(ctsio->io_hdr.nexus.targ_port, targ_lun_id); 9366 if (lun_id >= CTL_MAX_LUNS) 9367 continue; 9368 lun = control_softc->ctl_luns[lun_id]; 9369 if (lun == NULL) 9370 continue; 9371 9372 if (targ_lun_id <= 0xff) { 9373 /* 9374 * Peripheral addressing method, bus number 0. 9375 */ 9376 lun_data->luns[num_filled].lundata[0] = 9377 RPL_LUNDATA_ATYP_PERIPH; 9378 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9379 num_filled++; 9380 } else if (targ_lun_id <= 0x3fff) { 9381 /* 9382 * Flat addressing method. 9383 */ 9384 lun_data->luns[num_filled].lundata[0] = 9385 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9386 lun_data->luns[num_filled].lundata[1] = 9387 (targ_lun_id & 0xff); 9388 num_filled++; 9389 } else if (targ_lun_id <= 0xffffff) { 9390 /* 9391 * Extended flat addressing method. 9392 */ 9393 lun_data->luns[num_filled].lundata[0] = 9394 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9395 scsi_ulto3b(targ_lun_id, 9396 &lun_data->luns[num_filled].lundata[1]); 9397 num_filled++; 9398 } else { 9399 printf("ctl_report_luns: bogus LUN number %jd, " 9400 "skipping\n", (intmax_t)targ_lun_id); 9401 } 9402 /* 9403 * According to SPC-3, rev 14 section 6.21: 9404 * 9405 * "The execution of a REPORT LUNS command to any valid and 9406 * installed logical unit shall clear the REPORTED LUNS DATA 9407 * HAS CHANGED unit attention condition for all logical 9408 * units of that target with respect to the requesting 9409 * initiator. A valid and installed logical unit is one 9410 * having a PERIPHERAL QUALIFIER of 000b in the standard 9411 * INQUIRY data (see 6.4.2)." 9412 * 9413 * If request_lun is NULL, the LUN this report luns command 9414 * was issued to is either disabled or doesn't exist. In that 9415 * case, we shouldn't clear any pending lun change unit 9416 * attention. 9417 */ 9418 if (request_lun != NULL) { 9419 mtx_lock(&lun->lun_lock); 9420 ctl_clr_ua(lun, initidx, CTL_UA_RES_RELEASE); 9421 mtx_unlock(&lun->lun_lock); 9422 } 9423 } 9424 mtx_unlock(&control_softc->ctl_lock); 9425 9426 /* 9427 * It's quite possible that we've returned fewer LUNs than we allocated 9428 * space for. Trim it. 
9429 */ 9430 lun_datalen = sizeof(*lun_data) + 9431 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9432 9433 if (lun_datalen < alloc_len) { 9434 ctsio->residual = alloc_len - lun_datalen; 9435 ctsio->kern_data_len = lun_datalen; 9436 ctsio->kern_total_len = lun_datalen; 9437 } else { 9438 ctsio->residual = 0; 9439 ctsio->kern_data_len = alloc_len; 9440 ctsio->kern_total_len = alloc_len; 9441 } 9442 ctsio->kern_data_resid = 0; 9443 ctsio->kern_rel_offset = 0; 9444 ctsio->kern_sg_entries = 0; 9445 9446 /* 9447 * We set this to the actual data length, regardless of how much 9448 * space we actually have to return results. If the user looks at 9449 * this value, he'll know whether or not he allocated enough space 9450 * and reissue the command if necessary. We don't support well 9451 * known logical units, so if the user asks for that, return none. 9452 */ 9453 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9454 9455 /* 9456 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9457 * this request. 9458 */ 9459 ctl_set_success(ctsio); 9460 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9461 ctsio->be_move_done = ctl_config_move_done; 9462 ctl_datamove((union ctl_io *)ctsio); 9463 return (retval); 9464 } 9465 9466 int 9467 ctl_request_sense(struct ctl_scsiio *ctsio) 9468 { 9469 struct scsi_request_sense *cdb; 9470 struct scsi_sense_data *sense_ptr; 9471 struct ctl_lun *lun; 9472 uint32_t initidx; 9473 int have_error; 9474 scsi_sense_data_type sense_format; 9475 ctl_ua_type ua_type; 9476 9477 cdb = (struct scsi_request_sense *)ctsio->cdb; 9478 9479 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9480 9481 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9482 9483 /* 9484 * Determine which sense format the user wants. 9485 */ 9486 if (cdb->byte2 & SRS_DESC) 9487 sense_format = SSD_TYPE_DESC; 9488 else 9489 sense_format = SSD_TYPE_FIXED; 9490 9491 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9492 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9493 ctsio->kern_sg_entries = 0; 9494 9495 /* 9496 * struct scsi_sense_data, which is currently set to 256 bytes, is 9497 * larger than the largest allowed value for the length field in the 9498 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9499 */ 9500 ctsio->residual = 0; 9501 ctsio->kern_data_len = cdb->length; 9502 ctsio->kern_total_len = cdb->length; 9503 9504 ctsio->kern_data_resid = 0; 9505 ctsio->kern_rel_offset = 0; 9506 ctsio->kern_sg_entries = 0; 9507 9508 /* 9509 * If we don't have a LUN, we don't have any pending sense. 9510 */ 9511 if (lun == NULL) 9512 goto no_sense; 9513 9514 have_error = 0; 9515 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9516 /* 9517 * Check for pending sense, and then for pending unit attentions. 9518 * Pending sense gets returned first, then pending unit attentions. 9519 */ 9520 mtx_lock(&lun->lun_lock); 9521 #ifdef CTL_WITH_CA 9522 if (ctl_is_set(lun->have_ca, initidx)) { 9523 scsi_sense_data_type stored_format; 9524 9525 /* 9526 * Check to see which sense format was used for the stored 9527 * sense data. 9528 */ 9529 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9530 9531 /* 9532 * If the user requested a different sense format than the 9533 * one we stored, then we need to convert it to the other 9534 * format. If we're going from descriptor to fixed format 9535 * sense data, we may lose things in translation, depending 9536 * on what options were used. 9537 * 9538 * If the stored format is SSD_TYPE_NONE (i.e. 
invalid), 9539 * for some reason we'll just copy it out as-is. 9540 */ 9541 if ((stored_format == SSD_TYPE_FIXED) 9542 && (sense_format == SSD_TYPE_DESC)) 9543 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9544 &lun->pending_sense[initidx], 9545 (struct scsi_sense_data_desc *)sense_ptr); 9546 else if ((stored_format == SSD_TYPE_DESC) 9547 && (sense_format == SSD_TYPE_FIXED)) 9548 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9549 &lun->pending_sense[initidx], 9550 (struct scsi_sense_data_fixed *)sense_ptr); 9551 else 9552 memcpy(sense_ptr, &lun->pending_sense[initidx], 9553 ctl_min(sizeof(*sense_ptr), 9554 sizeof(lun->pending_sense[initidx]))); 9555 9556 ctl_clear_mask(lun->have_ca, initidx); 9557 have_error = 1; 9558 } else 9559 #endif 9560 { 9561 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9562 if (ua_type != CTL_UA_NONE) 9563 have_error = 1; 9564 } 9565 mtx_unlock(&lun->lun_lock); 9566 9567 /* 9568 * We already have a pending error, return it. 9569 */ 9570 if (have_error != 0) { 9571 /* 9572 * We report the SCSI status as OK, since the status of the 9573 * request sense command itself is OK. 9574 * We report 0 for the sense length, because we aren't doing 9575 * autosense in this case. We're reporting sense as 9576 * parameter data. 9577 */ 9578 ctl_set_success(ctsio); 9579 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9580 ctsio->be_move_done = ctl_config_move_done; 9581 ctl_datamove((union ctl_io *)ctsio); 9582 return (CTL_RETVAL_COMPLETE); 9583 } 9584 9585 no_sense: 9586 9587 /* 9588 * No sense information to report, so we report that everything is 9589 * okay. 9590 */ 9591 ctl_set_sense_data(sense_ptr, 9592 lun, 9593 sense_format, 9594 /*current_error*/ 1, 9595 /*sense_key*/ SSD_KEY_NO_SENSE, 9596 /*asc*/ 0x00, 9597 /*ascq*/ 0x00, 9598 SSD_ELEM_NONE); 9599 9600 /* 9601 * We report 0 for the sense length, because we aren't doing 9602 * autosense in this case. We're reporting sense as parameter data. 9603 */ 9604 ctl_set_success(ctsio); 9605 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9606 ctsio->be_move_done = ctl_config_move_done; 9607 ctl_datamove((union ctl_io *)ctsio); 9608 return (CTL_RETVAL_COMPLETE); 9609 } 9610 9611 int 9612 ctl_tur(struct ctl_scsiio *ctsio) 9613 { 9614 9615 CTL_DEBUG_PRINT(("ctl_tur\n")); 9616 9617 ctl_set_success(ctsio); 9618 ctl_done((union ctl_io *)ctsio); 9619 9620 return (CTL_RETVAL_COMPLETE); 9621 } 9622 9623 #ifdef notyet 9624 static int 9625 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9626 { 9627 9628 } 9629 #endif 9630 9631 static int 9632 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9633 { 9634 struct scsi_vpd_supported_pages *pages; 9635 int sup_page_size; 9636 struct ctl_lun *lun; 9637 9638 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9639 9640 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9641 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9642 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9643 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9644 ctsio->kern_sg_entries = 0; 9645 9646 if (sup_page_size < alloc_len) { 9647 ctsio->residual = alloc_len - sup_page_size; 9648 ctsio->kern_data_len = sup_page_size; 9649 ctsio->kern_total_len = sup_page_size; 9650 } else { 9651 ctsio->residual = 0; 9652 ctsio->kern_data_len = alloc_len; 9653 ctsio->kern_total_len = alloc_len; 9654 } 9655 ctsio->kern_data_resid = 0; 9656 ctsio->kern_rel_offset = 0; 9657 ctsio->kern_sg_entries = 0; 9658 9659 /* 9660 * The control device is always connected. 
The disk device, on the 9661 * other hand, may not be online all the time. Need to change this 9662 * to figure out whether the disk device is actually online or not. 9663 */ 9664 if (lun != NULL) 9665 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9666 lun->be_lun->lun_type; 9667 else 9668 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9669 9670 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 9671 /* Supported VPD pages */ 9672 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 9673 /* Serial Number */ 9674 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 9675 /* Device Identification */ 9676 pages->page_list[2] = SVPD_DEVICE_ID; 9677 /* Extended INQUIRY Data */ 9678 pages->page_list[3] = SVPD_EXTENDED_INQUIRY_DATA; 9679 /* Mode Page Policy */ 9680 pages->page_list[4] = SVPD_MODE_PAGE_POLICY; 9681 /* SCSI Ports */ 9682 pages->page_list[5] = SVPD_SCSI_PORTS; 9683 /* Third-party Copy */ 9684 pages->page_list[6] = SVPD_SCSI_TPC; 9685 /* Block limits */ 9686 pages->page_list[7] = SVPD_BLOCK_LIMITS; 9687 /* Block Device Characteristics */ 9688 pages->page_list[8] = SVPD_BDC; 9689 /* Logical Block Provisioning */ 9690 pages->page_list[9] = SVPD_LBP; 9691 9692 ctl_set_success(ctsio); 9693 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9694 ctsio->be_move_done = ctl_config_move_done; 9695 ctl_datamove((union ctl_io *)ctsio); 9696 return (CTL_RETVAL_COMPLETE); 9697 } 9698 9699 static int 9700 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9701 { 9702 struct scsi_vpd_unit_serial_number *sn_ptr; 9703 struct ctl_lun *lun; 9704 int data_len; 9705 9706 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9707 9708 data_len = 4 + CTL_SN_LEN; 9709 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9710 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9711 if (data_len < alloc_len) { 9712 ctsio->residual = alloc_len - data_len; 9713 ctsio->kern_data_len = data_len; 9714 ctsio->kern_total_len = data_len; 9715 } else { 9716 ctsio->residual = 0; 9717 ctsio->kern_data_len = alloc_len; 9718 ctsio->kern_total_len = alloc_len; 9719 } 9720 ctsio->kern_data_resid = 0; 9721 ctsio->kern_rel_offset = 0; 9722 ctsio->kern_sg_entries = 0; 9723 9724 /* 9725 * The control device is always connected. The disk device, on the 9726 * other hand, may not be online all the time. Need to change this 9727 * to figure out whether the disk device is actually online or not. 9728 */ 9729 if (lun != NULL) 9730 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9731 lun->be_lun->lun_type; 9732 else 9733 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9734 9735 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9736 sn_ptr->length = CTL_SN_LEN; 9737 /* 9738 * If we don't have a LUN, we just leave the serial number as 9739 * all spaces. 
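	 * (0x20 is the ASCII space character used by the memset() below.)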
9740 */ 9741 if (lun != NULL) { 9742 strncpy((char *)sn_ptr->serial_num, 9743 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9744 } else 9745 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9746 9747 ctl_set_success(ctsio); 9748 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9749 ctsio->be_move_done = ctl_config_move_done; 9750 ctl_datamove((union ctl_io *)ctsio); 9751 return (CTL_RETVAL_COMPLETE); 9752 } 9753 9754 9755 static int 9756 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9757 { 9758 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9759 struct ctl_lun *lun; 9760 int data_len; 9761 9762 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9763 9764 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9765 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9766 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9767 ctsio->kern_sg_entries = 0; 9768 9769 if (data_len < alloc_len) { 9770 ctsio->residual = alloc_len - data_len; 9771 ctsio->kern_data_len = data_len; 9772 ctsio->kern_total_len = data_len; 9773 } else { 9774 ctsio->residual = 0; 9775 ctsio->kern_data_len = alloc_len; 9776 ctsio->kern_total_len = alloc_len; 9777 } 9778 ctsio->kern_data_resid = 0; 9779 ctsio->kern_rel_offset = 0; 9780 ctsio->kern_sg_entries = 0; 9781 9782 /* 9783 * The control device is always connected. The disk device, on the 9784 * other hand, may not be online all the time. 9785 */ 9786 if (lun != NULL) 9787 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9788 lun->be_lun->lun_type; 9789 else 9790 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9791 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9792 eid_ptr->page_length = data_len - 4; 9793 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9794 eid_ptr->flags3 = SVPD_EID_V_SUP; 9795 9796 ctl_set_success(ctsio); 9797 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9798 ctsio->be_move_done = ctl_config_move_done; 9799 ctl_datamove((union ctl_io *)ctsio); 9800 return (CTL_RETVAL_COMPLETE); 9801 } 9802 9803 static int 9804 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9805 { 9806 struct scsi_vpd_mode_page_policy *mpp_ptr; 9807 struct ctl_lun *lun; 9808 int data_len; 9809 9810 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9811 9812 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9813 sizeof(struct scsi_vpd_mode_page_policy_descr); 9814 9815 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9816 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9817 ctsio->kern_sg_entries = 0; 9818 9819 if (data_len < alloc_len) { 9820 ctsio->residual = alloc_len - data_len; 9821 ctsio->kern_data_len = data_len; 9822 ctsio->kern_total_len = data_len; 9823 } else { 9824 ctsio->residual = 0; 9825 ctsio->kern_data_len = alloc_len; 9826 ctsio->kern_total_len = alloc_len; 9827 } 9828 ctsio->kern_data_resid = 0; 9829 ctsio->kern_rel_offset = 0; 9830 ctsio->kern_sg_entries = 0; 9831 9832 /* 9833 * The control device is always connected. The disk device, on the 9834 * other hand, may not be online all the time. 
9835 */ 9836 if (lun != NULL) 9837 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9838 lun->be_lun->lun_type; 9839 else 9840 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9841 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9842 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9843 mpp_ptr->descr[0].page_code = 0x3f; 9844 mpp_ptr->descr[0].subpage_code = 0xff; 9845 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9846 9847 ctl_set_success(ctsio); 9848 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9849 ctsio->be_move_done = ctl_config_move_done; 9850 ctl_datamove((union ctl_io *)ctsio); 9851 return (CTL_RETVAL_COMPLETE); 9852 } 9853 9854 static int 9855 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9856 { 9857 struct scsi_vpd_device_id *devid_ptr; 9858 struct scsi_vpd_id_descriptor *desc; 9859 struct ctl_softc *ctl_softc; 9860 struct ctl_lun *lun; 9861 struct ctl_port *port; 9862 int data_len; 9863 uint8_t proto; 9864 9865 ctl_softc = control_softc; 9866 9867 port = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9868 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9869 9870 data_len = sizeof(struct scsi_vpd_device_id) + 9871 sizeof(struct scsi_vpd_id_descriptor) + 9872 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9873 sizeof(struct scsi_vpd_id_descriptor) + 9874 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9875 if (lun && lun->lun_devid) 9876 data_len += lun->lun_devid->len; 9877 if (port->port_devid) 9878 data_len += port->port_devid->len; 9879 if (port->target_devid) 9880 data_len += port->target_devid->len; 9881 9882 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9883 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9884 ctsio->kern_sg_entries = 0; 9885 9886 if (data_len < alloc_len) { 9887 ctsio->residual = alloc_len - data_len; 9888 ctsio->kern_data_len = data_len; 9889 ctsio->kern_total_len = data_len; 9890 } else { 9891 ctsio->residual = 0; 9892 ctsio->kern_data_len = alloc_len; 9893 ctsio->kern_total_len = alloc_len; 9894 } 9895 ctsio->kern_data_resid = 0; 9896 ctsio->kern_rel_offset = 0; 9897 ctsio->kern_sg_entries = 0; 9898 9899 /* 9900 * The control device is always connected. The disk device, on the 9901 * other hand, may not be online all the time. 9902 */ 9903 if (lun != NULL) 9904 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9905 lun->be_lun->lun_type; 9906 else 9907 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9908 devid_ptr->page_code = SVPD_DEVICE_ID; 9909 scsi_ulto2b(data_len - 4, devid_ptr->length); 9910 9911 if (port->port_type == CTL_PORT_FC) 9912 proto = SCSI_PROTO_FC << 4; 9913 else if (port->port_type == CTL_PORT_ISCSI) 9914 proto = SCSI_PROTO_ISCSI << 4; 9915 else 9916 proto = SCSI_PROTO_SPI << 4; 9917 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9918 9919 /* 9920 * We're using a LUN association here. i.e., this device ID is a 9921 * per-LUN identifier. 9922 */ 9923 if (lun && lun->lun_devid) { 9924 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9925 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9926 lun->lun_devid->len); 9927 } 9928 9929 /* 9930 * This is for the WWPN which is a port association. 
9931 */ 9932 if (port->port_devid) { 9933 memcpy(desc, port->port_devid->data, port->port_devid->len); 9934 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9935 port->port_devid->len); 9936 } 9937 9938 /* 9939 * This is for the Relative Target Port(type 4h) identifier 9940 */ 9941 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9942 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9943 SVPD_ID_TYPE_RELTARG; 9944 desc->length = 4; 9945 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9946 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9947 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9948 9949 /* 9950 * This is for the Target Port Group(type 5h) identifier 9951 */ 9952 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9953 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9954 SVPD_ID_TYPE_TPORTGRP; 9955 desc->length = 4; 9956 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 9957 &desc->identifier[2]); 9958 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9959 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9960 9961 /* 9962 * This is for the Target identifier 9963 */ 9964 if (port->target_devid) { 9965 memcpy(desc, port->target_devid->data, port->target_devid->len); 9966 } 9967 9968 ctl_set_success(ctsio); 9969 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9970 ctsio->be_move_done = ctl_config_move_done; 9971 ctl_datamove((union ctl_io *)ctsio); 9972 return (CTL_RETVAL_COMPLETE); 9973 } 9974 9975 static int 9976 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9977 { 9978 struct ctl_softc *softc = control_softc; 9979 struct scsi_vpd_scsi_ports *sp; 9980 struct scsi_vpd_port_designation *pd; 9981 struct scsi_vpd_port_designation_cont *pdc; 9982 struct ctl_lun *lun; 9983 struct ctl_port *port; 9984 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 9985 int num_target_port_groups; 9986 9987 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9988 9989 if (softc->is_single) 9990 num_target_port_groups = 1; 9991 else 9992 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 9993 num_target_ports = 0; 9994 iid_len = 0; 9995 id_len = 0; 9996 mtx_lock(&softc->ctl_lock); 9997 STAILQ_FOREACH(port, &softc->port_list, links) { 9998 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9999 continue; 10000 if (lun != NULL && 10001 ctl_map_lun_back(port->targ_port, lun->lun) >= 10002 CTL_MAX_LUNS) 10003 continue; 10004 num_target_ports++; 10005 if (port->init_devid) 10006 iid_len += port->init_devid->len; 10007 if (port->port_devid) 10008 id_len += port->port_devid->len; 10009 } 10010 mtx_unlock(&softc->ctl_lock); 10011 10012 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 10013 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 10014 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 10015 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10016 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 10017 ctsio->kern_sg_entries = 0; 10018 10019 if (data_len < alloc_len) { 10020 ctsio->residual = alloc_len - data_len; 10021 ctsio->kern_data_len = data_len; 10022 ctsio->kern_total_len = data_len; 10023 } else { 10024 ctsio->residual = 0; 10025 ctsio->kern_data_len = alloc_len; 10026 ctsio->kern_total_len = alloc_len; 10027 } 10028 ctsio->kern_data_resid = 0; 10029 ctsio->kern_rel_offset = 0; 10030 ctsio->kern_sg_entries = 0; 10031 10032 /* 10033 * The control device is always connected. 
The disk device, on the 10034 * other hand, may not be online all the time. Need to change this 10035 * to figure out whether the disk device is actually online or not. 10036 */ 10037 if (lun != NULL) 10038 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 10039 lun->be_lun->lun_type; 10040 else 10041 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10042 10043 sp->page_code = SVPD_SCSI_PORTS; 10044 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 10045 sp->page_length); 10046 pd = &sp->design[0]; 10047 10048 mtx_lock(&softc->ctl_lock); 10049 pg = softc->port_offset / CTL_MAX_PORTS; 10050 for (g = 0; g < num_target_port_groups; g++) { 10051 STAILQ_FOREACH(port, &softc->port_list, links) { 10052 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10053 continue; 10054 if (lun != NULL && 10055 ctl_map_lun_back(port->targ_port, lun->lun) >= 10056 CTL_MAX_LUNS) 10057 continue; 10058 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 10059 scsi_ulto2b(p, pd->relative_port_id); 10060 if (port->init_devid && g == pg) { 10061 iid_len = port->init_devid->len; 10062 memcpy(pd->initiator_transportid, 10063 port->init_devid->data, port->init_devid->len); 10064 } else 10065 iid_len = 0; 10066 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 10067 pdc = (struct scsi_vpd_port_designation_cont *) 10068 (&pd->initiator_transportid[iid_len]); 10069 if (port->port_devid && g == pg) { 10070 id_len = port->port_devid->len; 10071 memcpy(pdc->target_port_descriptors, 10072 port->port_devid->data, port->port_devid->len); 10073 } else 10074 id_len = 0; 10075 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 10076 pd = (struct scsi_vpd_port_designation *) 10077 ((uint8_t *)pdc->target_port_descriptors + id_len); 10078 } 10079 } 10080 mtx_unlock(&softc->ctl_lock); 10081 10082 ctl_set_success(ctsio); 10083 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10084 ctsio->be_move_done = ctl_config_move_done; 10085 ctl_datamove((union ctl_io *)ctsio); 10086 return (CTL_RETVAL_COMPLETE); 10087 } 10088 10089 static int 10090 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 10091 { 10092 struct scsi_vpd_block_limits *bl_ptr; 10093 struct ctl_lun *lun; 10094 int bs; 10095 10096 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10097 10098 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 10099 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 10100 ctsio->kern_sg_entries = 0; 10101 10102 if (sizeof(*bl_ptr) < alloc_len) { 10103 ctsio->residual = alloc_len - sizeof(*bl_ptr); 10104 ctsio->kern_data_len = sizeof(*bl_ptr); 10105 ctsio->kern_total_len = sizeof(*bl_ptr); 10106 } else { 10107 ctsio->residual = 0; 10108 ctsio->kern_data_len = alloc_len; 10109 ctsio->kern_total_len = alloc_len; 10110 } 10111 ctsio->kern_data_resid = 0; 10112 ctsio->kern_rel_offset = 0; 10113 ctsio->kern_sg_entries = 0; 10114 10115 /* 10116 * The control device is always connected. The disk device, on the 10117 * other hand, may not be online all the time. Need to change this 10118 * to figure out whether the disk device is actually online or not. 
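	 *
	 * The limits reported below are mostly "no practical limit" values;
	 * the UNMAP counts and unmap granularity are only filled in when
	 * the backing LUN was created with UNMAP support.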
10119 */ 10120 if (lun != NULL) 10121 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10122 lun->be_lun->lun_type; 10123 else 10124 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10125 10126 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 10127 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 10128 bl_ptr->max_cmp_write_len = 0xff; 10129 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 10130 if (lun != NULL) { 10131 bs = lun->be_lun->blocksize; 10132 scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len); 10133 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10134 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 10135 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 10136 if (lun->be_lun->pblockexp != 0) { 10137 scsi_ulto4b((1 << lun->be_lun->pblockexp), 10138 bl_ptr->opt_unmap_grain); 10139 scsi_ulto4b(0x80000000 | lun->be_lun->pblockoff, 10140 bl_ptr->unmap_grain_align); 10141 } 10142 } 10143 scsi_ulto4b(lun->be_lun->atomicblock, 10144 bl_ptr->max_atomic_transfer_length); 10145 scsi_ulto4b(0, bl_ptr->atomic_alignment); 10146 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 10147 } 10148 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 10149 10150 ctl_set_success(ctsio); 10151 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10152 ctsio->be_move_done = ctl_config_move_done; 10153 ctl_datamove((union ctl_io *)ctsio); 10154 return (CTL_RETVAL_COMPLETE); 10155 } 10156 10157 static int 10158 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10159 { 10160 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10161 struct ctl_lun *lun; 10162 const char *value; 10163 u_int i; 10164 10165 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10166 10167 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10168 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10169 ctsio->kern_sg_entries = 0; 10170 10171 if (sizeof(*bdc_ptr) < alloc_len) { 10172 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 10173 ctsio->kern_data_len = sizeof(*bdc_ptr); 10174 ctsio->kern_total_len = sizeof(*bdc_ptr); 10175 } else { 10176 ctsio->residual = 0; 10177 ctsio->kern_data_len = alloc_len; 10178 ctsio->kern_total_len = alloc_len; 10179 } 10180 ctsio->kern_data_resid = 0; 10181 ctsio->kern_rel_offset = 0; 10182 ctsio->kern_sg_entries = 0; 10183 10184 /* 10185 * The control device is always connected. The disk device, on the 10186 * other hand, may not be online all the time. Need to change this 10187 * to figure out whether the disk device is actually online or not. 
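	 *
	 * The rotation rate and form factor reported below come from the
	 * optional "rpm" and "formfactor" LUN options; without them we fall
	 * back to CTL_DEFAULT_ROTATION_RATE and a zero (unreported) form
	 * factor.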
10188 */ 10189 if (lun != NULL) 10190 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10191 lun->be_lun->lun_type; 10192 else 10193 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10194 bdc_ptr->page_code = SVPD_BDC; 10195 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10196 if (lun != NULL && 10197 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 10198 i = strtol(value, NULL, 0); 10199 else 10200 i = CTL_DEFAULT_ROTATION_RATE; 10201 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10202 if (lun != NULL && 10203 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 10204 i = strtol(value, NULL, 0); 10205 else 10206 i = 0; 10207 bdc_ptr->wab_wac_ff = (i & 0x0f); 10208 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10209 10210 ctl_set_success(ctsio); 10211 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10212 ctsio->be_move_done = ctl_config_move_done; 10213 ctl_datamove((union ctl_io *)ctsio); 10214 return (CTL_RETVAL_COMPLETE); 10215 } 10216 10217 static int 10218 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10219 { 10220 struct scsi_vpd_logical_block_prov *lbp_ptr; 10221 struct ctl_lun *lun; 10222 10223 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10224 10225 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10226 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10227 ctsio->kern_sg_entries = 0; 10228 10229 if (sizeof(*lbp_ptr) < alloc_len) { 10230 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10231 ctsio->kern_data_len = sizeof(*lbp_ptr); 10232 ctsio->kern_total_len = sizeof(*lbp_ptr); 10233 } else { 10234 ctsio->residual = 0; 10235 ctsio->kern_data_len = alloc_len; 10236 ctsio->kern_total_len = alloc_len; 10237 } 10238 ctsio->kern_data_resid = 0; 10239 ctsio->kern_rel_offset = 0; 10240 ctsio->kern_sg_entries = 0; 10241 10242 /* 10243 * The control device is always connected. The disk device, on the 10244 * other hand, may not be online all the time. Need to change this 10245 * to figure out whether the disk device is actually online or not. 
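	 *
	 * The provisioning capabilities below (UNMAP, WRITE SAME with
	 * unmap, anchored LBAs, thin provisioning) are only advertised when
	 * the backing LUN was created with UNMAP support; otherwise only
	 * the threshold exponent is reported.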
10246 */ 10247 if (lun != NULL) 10248 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10249 lun->be_lun->lun_type; 10250 else 10251 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10252 10253 lbp_ptr->page_code = SVPD_LBP; 10254 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10255 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10256 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10257 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10258 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10259 lbp_ptr->prov_type = SVPD_LBP_THIN; 10260 } 10261 10262 ctl_set_success(ctsio); 10263 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10264 ctsio->be_move_done = ctl_config_move_done; 10265 ctl_datamove((union ctl_io *)ctsio); 10266 return (CTL_RETVAL_COMPLETE); 10267 } 10268 10269 static int 10270 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10271 { 10272 struct scsi_inquiry *cdb; 10273 int alloc_len, retval; 10274 10275 cdb = (struct scsi_inquiry *)ctsio->cdb; 10276 10277 retval = CTL_RETVAL_COMPLETE; 10278 10279 alloc_len = scsi_2btoul(cdb->length); 10280 10281 switch (cdb->page_code) { 10282 case SVPD_SUPPORTED_PAGES: 10283 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10284 break; 10285 case SVPD_UNIT_SERIAL_NUMBER: 10286 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10287 break; 10288 case SVPD_DEVICE_ID: 10289 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10290 break; 10291 case SVPD_EXTENDED_INQUIRY_DATA: 10292 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10293 break; 10294 case SVPD_MODE_PAGE_POLICY: 10295 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10296 break; 10297 case SVPD_SCSI_PORTS: 10298 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10299 break; 10300 case SVPD_SCSI_TPC: 10301 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10302 break; 10303 case SVPD_BLOCK_LIMITS: 10304 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10305 break; 10306 case SVPD_BDC: 10307 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10308 break; 10309 case SVPD_LBP: 10310 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10311 break; 10312 default: 10313 ctl_set_invalid_field(ctsio, 10314 /*sks_valid*/ 1, 10315 /*command*/ 1, 10316 /*field*/ 2, 10317 /*bit_valid*/ 0, 10318 /*bit*/ 0); 10319 ctl_done((union ctl_io *)ctsio); 10320 retval = CTL_RETVAL_COMPLETE; 10321 break; 10322 } 10323 10324 return (retval); 10325 } 10326 10327 static int 10328 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10329 { 10330 struct scsi_inquiry_data *inq_ptr; 10331 struct scsi_inquiry *cdb; 10332 struct ctl_softc *ctl_softc; 10333 struct ctl_lun *lun; 10334 char *val; 10335 uint32_t alloc_len, data_len; 10336 ctl_port_type port_type; 10337 10338 ctl_softc = control_softc; 10339 10340 /* 10341 * Figure out whether we're talking to a Fibre Channel port or not. 10342 * We treat the ioctl front end, and any SCSI adapters, as packetized 10343 * SCSI front ends. 10344 */ 10345 port_type = ctl_softc->ctl_ports[ 10346 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; 10347 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10348 port_type = CTL_PORT_SCSI; 10349 10350 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10351 cdb = (struct scsi_inquiry *)ctsio->cdb; 10352 alloc_len = scsi_2btoul(cdb->length); 10353 10354 /* 10355 * We malloc the full inquiry data size here and fill it 10356 * in. If the user only asks for less, we'll give him 10357 * that much. 
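	 * (The residual/kern_data_len setup just below implements that
	 * truncation: an allocation length larger than data_len leaves a
	 * residual, while a smaller one simply limits how much we move.)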
 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	/*
	 * If we have a LUN configured, report it as connected.  Otherwise,
	 * report that it is offline or no device is supported, depending
	 * on the value of inquiry_pq_no_lun.
	 *
	 * According to the spec (SPC-4 r34), the peripheral qualifier
	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
	 *
	 * "A peripheral device having the specified peripheral device type
	 * is not connected to this logical unit. However, the device
	 * server is capable of supporting the specified peripheral device
	 * type on this logical unit."
	 *
	 * According to the same spec, the peripheral qualifier
	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
	 *
	 * "The device server is not capable of supporting a peripheral
	 * device on this logical unit. For this peripheral qualifier the
	 * peripheral device type shall be set to 1Fh. All other peripheral
	 * device type values are reserved for this peripheral qualifier."
	 *
	 * Given the text, it would seem that we probably want to report that
	 * the LUN is offline here.  There is no LUN connected, but we can
	 * support a LUN at the given LUN number.
	 *
	 * In the real world, though, it sounds like things are a little
	 * different:
	 *
	 * - Linux, when presented with a LUN with the offline peripheral
	 *   qualifier, will create an sg driver instance for it.  So when
	 *   you attach it to CTL, you wind up with a ton of sg driver
	 *   instances.  (One for every LUN that Linux bothered to probe.)
	 *   Linux does this despite the fact that it issues a REPORT LUNS
	 *   to LUN 0 to get the inventory of supported LUNs.
	 *
	 * - There is other anecdotal evidence (from Emulex folks) about
	 *   arrays that use the offline peripheral qualifier for LUNs that
	 *   are on the "passive" path in an active/passive array.
	 *
	 * So the solution is to provide a hopefully reasonable default
	 * (return bad/no LUN) and allow the user to change the behavior
	 * with a tunable/sysctl variable.
	 */
	if (lun != NULL)
		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else if (ctl_softc->inquiry_pq_no_lun == 0)
		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.
The HISUP bit shall be set to one in the 10439 * standard INQUIRY data (see SPC-2) when any logical unit number 10440 * format described in this standard is used. Non-hierarchical 10441 * formats are outside the scope of this standard. 10442 * 10443 * Therefore we set the HiSup bit here. 10444 * 10445 * The reponse format is 2, per SPC-3. 10446 */ 10447 inq_ptr->response_format = SID_HiSup | 2; 10448 10449 inq_ptr->additional_length = data_len - 10450 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10451 CTL_DEBUG_PRINT(("additional_length = %d\n", 10452 inq_ptr->additional_length)); 10453 10454 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10455 /* 16 bit addressing */ 10456 if (port_type == CTL_PORT_SCSI) 10457 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10458 /* XXX set the SID_MultiP bit here if we're actually going to 10459 respond on multiple ports */ 10460 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10461 10462 /* 16 bit data bus, synchronous transfers */ 10463 if (port_type == CTL_PORT_SCSI) 10464 inq_ptr->flags = SID_WBus16 | SID_Sync; 10465 /* 10466 * XXX KDM do we want to support tagged queueing on the control 10467 * device at all? 10468 */ 10469 if ((lun == NULL) 10470 || (lun->be_lun->lun_type != T_PROCESSOR)) 10471 inq_ptr->flags |= SID_CmdQue; 10472 /* 10473 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10474 * We have 8 bytes for the vendor name, and 16 bytes for the device 10475 * name and 4 bytes for the revision. 10476 */ 10477 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10478 "vendor")) == NULL) { 10479 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10480 } else { 10481 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10482 strncpy(inq_ptr->vendor, val, 10483 min(sizeof(inq_ptr->vendor), strlen(val))); 10484 } 10485 if (lun == NULL) { 10486 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10487 sizeof(inq_ptr->product)); 10488 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10489 switch (lun->be_lun->lun_type) { 10490 case T_DIRECT: 10491 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10492 sizeof(inq_ptr->product)); 10493 break; 10494 case T_PROCESSOR: 10495 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10496 sizeof(inq_ptr->product)); 10497 break; 10498 default: 10499 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10500 sizeof(inq_ptr->product)); 10501 break; 10502 } 10503 } else { 10504 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10505 strncpy(inq_ptr->product, val, 10506 min(sizeof(inq_ptr->product), strlen(val))); 10507 } 10508 10509 /* 10510 * XXX make this a macro somewhere so it automatically gets 10511 * incremented when we make changes. 10512 */ 10513 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10514 "revision")) == NULL) { 10515 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10516 } else { 10517 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10518 strncpy(inq_ptr->revision, val, 10519 min(sizeof(inq_ptr->revision), strlen(val))); 10520 } 10521 10522 /* 10523 * For parallel SCSI, we support double transition and single 10524 * transition clocking. We also support QAS (Quick Arbitration 10525 * and Selection) and Information Unit transfers on both the 10526 * control and array devices. 
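	 * These map to the SID_SPI_CLOCK_DT_ST, SID_SPI_QAS and SID_SPI_IUS
	 * bits set just below, and are only reported for parallel SCSI
	 * front ends.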
10527 */ 10528 if (port_type == CTL_PORT_SCSI) 10529 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10530 SID_SPI_IUS; 10531 10532 /* SAM-5 (no version claimed) */ 10533 scsi_ulto2b(0x00A0, inq_ptr->version1); 10534 /* SPC-4 (no version claimed) */ 10535 scsi_ulto2b(0x0460, inq_ptr->version2); 10536 if (port_type == CTL_PORT_FC) { 10537 /* FCP-2 ANSI INCITS.350:2003 */ 10538 scsi_ulto2b(0x0917, inq_ptr->version3); 10539 } else if (port_type == CTL_PORT_SCSI) { 10540 /* SPI-4 ANSI INCITS.362:200x */ 10541 scsi_ulto2b(0x0B56, inq_ptr->version3); 10542 } else if (port_type == CTL_PORT_ISCSI) { 10543 /* iSCSI (no version claimed) */ 10544 scsi_ulto2b(0x0960, inq_ptr->version3); 10545 } else if (port_type == CTL_PORT_SAS) { 10546 /* SAS (no version claimed) */ 10547 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10548 } 10549 10550 if (lun == NULL) { 10551 /* SBC-4 (no version claimed) */ 10552 scsi_ulto2b(0x0600, inq_ptr->version4); 10553 } else { 10554 switch (lun->be_lun->lun_type) { 10555 case T_DIRECT: 10556 /* SBC-4 (no version claimed) */ 10557 scsi_ulto2b(0x0600, inq_ptr->version4); 10558 break; 10559 case T_PROCESSOR: 10560 default: 10561 break; 10562 } 10563 } 10564 10565 ctl_set_success(ctsio); 10566 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10567 ctsio->be_move_done = ctl_config_move_done; 10568 ctl_datamove((union ctl_io *)ctsio); 10569 return (CTL_RETVAL_COMPLETE); 10570 } 10571 10572 int 10573 ctl_inquiry(struct ctl_scsiio *ctsio) 10574 { 10575 struct scsi_inquiry *cdb; 10576 int retval; 10577 10578 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10579 10580 cdb = (struct scsi_inquiry *)ctsio->cdb; 10581 if (cdb->byte2 & SI_EVPD) 10582 retval = ctl_inquiry_evpd(ctsio); 10583 else if (cdb->page_code == 0) 10584 retval = ctl_inquiry_std(ctsio); 10585 else { 10586 ctl_set_invalid_field(ctsio, 10587 /*sks_valid*/ 1, 10588 /*command*/ 1, 10589 /*field*/ 2, 10590 /*bit_valid*/ 0, 10591 /*bit*/ 0); 10592 ctl_done((union ctl_io *)ctsio); 10593 return (CTL_RETVAL_COMPLETE); 10594 } 10595 10596 return (retval); 10597 } 10598 10599 /* 10600 * For known CDB types, parse the LBA and length. 
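 *
 * Returns 0 and fills in *lba and *len (in logical blocks, or a
 * conservative maximum for commands like UNMAP) for CDBs we understand;
 * returns 1 for non-SCSI I/O or for CDBs we do not parse.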
10601 */ 10602 static int 10603 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10604 { 10605 if (io->io_hdr.io_type != CTL_IO_SCSI) 10606 return (1); 10607 10608 switch (io->scsiio.cdb[0]) { 10609 case COMPARE_AND_WRITE: { 10610 struct scsi_compare_and_write *cdb; 10611 10612 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10613 10614 *lba = scsi_8btou64(cdb->addr); 10615 *len = cdb->length; 10616 break; 10617 } 10618 case READ_6: 10619 case WRITE_6: { 10620 struct scsi_rw_6 *cdb; 10621 10622 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10623 10624 *lba = scsi_3btoul(cdb->addr); 10625 /* only 5 bits are valid in the most significant address byte */ 10626 *lba &= 0x1fffff; 10627 *len = cdb->length; 10628 break; 10629 } 10630 case READ_10: 10631 case WRITE_10: { 10632 struct scsi_rw_10 *cdb; 10633 10634 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10635 10636 *lba = scsi_4btoul(cdb->addr); 10637 *len = scsi_2btoul(cdb->length); 10638 break; 10639 } 10640 case WRITE_VERIFY_10: { 10641 struct scsi_write_verify_10 *cdb; 10642 10643 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10644 10645 *lba = scsi_4btoul(cdb->addr); 10646 *len = scsi_2btoul(cdb->length); 10647 break; 10648 } 10649 case READ_12: 10650 case WRITE_12: { 10651 struct scsi_rw_12 *cdb; 10652 10653 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10654 10655 *lba = scsi_4btoul(cdb->addr); 10656 *len = scsi_4btoul(cdb->length); 10657 break; 10658 } 10659 case WRITE_VERIFY_12: { 10660 struct scsi_write_verify_12 *cdb; 10661 10662 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10663 10664 *lba = scsi_4btoul(cdb->addr); 10665 *len = scsi_4btoul(cdb->length); 10666 break; 10667 } 10668 case READ_16: 10669 case WRITE_16: 10670 case WRITE_ATOMIC_16: { 10671 struct scsi_rw_16 *cdb; 10672 10673 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10674 10675 *lba = scsi_8btou64(cdb->addr); 10676 *len = scsi_4btoul(cdb->length); 10677 break; 10678 } 10679 case WRITE_VERIFY_16: { 10680 struct scsi_write_verify_16 *cdb; 10681 10682 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10683 10684 *lba = scsi_8btou64(cdb->addr); 10685 *len = scsi_4btoul(cdb->length); 10686 break; 10687 } 10688 case WRITE_SAME_10: { 10689 struct scsi_write_same_10 *cdb; 10690 10691 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10692 10693 *lba = scsi_4btoul(cdb->addr); 10694 *len = scsi_2btoul(cdb->length); 10695 break; 10696 } 10697 case WRITE_SAME_16: { 10698 struct scsi_write_same_16 *cdb; 10699 10700 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10701 10702 *lba = scsi_8btou64(cdb->addr); 10703 *len = scsi_4btoul(cdb->length); 10704 break; 10705 } 10706 case VERIFY_10: { 10707 struct scsi_verify_10 *cdb; 10708 10709 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10710 10711 *lba = scsi_4btoul(cdb->addr); 10712 *len = scsi_2btoul(cdb->length); 10713 break; 10714 } 10715 case VERIFY_12: { 10716 struct scsi_verify_12 *cdb; 10717 10718 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10719 10720 *lba = scsi_4btoul(cdb->addr); 10721 *len = scsi_4btoul(cdb->length); 10722 break; 10723 } 10724 case VERIFY_16: { 10725 struct scsi_verify_16 *cdb; 10726 10727 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10728 10729 *lba = scsi_8btou64(cdb->addr); 10730 *len = scsi_4btoul(cdb->length); 10731 break; 10732 } 10733 case UNMAP: { 10734 *lba = 0; 10735 *len = UINT64_MAX; 10736 break; 10737 } 10738 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10739 struct scsi_get_lba_status *cdb; 10740 10741 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 
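		/*
		 * The allocation length in GET LBA STATUS limits the size of
		 * the returned parameter data rather than the number of
		 * blocks described, so for extent checking purposes we
		 * conservatively treat the command as covering a maximal
		 * range starting at the LBA.
		 */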
10742 *lba = scsi_8btou64(cdb->addr); 10743 *len = UINT32_MAX; 10744 break; 10745 } 10746 default: 10747 return (1); 10748 break; /* NOTREACHED */ 10749 } 10750 10751 return (0); 10752 } 10753 10754 static ctl_action 10755 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2) 10756 { 10757 uint64_t endlba1, endlba2; 10758 10759 endlba1 = lba1 + len1 - 1; 10760 endlba2 = lba2 + len2 - 1; 10761 10762 if ((endlba1 < lba2) 10763 || (endlba2 < lba1)) 10764 return (CTL_ACTION_PASS); 10765 else 10766 return (CTL_ACTION_BLOCK); 10767 } 10768 10769 static int 10770 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10771 { 10772 struct ctl_ptr_len_flags *ptrlen; 10773 struct scsi_unmap_desc *buf, *end, *range; 10774 uint64_t lba; 10775 uint32_t len; 10776 10777 /* If not UNMAP -- go other way. */ 10778 if (io->io_hdr.io_type != CTL_IO_SCSI || 10779 io->scsiio.cdb[0] != UNMAP) 10780 return (CTL_ACTION_ERROR); 10781 10782 /* If UNMAP without data -- block and wait for data. */ 10783 ptrlen = (struct ctl_ptr_len_flags *) 10784 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10785 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10786 ptrlen->ptr == NULL) 10787 return (CTL_ACTION_BLOCK); 10788 10789 /* UNMAP with data -- check for collision. */ 10790 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10791 end = buf + ptrlen->len / sizeof(*buf); 10792 for (range = buf; range < end; range++) { 10793 lba = scsi_8btou64(range->lba); 10794 len = scsi_4btoul(range->length); 10795 if ((lba < lba2 + len2) && (lba + len > lba2)) 10796 return (CTL_ACTION_BLOCK); 10797 } 10798 return (CTL_ACTION_PASS); 10799 } 10800 10801 static ctl_action 10802 ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 10803 { 10804 uint64_t lba1, lba2; 10805 uint64_t len1, len2; 10806 int retval; 10807 10808 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10809 return (CTL_ACTION_ERROR); 10810 10811 retval = ctl_extent_check_unmap(io2, lba1, len1); 10812 if (retval != CTL_ACTION_ERROR) 10813 return (retval); 10814 10815 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10816 return (CTL_ACTION_ERROR); 10817 10818 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 10819 } 10820 10821 static ctl_action 10822 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10823 union ctl_io *ooa_io) 10824 { 10825 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10826 ctl_serialize_action *serialize_row; 10827 10828 /* 10829 * The initiator attempted multiple untagged commands at the same 10830 * time. Can't do that. 10831 */ 10832 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10833 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10834 && ((pending_io->io_hdr.nexus.targ_port == 10835 ooa_io->io_hdr.nexus.targ_port) 10836 && (pending_io->io_hdr.nexus.initid.id == 10837 ooa_io->io_hdr.nexus.initid.id)) 10838 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10839 return (CTL_ACTION_OVERLAP); 10840 10841 /* 10842 * The initiator attempted to send multiple tagged commands with 10843 * the same ID. (It's fine if different initiators have the same 10844 * tag ID.) 10845 * 10846 * Even if all of those conditions are true, we don't kill the I/O 10847 * if the command ahead of us has been aborted. We won't end up 10848 * sending it to the FETD, and it's perfectly legal to resend a 10849 * command with the same tag number as long as the previous 10850 * instance of this tag number has been aborted somehow. 
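	 *
	 * Concretely, the check below requires that both commands be tagged,
	 * carry the same tag number and come from the same I_T nexus (same
	 * target port and initiator ID), and it ignores an ooa_io that has
	 * already been flagged CTL_FLAG_ABORT.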
10851 */ 10852 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10853 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10854 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10855 && ((pending_io->io_hdr.nexus.targ_port == 10856 ooa_io->io_hdr.nexus.targ_port) 10857 && (pending_io->io_hdr.nexus.initid.id == 10858 ooa_io->io_hdr.nexus.initid.id)) 10859 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10860 return (CTL_ACTION_OVERLAP_TAG); 10861 10862 /* 10863 * If we get a head of queue tag, SAM-3 says that we should 10864 * immediately execute it. 10865 * 10866 * What happens if this command would normally block for some other 10867 * reason? e.g. a request sense with a head of queue tag 10868 * immediately after a write. Normally that would block, but this 10869 * will result in its getting executed immediately... 10870 * 10871 * We currently return "pass" instead of "skip", so we'll end up 10872 * going through the rest of the queue to check for overlapped tags. 10873 * 10874 * XXX KDM check for other types of blockage first?? 10875 */ 10876 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10877 return (CTL_ACTION_PASS); 10878 10879 /* 10880 * Ordered tags have to block until all items ahead of them 10881 * have completed. If we get called with an ordered tag, we always 10882 * block, if something else is ahead of us in the queue. 10883 */ 10884 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10885 return (CTL_ACTION_BLOCK); 10886 10887 /* 10888 * Simple tags get blocked until all head of queue and ordered tags 10889 * ahead of them have completed. I'm lumping untagged commands in 10890 * with simple tags here. XXX KDM is that the right thing to do? 10891 */ 10892 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10893 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10894 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10895 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10896 return (CTL_ACTION_BLOCK); 10897 10898 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10899 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10900 10901 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10902 10903 switch (serialize_row[pending_entry->seridx]) { 10904 case CTL_SER_BLOCK: 10905 return (CTL_ACTION_BLOCK); 10906 case CTL_SER_EXTENT: 10907 return (ctl_extent_check(pending_io, ooa_io)); 10908 case CTL_SER_EXTENTOPT: 10909 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10910 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10911 return (ctl_extent_check(pending_io, ooa_io)); 10912 /* FALLTHROUGH */ 10913 case CTL_SER_PASS: 10914 return (CTL_ACTION_PASS); 10915 case CTL_SER_BLOCKOPT: 10916 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10917 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10918 return (CTL_ACTION_BLOCK); 10919 return (CTL_ACTION_PASS); 10920 case CTL_SER_SKIP: 10921 return (CTL_ACTION_SKIP); 10922 default: 10923 panic("invalid serialization value %d", 10924 serialize_row[pending_entry->seridx]); 10925 } 10926 10927 return (CTL_ACTION_ERROR); 10928 } 10929 10930 /* 10931 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10932 * Assumptions: 10933 * - pending_io is generally either incoming, or on the blocked queue 10934 * - starting I/O is the I/O we want to start the check with. 
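 * - the caller must hold the LUN lock; we assert that below.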
10935 */ 10936 static ctl_action 10937 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10938 union ctl_io *starting_io) 10939 { 10940 union ctl_io *ooa_io; 10941 ctl_action action; 10942 10943 mtx_assert(&lun->lun_lock, MA_OWNED); 10944 10945 /* 10946 * Run back along the OOA queue, starting with the current 10947 * blocked I/O and going through every I/O before it on the 10948 * queue. If starting_io is NULL, we'll just end up returning 10949 * CTL_ACTION_PASS. 10950 */ 10951 for (ooa_io = starting_io; ooa_io != NULL; 10952 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10953 ooa_links)){ 10954 10955 /* 10956 * This routine just checks to see whether 10957 * cur_blocked is blocked by ooa_io, which is ahead 10958 * of it in the queue. It doesn't queue/dequeue 10959 * cur_blocked. 10960 */ 10961 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10962 switch (action) { 10963 case CTL_ACTION_BLOCK: 10964 case CTL_ACTION_OVERLAP: 10965 case CTL_ACTION_OVERLAP_TAG: 10966 case CTL_ACTION_SKIP: 10967 case CTL_ACTION_ERROR: 10968 return (action); 10969 break; /* NOTREACHED */ 10970 case CTL_ACTION_PASS: 10971 break; 10972 default: 10973 panic("invalid action %d", action); 10974 break; /* NOTREACHED */ 10975 } 10976 } 10977 10978 return (CTL_ACTION_PASS); 10979 } 10980 10981 /* 10982 * Assumptions: 10983 * - An I/O has just completed, and has been removed from the per-LUN OOA 10984 * queue, so some items on the blocked queue may now be unblocked. 10985 */ 10986 static int 10987 ctl_check_blocked(struct ctl_lun *lun) 10988 { 10989 union ctl_io *cur_blocked, *next_blocked; 10990 10991 mtx_assert(&lun->lun_lock, MA_OWNED); 10992 10993 /* 10994 * Run forward from the head of the blocked queue, checking each 10995 * entry against the I/Os prior to it on the OOA queue to see if 10996 * there is still any blockage. 10997 * 10998 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10999 * with our removing a variable on it while it is traversing the 11000 * list. 11001 */ 11002 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11003 cur_blocked != NULL; cur_blocked = next_blocked) { 11004 union ctl_io *prev_ooa; 11005 ctl_action action; 11006 11007 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11008 blocked_links); 11009 11010 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11011 ctl_ooaq, ooa_links); 11012 11013 /* 11014 * If cur_blocked happens to be the first item in the OOA 11015 * queue now, prev_ooa will be NULL, and the action 11016 * returned will just be CTL_ACTION_PASS. 11017 */ 11018 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 11019 11020 switch (action) { 11021 case CTL_ACTION_BLOCK: 11022 /* Nothing to do here, still blocked */ 11023 break; 11024 case CTL_ACTION_OVERLAP: 11025 case CTL_ACTION_OVERLAP_TAG: 11026 /* 11027 * This shouldn't happen! In theory we've already 11028 * checked this command for overlap... 11029 */ 11030 break; 11031 case CTL_ACTION_PASS: 11032 case CTL_ACTION_SKIP: { 11033 struct ctl_softc *softc; 11034 const struct ctl_cmd_entry *entry; 11035 int isc_retval; 11036 11037 /* 11038 * The skip case shouldn't happen, this transaction 11039 * should have never made it onto the blocked queue. 11040 */ 11041 /* 11042 * This I/O is no longer blocked, we can remove it 11043 * from the blocked queue. Since this is a TAILQ 11044 * (doubly linked list), we can do O(1) removals 11045 * from any place on the list. 
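			 * We saved next_blocked at the top of the loop, so
			 * removing cur_blocked here does not break the
			 * traversal.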
11046 */ 11047 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11048 blocked_links); 11049 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11050 11051 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 11052 /* 11053 * Need to send IO back to original side to 11054 * run 11055 */ 11056 union ctl_ha_msg msg_info; 11057 11058 msg_info.hdr.original_sc = 11059 cur_blocked->io_hdr.original_sc; 11060 msg_info.hdr.serializing_sc = cur_blocked; 11061 msg_info.hdr.msg_type = CTL_MSG_R2R; 11062 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11063 &msg_info, sizeof(msg_info), 0)) > 11064 CTL_HA_STATUS_SUCCESS) { 11065 printf("CTL:Check Blocked error from " 11066 "ctl_ha_msg_send %d\n", 11067 isc_retval); 11068 } 11069 break; 11070 } 11071 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11072 softc = control_softc; 11073 11074 /* 11075 * Check this I/O for LUN state changes that may 11076 * have happened while this command was blocked. 11077 * The LUN state may have been changed by a command 11078 * ahead of us in the queue, so we need to re-check 11079 * for any states that can be caused by SCSI 11080 * commands. 11081 */ 11082 if (ctl_scsiio_lun_check(softc, lun, entry, 11083 &cur_blocked->scsiio) == 0) { 11084 cur_blocked->io_hdr.flags |= 11085 CTL_FLAG_IS_WAS_ON_RTR; 11086 ctl_enqueue_rtr(cur_blocked); 11087 } else 11088 ctl_done(cur_blocked); 11089 break; 11090 } 11091 default: 11092 /* 11093 * This probably shouldn't happen -- we shouldn't 11094 * get CTL_ACTION_ERROR, or anything else. 11095 */ 11096 break; 11097 } 11098 } 11099 11100 return (CTL_RETVAL_COMPLETE); 11101 } 11102 11103 /* 11104 * This routine (with one exception) checks LUN flags that can be set by 11105 * commands ahead of us in the OOA queue. These flags have to be checked 11106 * when a command initially comes in, and when we pull a command off the 11107 * blocked queue and are preparing to execute it. The reason we have to 11108 * check these flags for commands on the blocked queue is that the LUN 11109 * state may have been changed by a command ahead of us while we're on the 11110 * blocked queue. 11111 * 11112 * Ordering is somewhat important with these checks, so please pay 11113 * careful attention to the placement of any new checks. 11114 */ 11115 static int 11116 ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 11117 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11118 { 11119 int retval; 11120 uint32_t residx; 11121 11122 retval = 0; 11123 11124 mtx_assert(&lun->lun_lock, MA_OWNED); 11125 11126 /* 11127 * If this shelf is a secondary shelf controller, we have to reject 11128 * any media access commands. 11129 */ 11130 if ((ctl_softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 && 11131 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) { 11132 ctl_set_lun_standby(ctsio); 11133 retval = 1; 11134 goto bailout; 11135 } 11136 11137 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11138 if (lun->flags & CTL_LUN_READONLY) { 11139 ctl_set_sense(ctsio, /*current_error*/ 1, 11140 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11141 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); 11142 retval = 1; 11143 goto bailout; 11144 } 11145 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 11146 .eca_and_aen & SCP_SWP) != 0) { 11147 ctl_set_sense(ctsio, /*current_error*/ 1, 11148 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11149 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11150 retval = 1; 11151 goto bailout; 11152 } 11153 } 11154 11155 /* 11156 * Check for a reservation conflict. 
If this command isn't allowed 11157 * even on reserved LUNs, and if this initiator isn't the one who 11158 * reserved us, reject the command with a reservation conflict. 11159 */ 11160 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 11161 if ((lun->flags & CTL_LUN_RESERVED) 11162 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11163 if (lun->res_idx != residx) { 11164 ctl_set_reservation_conflict(ctsio); 11165 retval = 1; 11166 goto bailout; 11167 } 11168 } 11169 11170 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11171 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11172 /* No reservation or command is allowed. */; 11173 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11174 (lun->res_type == SPR_TYPE_WR_EX || 11175 lun->res_type == SPR_TYPE_WR_EX_RO || 11176 lun->res_type == SPR_TYPE_WR_EX_AR)) { 11177 /* The command is allowed for Write Exclusive resv. */; 11178 } else { 11179 /* 11180 * if we aren't registered or it's a res holder type 11181 * reservation and this isn't the res holder then set a 11182 * conflict. 11183 */ 11184 if (ctl_get_prkey(lun, residx) == 0 11185 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 11186 ctl_set_reservation_conflict(ctsio); 11187 retval = 1; 11188 goto bailout; 11189 } 11190 11191 } 11192 11193 if ((lun->flags & CTL_LUN_OFFLINE) 11194 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 11195 ctl_set_lun_not_ready(ctsio); 11196 retval = 1; 11197 goto bailout; 11198 } 11199 11200 /* 11201 * If the LUN is stopped, see if this particular command is allowed 11202 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 11203 */ 11204 if ((lun->flags & CTL_LUN_STOPPED) 11205 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 11206 /* "Logical unit not ready, initializing cmd. required" */ 11207 ctl_set_lun_stopped(ctsio); 11208 retval = 1; 11209 goto bailout; 11210 } 11211 11212 if ((lun->flags & CTL_LUN_INOPERABLE) 11213 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 11214 /* "Medium format corrupted" */ 11215 ctl_set_medium_format_corrupted(ctsio); 11216 retval = 1; 11217 goto bailout; 11218 } 11219 11220 bailout: 11221 return (retval); 11222 11223 } 11224 11225 static void 11226 ctl_failover_io(union ctl_io *io, int have_lock) 11227 { 11228 ctl_set_busy(&io->scsiio); 11229 ctl_done(io); 11230 } 11231 11232 static void 11233 ctl_failover(void) 11234 { 11235 struct ctl_lun *lun; 11236 struct ctl_softc *ctl_softc; 11237 union ctl_io *next_io, *pending_io; 11238 union ctl_io *io; 11239 int lun_idx; 11240 11241 ctl_softc = control_softc; 11242 11243 mtx_lock(&ctl_softc->ctl_lock); 11244 /* 11245 * Remove any cmds from the other SC from the rtr queue. These 11246 * will obviously only be for LUNs for which we're the primary. 11247 * We can't send status or get/send data for these commands. 11248 * Since they haven't been executed yet, we can just remove them. 11249 * We'll either abort them or delete them below, depending on 11250 * which HA mode we're in. 
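	 *
	 * (Note that the rtr queue sweep below is currently compiled out
	 * under 'notyet'.)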
11251 */ 11252 #ifdef notyet 11253 mtx_lock(&ctl_softc->queue_lock); 11254 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue); 11255 io != NULL; io = next_io) { 11256 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 11257 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 11258 STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr, 11259 ctl_io_hdr, links); 11260 } 11261 mtx_unlock(&ctl_softc->queue_lock); 11262 #endif 11263 11264 for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) { 11265 lun = ctl_softc->ctl_luns[lun_idx]; 11266 if (lun==NULL) 11267 continue; 11268 11269 /* 11270 * Processor LUNs are primary on both sides. 11271 * XXX will this always be true? 11272 */ 11273 if (lun->be_lun->lun_type == T_PROCESSOR) 11274 continue; 11275 11276 if ((lun->flags & CTL_LUN_PRIMARY_SC) 11277 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11278 printf("FAILOVER: primary lun %d\n", lun_idx); 11279 /* 11280 * Remove all commands from the other SC. First from the 11281 * blocked queue then from the ooa queue. Once we have 11282 * removed them. Call ctl_check_blocked to see if there 11283 * is anything that can run. 11284 */ 11285 for (io = (union ctl_io *)TAILQ_FIRST( 11286 &lun->blocked_queue); io != NULL; io = next_io) { 11287 11288 next_io = (union ctl_io *)TAILQ_NEXT( 11289 &io->io_hdr, blocked_links); 11290 11291 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 11292 TAILQ_REMOVE(&lun->blocked_queue, 11293 &io->io_hdr,blocked_links); 11294 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11295 TAILQ_REMOVE(&lun->ooa_queue, 11296 &io->io_hdr, ooa_links); 11297 11298 ctl_free_io(io); 11299 } 11300 } 11301 11302 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 11303 io != NULL; io = next_io) { 11304 11305 next_io = (union ctl_io *)TAILQ_NEXT( 11306 &io->io_hdr, ooa_links); 11307 11308 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 11309 11310 TAILQ_REMOVE(&lun->ooa_queue, 11311 &io->io_hdr, 11312 ooa_links); 11313 11314 ctl_free_io(io); 11315 } 11316 } 11317 ctl_check_blocked(lun); 11318 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 11319 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 11320 11321 printf("FAILOVER: primary lun %d\n", lun_idx); 11322 /* 11323 * Abort all commands from the other SC. We can't 11324 * send status back for them now. These should get 11325 * cleaned up when they are completed or come out 11326 * for a datamove operation. 11327 */ 11328 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 11329 io != NULL; io = next_io) { 11330 next_io = (union ctl_io *)TAILQ_NEXT( 11331 &io->io_hdr, ooa_links); 11332 11333 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 11334 io->io_hdr.flags |= CTL_FLAG_ABORT; 11335 } 11336 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11337 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 11338 11339 printf("FAILOVER: secondary lun %d\n", lun_idx); 11340 11341 lun->flags |= CTL_LUN_PRIMARY_SC; 11342 11343 /* 11344 * We send all I/O that was sent to this controller 11345 * and redirected to the other side back with 11346 * busy status, and have the initiator retry it. 11347 * Figuring out how much data has been transferred, 11348 * etc. and picking up where we left off would be 11349 * very tricky. 11350 * 11351 * XXX KDM need to remove I/O from the blocked 11352 * queue as well! 
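		 *
		 * The loop below clears CTL_FLAG_SENT_2OTHER_SC on each I/O
		 * on the OOA queue; anything still marked CTL_FLAG_IO_ACTIVE
		 * is tagged CTL_FLAG_FAILOVER, and everything else is
		 * completed right here with busy status so the initiator
		 * will retry it.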
11353 */ 11354 for (pending_io = (union ctl_io *)TAILQ_FIRST( 11355 &lun->ooa_queue); pending_io != NULL; 11356 pending_io = next_io) { 11357 11358 next_io = (union ctl_io *)TAILQ_NEXT( 11359 &pending_io->io_hdr, ooa_links); 11360 11361 pending_io->io_hdr.flags &= 11362 ~CTL_FLAG_SENT_2OTHER_SC; 11363 11364 if (pending_io->io_hdr.flags & 11365 CTL_FLAG_IO_ACTIVE) { 11366 pending_io->io_hdr.flags |= 11367 CTL_FLAG_FAILOVER; 11368 } else { 11369 ctl_set_busy(&pending_io->scsiio); 11370 ctl_done(pending_io); 11371 } 11372 } 11373 11374 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11375 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11376 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11377 printf("FAILOVER: secondary lun %d\n", lun_idx); 11378 /* 11379 * if the first io on the OOA is not on the RtR queue 11380 * add it. 11381 */ 11382 lun->flags |= CTL_LUN_PRIMARY_SC; 11383 11384 pending_io = (union ctl_io *)TAILQ_FIRST( 11385 &lun->ooa_queue); 11386 if (pending_io==NULL) { 11387 printf("Nothing on OOA queue\n"); 11388 continue; 11389 } 11390 11391 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11392 if ((pending_io->io_hdr.flags & 11393 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11394 pending_io->io_hdr.flags |= 11395 CTL_FLAG_IS_WAS_ON_RTR; 11396 ctl_enqueue_rtr(pending_io); 11397 } 11398 #if 0 11399 else 11400 { 11401 printf("Tag 0x%04x is running\n", 11402 pending_io->scsiio.tag_num); 11403 } 11404 #endif 11405 11406 next_io = (union ctl_io *)TAILQ_NEXT( 11407 &pending_io->io_hdr, ooa_links); 11408 for (pending_io=next_io; pending_io != NULL; 11409 pending_io = next_io) { 11410 pending_io->io_hdr.flags &= 11411 ~CTL_FLAG_SENT_2OTHER_SC; 11412 next_io = (union ctl_io *)TAILQ_NEXT( 11413 &pending_io->io_hdr, ooa_links); 11414 if (pending_io->io_hdr.flags & 11415 CTL_FLAG_IS_WAS_ON_RTR) { 11416 #if 0 11417 printf("Tag 0x%04x is running\n", 11418 pending_io->scsiio.tag_num); 11419 #endif 11420 continue; 11421 } 11422 11423 switch (ctl_check_ooa(lun, pending_io, 11424 (union ctl_io *)TAILQ_PREV( 11425 &pending_io->io_hdr, ctl_ooaq, 11426 ooa_links))) { 11427 11428 case CTL_ACTION_BLOCK: 11429 TAILQ_INSERT_TAIL(&lun->blocked_queue, 11430 &pending_io->io_hdr, 11431 blocked_links); 11432 pending_io->io_hdr.flags |= 11433 CTL_FLAG_BLOCKED; 11434 break; 11435 case CTL_ACTION_PASS: 11436 case CTL_ACTION_SKIP: 11437 pending_io->io_hdr.flags |= 11438 CTL_FLAG_IS_WAS_ON_RTR; 11439 ctl_enqueue_rtr(pending_io); 11440 break; 11441 case CTL_ACTION_OVERLAP: 11442 ctl_set_overlapped_cmd( 11443 (struct ctl_scsiio *)pending_io); 11444 ctl_done(pending_io); 11445 break; 11446 case CTL_ACTION_OVERLAP_TAG: 11447 ctl_set_overlapped_tag( 11448 (struct ctl_scsiio *)pending_io, 11449 pending_io->scsiio.tag_num & 0xff); 11450 ctl_done(pending_io); 11451 break; 11452 case CTL_ACTION_ERROR: 11453 default: 11454 ctl_set_internal_failure( 11455 (struct ctl_scsiio *)pending_io, 11456 0, // sks_valid 11457 0); //retry count 11458 ctl_done(pending_io); 11459 break; 11460 } 11461 } 11462 11463 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11464 } else { 11465 panic("Unhandled HA mode failover, LUN flags = %#x, " 11466 "ha_mode = #%x", lun->flags, ctl_softc->ha_mode); 11467 } 11468 } 11469 ctl_pause_rtr = 0; 11470 mtx_unlock(&ctl_softc->ctl_lock); 11471 } 11472 11473 static int 11474 ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio) 11475 { 11476 struct ctl_lun *lun; 11477 const struct ctl_cmd_entry *entry; 11478 uint32_t initidx, targ_lun; 11479 int retval; 11480 11481 retval = 0; 11482 
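	/*
	 * Look up the LUN from the mapped target LUN in the nexus.  A LUN
	 * that is being disabled is treated as nonexistent; otherwise the
	 * I/O is linked onto the LUN's OOA queue before any further checks.
	 */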
11483 lun = NULL; 11484 11485 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11486 if ((targ_lun < CTL_MAX_LUNS) 11487 && ((lun = ctl_softc->ctl_luns[targ_lun]) != NULL)) { 11488 /* 11489 * If the LUN is invalid, pretend that it doesn't exist. 11490 * It will go away as soon as all pending I/O has been 11491 * completed. 11492 */ 11493 mtx_lock(&lun->lun_lock); 11494 if (lun->flags & CTL_LUN_DISABLED) { 11495 mtx_unlock(&lun->lun_lock); 11496 lun = NULL; 11497 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11498 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11499 } else { 11500 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11501 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11502 lun->be_lun; 11503 if (lun->be_lun->lun_type == T_PROCESSOR) { 11504 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11505 } 11506 11507 /* 11508 * Every I/O goes into the OOA queue for a 11509 * particular LUN, and stays there until completion. 11510 */ 11511 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11512 ooa_links); 11513 } 11514 } else { 11515 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11516 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11517 } 11518 11519 /* Get command entry and return error if it is unsuppotyed. */ 11520 entry = ctl_validate_command(ctsio); 11521 if (entry == NULL) { 11522 if (lun) 11523 mtx_unlock(&lun->lun_lock); 11524 return (retval); 11525 } 11526 11527 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11528 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11529 11530 /* 11531 * Check to see whether we can send this command to LUNs that don't 11532 * exist. This should pretty much only be the case for inquiry 11533 * and request sense. Further checks, below, really require having 11534 * a LUN, so we can't really check the command anymore. Just put 11535 * it on the rtr queue. 11536 */ 11537 if (lun == NULL) { 11538 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11539 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11540 ctl_enqueue_rtr((union ctl_io *)ctsio); 11541 return (retval); 11542 } 11543 11544 ctl_set_unsupported_lun(ctsio); 11545 ctl_done((union ctl_io *)ctsio); 11546 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11547 return (retval); 11548 } else { 11549 /* 11550 * Make sure we support this particular command on this LUN. 11551 * e.g., we don't support writes to the control LUN. 11552 */ 11553 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11554 mtx_unlock(&lun->lun_lock); 11555 ctl_set_invalid_opcode(ctsio); 11556 ctl_done((union ctl_io *)ctsio); 11557 return (retval); 11558 } 11559 } 11560 11561 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11562 11563 #ifdef CTL_WITH_CA 11564 /* 11565 * If we've got a request sense, it'll clear the contingent 11566 * allegiance condition. Otherwise, if we have a CA condition for 11567 * this initiator, clear it, because it sent down a command other 11568 * than request sense. 11569 */ 11570 if ((ctsio->cdb[0] != REQUEST_SENSE) 11571 && (ctl_is_set(lun->have_ca, initidx))) 11572 ctl_clear_mask(lun->have_ca, initidx); 11573 #endif 11574 11575 /* 11576 * If the command has this flag set, it handles its own unit 11577 * attention reporting, we shouldn't do anything. Otherwise we 11578 * check for any pending unit attentions, and send them back to the 11579 * initiator. We only do this when a command initially comes in, 11580 * not when we pull it off the blocked queue. 
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		scsi_sense_data_type sense_format;

		if (lun->flags & CTL_LUN_SENSE_DESC)
			sense_format = SSD_TYPE_DESC;
		else
			sense_format = SSD_TYPE_FIXED;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    sense_format);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = SSD_FULL_SIZE;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to the other side if
	 * this LUN is secondary on this SC.  We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily.  Something similar will need to be done on the
	 * other side so when we are done we can find the copy.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;

		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
#if 0
		printf("1. ctsio %p\n", ctsio);
#endif
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
		msg_info.scsi.tag_num = ctsio->tag_num;
		msg_info.scsi.tag_type = ctsio->tag_type;
		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);

		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
		    (void *)&msg_info, sizeof(msg_info), 0)) >
		    CTL_HA_STATUS_SUCCESS) {
			printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
			       isc_retval);
			printf("CTL:opcode is %x\n", ctsio->cdb[0]);
		} else {
#if 0
			printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
#endif
		}

		/*
		 * XXX KDM this I/O is off the incoming queue, but hasn't
		 * been inserted on any other queue.  We may need to come
		 * up with a holding queue while we wait for serialization
		 * so that we have an idea of what we're waiting for from
		 * the other side.
11671 */ 11672 mtx_unlock(&lun->lun_lock); 11673 return (retval); 11674 } 11675 11676 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11677 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11678 ctl_ooaq, ooa_links))) { 11679 case CTL_ACTION_BLOCK: 11680 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11681 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11682 blocked_links); 11683 mtx_unlock(&lun->lun_lock); 11684 return (retval); 11685 case CTL_ACTION_PASS: 11686 case CTL_ACTION_SKIP: 11687 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11688 mtx_unlock(&lun->lun_lock); 11689 ctl_enqueue_rtr((union ctl_io *)ctsio); 11690 break; 11691 case CTL_ACTION_OVERLAP: 11692 mtx_unlock(&lun->lun_lock); 11693 ctl_set_overlapped_cmd(ctsio); 11694 ctl_done((union ctl_io *)ctsio); 11695 break; 11696 case CTL_ACTION_OVERLAP_TAG: 11697 mtx_unlock(&lun->lun_lock); 11698 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11699 ctl_done((union ctl_io *)ctsio); 11700 break; 11701 case CTL_ACTION_ERROR: 11702 default: 11703 mtx_unlock(&lun->lun_lock); 11704 ctl_set_internal_failure(ctsio, 11705 /*sks_valid*/ 0, 11706 /*retry_count*/ 0); 11707 ctl_done((union ctl_io *)ctsio); 11708 break; 11709 } 11710 return (retval); 11711 } 11712 11713 const struct ctl_cmd_entry * 11714 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11715 { 11716 const struct ctl_cmd_entry *entry; 11717 int service_action; 11718 11719 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11720 if (sa) 11721 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11722 if (entry->flags & CTL_CMD_FLAG_SA5) { 11723 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11724 entry = &((const struct ctl_cmd_entry *) 11725 entry->execute)[service_action]; 11726 } 11727 return (entry); 11728 } 11729 11730 const struct ctl_cmd_entry * 11731 ctl_validate_command(struct ctl_scsiio *ctsio) 11732 { 11733 const struct ctl_cmd_entry *entry; 11734 int i, sa; 11735 uint8_t diff; 11736 11737 entry = ctl_get_cmd_entry(ctsio, &sa); 11738 if (entry->execute == NULL) { 11739 if (sa) 11740 ctl_set_invalid_field(ctsio, 11741 /*sks_valid*/ 1, 11742 /*command*/ 1, 11743 /*field*/ 1, 11744 /*bit_valid*/ 1, 11745 /*bit*/ 4); 11746 else 11747 ctl_set_invalid_opcode(ctsio); 11748 ctl_done((union ctl_io *)ctsio); 11749 return (NULL); 11750 } 11751 KASSERT(entry->length > 0, 11752 ("Not defined length for command 0x%02x/0x%02x", 11753 ctsio->cdb[0], ctsio->cdb[1])); 11754 for (i = 1; i < entry->length; i++) { 11755 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11756 if (diff == 0) 11757 continue; 11758 ctl_set_invalid_field(ctsio, 11759 /*sks_valid*/ 1, 11760 /*command*/ 1, 11761 /*field*/ i, 11762 /*bit_valid*/ 1, 11763 /*bit*/ fls(diff) - 1); 11764 ctl_done((union ctl_io *)ctsio); 11765 return (NULL); 11766 } 11767 return (entry); 11768 } 11769 11770 static int 11771 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11772 { 11773 11774 switch (lun_type) { 11775 case T_PROCESSOR: 11776 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11777 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11778 return (0); 11779 break; 11780 case T_DIRECT: 11781 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11782 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11783 return (0); 11784 break; 11785 default: 11786 return (0); 11787 } 11788 return (1); 11789 } 11790 11791 static int 11792 ctl_scsiio(struct ctl_scsiio *ctsio) 11793 { 11794 int retval; 11795 const struct ctl_cmd_entry *entry; 11796 11797 retval = CTL_RETVAL_COMPLETE; 11798 11799 
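/* By the time ctl_scsiio() below runs, the command has already been through ctl_scsiio_precheck() and any OOA serialization, so it only needs to be dispatched to its command handler (or completed immediately if it was aborted while queued). */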
CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11800 11801 entry = ctl_get_cmd_entry(ctsio, NULL); 11802 11803 /* 11804 * If this I/O has been aborted, just send it straight to 11805 * ctl_done() without executing it. 11806 */ 11807 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11808 ctl_done((union ctl_io *)ctsio); 11809 goto bailout; 11810 } 11811 11812 /* 11813 * All the checks should have been handled by ctl_scsiio_precheck(). 11814 * We should be clear now to just execute the I/O. 11815 */ 11816 retval = entry->execute(ctsio); 11817 11818 bailout: 11819 return (retval); 11820 } 11821 11822 /* 11823 * Since we only implement one target right now, a bus reset simply resets 11824 * our single target. 11825 */ 11826 static int 11827 ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io) 11828 { 11829 return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET)); 11830 } 11831 11832 static int 11833 ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 11834 ctl_ua_type ua_type) 11835 { 11836 struct ctl_lun *lun; 11837 int retval; 11838 11839 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11840 union ctl_ha_msg msg_info; 11841 11842 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11843 msg_info.hdr.nexus = io->io_hdr.nexus; 11844 if (ua_type==CTL_UA_TARG_RESET) 11845 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11846 else 11847 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11848 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11849 msg_info.hdr.original_sc = NULL; 11850 msg_info.hdr.serializing_sc = NULL; 11851 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11852 (void *)&msg_info, sizeof(msg_info), 0)) { 11853 } 11854 } 11855 retval = 0; 11856 11857 mtx_lock(&ctl_softc->ctl_lock); 11858 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) 11859 retval += ctl_lun_reset(lun, io, ua_type); 11860 mtx_unlock(&ctl_softc->ctl_lock); 11861 11862 return (retval); 11863 } 11864 11865 /* 11866 * The LUN should always be set. The I/O is optional, and is used to 11867 * distinguish between I/Os sent by this initiator, and by other 11868 * initiators. We set unit attention for initiators other than this one. 11869 * SAM-3 is vague on this point. It does say that a unit attention should 11870 * be established for other initiators when a LUN is reset (see section 11871 * 5.7.3), but it doesn't specifically say that the unit attention should 11872 * be established for this particular initiator when a LUN is reset. Here 11873 * is the relevant text, from SAM-3 rev 8: 11874 * 11875 * 5.7.2 When a SCSI initiator port aborts its own tasks 11876 * 11877 * When a SCSI initiator port causes its own task(s) to be aborted, no 11878 * notification that the task(s) have been aborted shall be returned to 11879 * the SCSI initiator port other than the completion response for the 11880 * command or task management function action that caused the task(s) to 11881 * be aborted and notification(s) associated with related effects of the 11882 * action (e.g., a reset unit attention condition). 11883 * 11884 * XXX KDM for now, we're setting unit attention for all initiators. 11885 */ 11886 static int 11887 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11888 { 11889 union ctl_io *xio; 11890 #if 0 11891 uint32_t initidx; 11892 #endif 11893 #ifdef CTL_WITH_CA 11894 int i; 11895 #endif 11896 11897 mtx_lock(&lun->lun_lock); 11898 /* 11899 * Run through the OOA queue and abort each I/O. 
11900 */ 11901 #if 0 11902 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 11903 #endif 11904 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11905 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11906 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11907 } 11908 11909 /* 11910 * This version sets unit attention for every initiator. 11911 */ 11912 #if 0 11913 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11914 ctl_est_ua_all(lun, initidx, ua_type); 11915 #else 11916 ctl_est_ua_all(lun, -1, ua_type); 11917 #endif 11918 11919 /* 11920 * A reset (any kind, really) clears reservations established with 11921 * RESERVE/RELEASE. It does not clear reservations established 11922 * with PERSISTENT RESERVE OUT, but we don't support that at the 11923 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 11924 * reservations made with the RESERVE/RELEASE commands, because 11925 * those commands are obsolete in SPC-3. 11926 */ 11927 lun->flags &= ~CTL_LUN_RESERVED; 11928 11929 #ifdef CTL_WITH_CA 11930 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11931 ctl_clear_mask(lun->have_ca, i); 11932 #endif 11933 mtx_unlock(&lun->lun_lock); 11934 11935 return (0); 11936 } 11937 11938 static void 11939 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11940 int other_sc) 11941 { 11942 union ctl_io *xio; 11943 11944 mtx_assert(&lun->lun_lock, MA_OWNED); 11945 11946 /* 11947 * Run through the OOA queue and attempt to find the given I/O. 11948 * The target port, initiator ID, tag type and tag number have to 11949 * match the values that we got from the initiator. If we have an 11950 * untagged command to abort, simply abort the first untagged command 11951 * we come to. We only allow one untagged command at a time of course. 11952 */ 11953 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11954 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11955 11956 if ((targ_port == UINT32_MAX || 11957 targ_port == xio->io_hdr.nexus.targ_port) && 11958 (init_id == UINT32_MAX || 11959 init_id == xio->io_hdr.nexus.initid.id)) { 11960 if (targ_port != xio->io_hdr.nexus.targ_port || 11961 init_id != xio->io_hdr.nexus.initid.id) 11962 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11963 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11964 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11965 union ctl_ha_msg msg_info; 11966 11967 msg_info.hdr.nexus = xio->io_hdr.nexus; 11968 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11969 msg_info.task.tag_num = xio->scsiio.tag_num; 11970 msg_info.task.tag_type = xio->scsiio.tag_type; 11971 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11972 msg_info.hdr.original_sc = NULL; 11973 msg_info.hdr.serializing_sc = NULL; 11974 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11975 (void *)&msg_info, sizeof(msg_info), 0); 11976 } 11977 } 11978 } 11979 } 11980 11981 static int 11982 ctl_abort_task_set(union ctl_io *io) 11983 { 11984 struct ctl_softc *softc = control_softc; 11985 struct ctl_lun *lun; 11986 uint32_t targ_lun; 11987 11988 /* 11989 * Look up the LUN.
11990 */ 11991 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11992 mtx_lock(&softc->ctl_lock); 11993 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 11994 lun = softc->ctl_luns[targ_lun]; 11995 else { 11996 mtx_unlock(&softc->ctl_lock); 11997 return (1); 11998 } 11999 12000 mtx_lock(&lun->lun_lock); 12001 mtx_unlock(&softc->ctl_lock); 12002 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 12003 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12004 io->io_hdr.nexus.initid.id, 12005 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12006 } else { /* CTL_TASK_CLEAR_TASK_SET */ 12007 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 12008 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12009 } 12010 mtx_unlock(&lun->lun_lock); 12011 return (0); 12012 } 12013 12014 static int 12015 ctl_i_t_nexus_reset(union ctl_io *io) 12016 { 12017 struct ctl_softc *softc = control_softc; 12018 struct ctl_lun *lun; 12019 uint32_t initidx, residx; 12020 12021 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12022 residx = ctl_get_resindex(&io->io_hdr.nexus); 12023 mtx_lock(&softc->ctl_lock); 12024 STAILQ_FOREACH(lun, &softc->lun_list, links) { 12025 mtx_lock(&lun->lun_lock); 12026 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12027 io->io_hdr.nexus.initid.id, 12028 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12029 #ifdef CTL_WITH_CA 12030 ctl_clear_mask(lun->have_ca, initidx); 12031 #endif 12032 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 12033 lun->flags &= ~CTL_LUN_RESERVED; 12034 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 12035 mtx_unlock(&lun->lun_lock); 12036 } 12037 mtx_unlock(&softc->ctl_lock); 12038 return (0); 12039 } 12040 12041 static int 12042 ctl_abort_task(union ctl_io *io) 12043 { 12044 union ctl_io *xio; 12045 struct ctl_lun *lun; 12046 struct ctl_softc *ctl_softc; 12047 #if 0 12048 struct sbuf sb; 12049 char printbuf[128]; 12050 #endif 12051 int found; 12052 uint32_t targ_lun; 12053 12054 ctl_softc = control_softc; 12055 found = 0; 12056 12057 /* 12058 * Look up the LUN. 12059 */ 12060 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12061 mtx_lock(&ctl_softc->ctl_lock); 12062 if ((targ_lun < CTL_MAX_LUNS) 12063 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 12064 lun = ctl_softc->ctl_luns[targ_lun]; 12065 else { 12066 mtx_unlock(&ctl_softc->ctl_lock); 12067 return (1); 12068 } 12069 12070 #if 0 12071 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 12072 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 12073 #endif 12074 12075 mtx_lock(&lun->lun_lock); 12076 mtx_unlock(&ctl_softc->ctl_lock); 12077 /* 12078 * Run through the OOA queue and attempt to find the given I/O. 12079 * The target port, initiator ID, tag type and tag number have to 12080 * match the values that we got from the initiator. If we have an 12081 * untagged command to abort, simply abort the first untagged command 12082 * we come to. We only allow one untagged command at a time of course. 12083 */ 12084 #if 0 12085 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 12086 #endif 12087 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12088 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12089 #if 0 12090 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 12091 12092 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 12093 lun->lun, xio->scsiio.tag_num, 12094 xio->scsiio.tag_type, 12095 (xio->io_hdr.blocked_links.tqe_prev 12096 == NULL) ? 
"" : " BLOCKED", 12097 (xio->io_hdr.flags & 12098 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 12099 (xio->io_hdr.flags & 12100 CTL_FLAG_ABORT) ? " ABORT" : "", 12101 (xio->io_hdr.flags & 12102 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 12103 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 12104 sbuf_finish(&sb); 12105 printf("%s\n", sbuf_data(&sb)); 12106 #endif 12107 12108 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 12109 && (xio->io_hdr.nexus.initid.id == 12110 io->io_hdr.nexus.initid.id)) { 12111 /* 12112 * If the abort says that the task is untagged, the 12113 * task in the queue must be untagged. Otherwise, 12114 * we just check to see whether the tag numbers 12115 * match. This is because the QLogic firmware 12116 * doesn't pass back the tag type in an abort 12117 * request. 12118 */ 12119 #if 0 12120 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12121 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12122 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12123 #endif 12124 /* 12125 * XXX KDM we've got problems with FC, because it 12126 * doesn't send down a tag type with aborts. So we 12127 * can only really go by the tag number... 12128 * This may cause problems with parallel SCSI. 12129 * Need to figure that out!! 12130 */ 12131 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12132 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12133 found = 1; 12134 if ((io->io_hdr.flags & 12135 CTL_FLAG_FROM_OTHER_SC) == 0 && 12136 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12137 union ctl_ha_msg msg_info; 12138 12139 io->io_hdr.flags |= 12140 CTL_FLAG_SENT_2OTHER_SC; 12141 msg_info.hdr.nexus = io->io_hdr.nexus; 12142 msg_info.task.task_action = 12143 CTL_TASK_ABORT_TASK; 12144 msg_info.task.tag_num = 12145 io->taskio.tag_num; 12146 msg_info.task.tag_type = 12147 io->taskio.tag_type; 12148 msg_info.hdr.msg_type = 12149 CTL_MSG_MANAGE_TASKS; 12150 msg_info.hdr.original_sc = NULL; 12151 msg_info.hdr.serializing_sc = NULL; 12152 #if 0 12153 printf("Sent Abort to other side\n"); 12154 #endif 12155 if (CTL_HA_STATUS_SUCCESS != 12156 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12157 (void *)&msg_info, 12158 sizeof(msg_info), 0)) { 12159 } 12160 } 12161 #if 0 12162 printf("ctl_abort_task: found I/O to abort\n"); 12163 #endif 12164 break; 12165 } 12166 } 12167 } 12168 mtx_unlock(&lun->lun_lock); 12169 12170 if (found == 0) { 12171 /* 12172 * This isn't really an error. It's entirely possible for 12173 * the abort and command completion to cross on the wire. 12174 * This is more of an informative/diagnostic error. 
12175 */ 12176 #if 0 12177 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12178 "%d:%d:%d:%d tag %d type %d\n", 12179 io->io_hdr.nexus.initid.id, 12180 io->io_hdr.nexus.targ_port, 12181 io->io_hdr.nexus.targ_target.id, 12182 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12183 io->taskio.tag_type); 12184 #endif 12185 } 12186 return (0); 12187 } 12188 12189 static void 12190 ctl_run_task(union ctl_io *io) 12191 { 12192 struct ctl_softc *ctl_softc = control_softc; 12193 int retval = 1; 12194 const char *task_desc; 12195 12196 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12197 12198 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12199 ("ctl_run_task: Unexpected io_type %d\n", 12200 io->io_hdr.io_type)); 12201 12202 task_desc = ctl_scsi_task_string(&io->taskio); 12203 if (task_desc != NULL) { 12204 #ifdef NEEDTOPORT 12205 csevent_log(CSC_CTL | CSC_SHELF_SW | 12206 CTL_TASK_REPORT, 12207 csevent_LogType_Trace, 12208 csevent_Severity_Information, 12209 csevent_AlertLevel_Green, 12210 csevent_FRU_Firmware, 12211 csevent_FRU_Unknown, 12212 "CTL: received task: %s",task_desc); 12213 #endif 12214 } else { 12215 #ifdef NEEDTOPORT 12216 csevent_log(CSC_CTL | CSC_SHELF_SW | 12217 CTL_TASK_REPORT, 12218 csevent_LogType_Trace, 12219 csevent_Severity_Information, 12220 csevent_AlertLevel_Green, 12221 csevent_FRU_Firmware, 12222 csevent_FRU_Unknown, 12223 "CTL: received unknown task " 12224 "type: %d (%#x)", 12225 io->taskio.task_action, 12226 io->taskio.task_action); 12227 #endif 12228 } 12229 switch (io->taskio.task_action) { 12230 case CTL_TASK_ABORT_TASK: 12231 retval = ctl_abort_task(io); 12232 break; 12233 case CTL_TASK_ABORT_TASK_SET: 12234 case CTL_TASK_CLEAR_TASK_SET: 12235 retval = ctl_abort_task_set(io); 12236 break; 12237 case CTL_TASK_CLEAR_ACA: 12238 break; 12239 case CTL_TASK_I_T_NEXUS_RESET: 12240 retval = ctl_i_t_nexus_reset(io); 12241 break; 12242 case CTL_TASK_LUN_RESET: { 12243 struct ctl_lun *lun; 12244 uint32_t targ_lun; 12245 12246 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12247 mtx_lock(&ctl_softc->ctl_lock); 12248 if ((targ_lun < CTL_MAX_LUNS) 12249 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 12250 lun = ctl_softc->ctl_luns[targ_lun]; 12251 else { 12252 mtx_unlock(&ctl_softc->ctl_lock); 12253 retval = 1; 12254 break; 12255 } 12256 12257 if (!(io->io_hdr.flags & 12258 CTL_FLAG_FROM_OTHER_SC)) { 12259 union ctl_ha_msg msg_info; 12260 12261 io->io_hdr.flags |= 12262 CTL_FLAG_SENT_2OTHER_SC; 12263 msg_info.hdr.msg_type = 12264 CTL_MSG_MANAGE_TASKS; 12265 msg_info.hdr.nexus = io->io_hdr.nexus; 12266 msg_info.task.task_action = 12267 CTL_TASK_LUN_RESET; 12268 msg_info.hdr.original_sc = NULL; 12269 msg_info.hdr.serializing_sc = NULL; 12270 if (CTL_HA_STATUS_SUCCESS != 12271 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12272 (void *)&msg_info, 12273 sizeof(msg_info), 0)) { 12274 } 12275 } 12276 12277 retval = ctl_lun_reset(lun, io, 12278 CTL_UA_LUN_RESET); 12279 mtx_unlock(&ctl_softc->ctl_lock); 12280 break; 12281 } 12282 case CTL_TASK_TARGET_RESET: 12283 retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET); 12284 break; 12285 case CTL_TASK_BUS_RESET: 12286 retval = ctl_bus_reset(ctl_softc, io); 12287 break; 12288 case CTL_TASK_PORT_LOGIN: 12289 break; 12290 case CTL_TASK_PORT_LOGOUT: 12291 break; 12292 default: 12293 printf("ctl_run_task: got unknown task management event %d\n", 12294 io->taskio.task_action); 12295 break; 12296 } 12297 if (retval == 0) 12298 io->io_hdr.status = CTL_SUCCESS; 12299 else 12300 io->io_hdr.status = CTL_ERROR; 12301 ctl_done(io); 12302 } 12303 12304 /* 12305 * For
HA operation. Handle commands that come in from the other 12306 * controller. 12307 */ 12308 static void 12309 ctl_handle_isc(union ctl_io *io) 12310 { 12311 int free_io; 12312 struct ctl_lun *lun; 12313 struct ctl_softc *ctl_softc; 12314 uint32_t targ_lun; 12315 12316 ctl_softc = control_softc; 12317 12318 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12319 lun = ctl_softc->ctl_luns[targ_lun]; 12320 12321 switch (io->io_hdr.msg_type) { 12322 case CTL_MSG_SERIALIZE: 12323 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 12324 break; 12325 case CTL_MSG_R2R: { 12326 const struct ctl_cmd_entry *entry; 12327 12328 /* 12329 * This is only used in SER_ONLY mode. 12330 */ 12331 free_io = 0; 12332 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12333 mtx_lock(&lun->lun_lock); 12334 if (ctl_scsiio_lun_check(ctl_softc, lun, 12335 entry, (struct ctl_scsiio *)io) != 0) { 12336 mtx_unlock(&lun->lun_lock); 12337 ctl_done(io); 12338 break; 12339 } 12340 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12341 mtx_unlock(&lun->lun_lock); 12342 ctl_enqueue_rtr(io); 12343 break; 12344 } 12345 case CTL_MSG_FINISH_IO: 12346 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 12347 free_io = 0; 12348 ctl_done(io); 12349 } else { 12350 free_io = 1; 12351 mtx_lock(&lun->lun_lock); 12352 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12353 ooa_links); 12354 ctl_check_blocked(lun); 12355 mtx_unlock(&lun->lun_lock); 12356 } 12357 break; 12358 case CTL_MSG_PERS_ACTION: 12359 ctl_hndl_per_res_out_on_other_sc( 12360 (union ctl_ha_msg *)&io->presio.pr_msg); 12361 free_io = 1; 12362 break; 12363 case CTL_MSG_BAD_JUJU: 12364 free_io = 0; 12365 ctl_done(io); 12366 break; 12367 case CTL_MSG_DATAMOVE: 12368 /* Only used in XFER mode */ 12369 free_io = 0; 12370 ctl_datamove_remote(io); 12371 break; 12372 case CTL_MSG_DATAMOVE_DONE: 12373 /* Only used in XFER mode */ 12374 free_io = 0; 12375 io->scsiio.be_move_done(io); 12376 break; 12377 default: 12378 free_io = 1; 12379 printf("%s: Invalid message type %d\n", 12380 __func__, io->io_hdr.msg_type); 12381 break; 12382 } 12383 if (free_io) 12384 ctl_free_io(io); 12385 12386 } 12387 12388 12389 /* 12390 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12391 * there is no match. 12392 */ 12393 static ctl_lun_error_pattern 12394 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12395 { 12396 const struct ctl_cmd_entry *entry; 12397 ctl_lun_error_pattern filtered_pattern, pattern; 12398 12399 pattern = desc->error_pattern; 12400 12401 /* 12402 * XXX KDM we need more data passed into this function to match a 12403 * custom pattern, and we actually need to implement custom pattern 12404 * matching. 12405 */ 12406 if (pattern & CTL_LUN_PAT_CMD) 12407 return (CTL_LUN_PAT_CMD); 12408 12409 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12410 return (CTL_LUN_PAT_ANY); 12411 12412 entry = ctl_get_cmd_entry(ctsio, NULL); 12413 12414 filtered_pattern = entry->pattern & pattern; 12415 12416 /* 12417 * If the user requested specific flags in the pattern (e.g. 12418 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12419 * flags. 12420 * 12421 * If the user did not specify any flags, it doesn't matter whether 12422 * or not the command supports the flags. 12423 */ 12424 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12425 (pattern & ~CTL_LUN_PAT_MASK)) 12426 return (CTL_LUN_PAT_NONE); 12427 12428 /* 12429 * If the user asked for a range check, see if the requested LBA 12430 * range overlaps with this command's LBA range. 
12431 */ 12432 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12433 uint64_t lba1; 12434 uint64_t len1; 12435 ctl_action action; 12436 int retval; 12437 12438 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12439 if (retval != 0) 12440 return (CTL_LUN_PAT_NONE); 12441 12442 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12443 desc->lba_range.len); 12444 /* 12445 * A "pass" means that the LBA ranges don't overlap, so 12446 * this doesn't match the user's range criteria. 12447 */ 12448 if (action == CTL_ACTION_PASS) 12449 return (CTL_LUN_PAT_NONE); 12450 } 12451 12452 return (filtered_pattern); 12453 } 12454 12455 static void 12456 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12457 { 12458 struct ctl_error_desc *desc, *desc2; 12459 12460 mtx_assert(&lun->lun_lock, MA_OWNED); 12461 12462 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12463 ctl_lun_error_pattern pattern; 12464 /* 12465 * Check to see whether this particular command matches 12466 * the pattern in the descriptor. 12467 */ 12468 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12469 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12470 continue; 12471 12472 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12473 case CTL_LUN_INJ_ABORTED: 12474 ctl_set_aborted(&io->scsiio); 12475 break; 12476 case CTL_LUN_INJ_MEDIUM_ERR: 12477 ctl_set_medium_error(&io->scsiio); 12478 break; 12479 case CTL_LUN_INJ_UA: 12480 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12481 * OCCURRED */ 12482 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12483 break; 12484 case CTL_LUN_INJ_CUSTOM: 12485 /* 12486 * We're assuming the user knows what he is doing. 12487 * Just copy the sense information without doing 12488 * checks. 12489 */ 12490 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12491 ctl_min(sizeof(desc->custom_sense), 12492 sizeof(io->scsiio.sense_data))); 12493 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12494 io->scsiio.sense_len = SSD_FULL_SIZE; 12495 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12496 break; 12497 case CTL_LUN_INJ_NONE: 12498 default: 12499 /* 12500 * If this is an error injection type we don't know 12501 * about, clear the continuous flag (if it is set) 12502 * so it will get deleted below. 
12503 */ 12504 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12505 break; 12506 } 12507 /* 12508 * By default, each error injection action is a one-shot 12509 */ 12510 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12511 continue; 12512 12513 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12514 12515 free(desc, M_CTL); 12516 } 12517 } 12518 12519 #ifdef CTL_IO_DELAY 12520 static void 12521 ctl_datamove_timer_wakeup(void *arg) 12522 { 12523 union ctl_io *io; 12524 12525 io = (union ctl_io *)arg; 12526 12527 ctl_datamove(io); 12528 } 12529 #endif /* CTL_IO_DELAY */ 12530 12531 void 12532 ctl_datamove(union ctl_io *io) 12533 { 12534 void (*fe_datamove)(union ctl_io *io); 12535 12536 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12537 12538 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12539 12540 #ifdef CTL_TIME_IO 12541 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12542 char str[256]; 12543 char path_str[64]; 12544 struct sbuf sb; 12545 12546 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12547 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12548 12549 sbuf_cat(&sb, path_str); 12550 switch (io->io_hdr.io_type) { 12551 case CTL_IO_SCSI: 12552 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12553 sbuf_printf(&sb, "\n"); 12554 sbuf_cat(&sb, path_str); 12555 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12556 io->scsiio.tag_num, io->scsiio.tag_type); 12557 break; 12558 case CTL_IO_TASK: 12559 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12560 "Tag Type: %d\n", io->taskio.task_action, 12561 io->taskio.tag_num, io->taskio.tag_type); 12562 break; 12563 default: 12564 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12565 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12566 break; 12567 } 12568 sbuf_cat(&sb, path_str); 12569 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12570 (intmax_t)time_uptime - io->io_hdr.start_time); 12571 sbuf_finish(&sb); 12572 printf("%s", sbuf_data(&sb)); 12573 } 12574 #endif /* CTL_TIME_IO */ 12575 12576 #ifdef CTL_IO_DELAY 12577 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12578 struct ctl_lun *lun; 12579 12580 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12581 12582 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12583 } else { 12584 struct ctl_lun *lun; 12585 12586 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12587 if ((lun != NULL) 12588 && (lun->delay_info.datamove_delay > 0)) { 12589 struct callout *callout; 12590 12591 callout = (struct callout *)&io->io_hdr.timer_bytes; 12592 callout_init(callout, /*mpsafe*/ 1); 12593 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12594 callout_reset(callout, 12595 lun->delay_info.datamove_delay * hz, 12596 ctl_datamove_timer_wakeup, io); 12597 if (lun->delay_info.datamove_type == 12598 CTL_DELAY_TYPE_ONESHOT) 12599 lun->delay_info.datamove_delay = 0; 12600 return; 12601 } 12602 } 12603 #endif 12604 12605 /* 12606 * This command has been aborted. Set the port status, so we fail 12607 * the data move. 12608 */ 12609 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12610 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 12611 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 12612 io->io_hdr.nexus.targ_port, 12613 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12614 io->io_hdr.nexus.targ_lun); 12615 io->io_hdr.port_status = 31337; 12616 /* 12617 * Note that the backend, in this case, will get the 12618 * callback in its context. In other cases it may get 12619 * called in the frontend's interrupt thread context. 
12620 */ 12621 io->scsiio.be_move_done(io); 12622 return; 12623 } 12624 12625 /* Don't confuse frontend with zero length data move. */ 12626 if (io->scsiio.kern_data_len == 0) { 12627 io->scsiio.be_move_done(io); 12628 return; 12629 } 12630 12631 /* 12632 * If we're in XFER mode and this I/O is from the other shelf 12633 * controller, we need to send the DMA to the other side to 12634 * actually transfer the data to/from the host. In serialize only 12635 * mode the transfer happens below CTL and ctl_datamove() is only 12636 * called on the machine that originally received the I/O. 12637 */ 12638 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12639 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12640 union ctl_ha_msg msg; 12641 uint32_t sg_entries_sent; 12642 int do_sg_copy; 12643 int i; 12644 12645 memset(&msg, 0, sizeof(msg)); 12646 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12647 msg.hdr.original_sc = io->io_hdr.original_sc; 12648 msg.hdr.serializing_sc = io; 12649 msg.hdr.nexus = io->io_hdr.nexus; 12650 msg.dt.flags = io->io_hdr.flags; 12651 /* 12652 * We convert everything into a S/G list here. We can't 12653 * pass by reference, only by value between controllers. 12654 * So we can't pass a pointer to the S/G list, only as many 12655 * S/G entries as we can fit in here. If it's possible for 12656 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12657 * then we need to break this up into multiple transfers. 12658 */ 12659 if (io->scsiio.kern_sg_entries == 0) { 12660 msg.dt.kern_sg_entries = 1; 12661 /* 12662 * If this is in cached memory, flush the cache 12663 * before we send the DMA request to the other 12664 * controller. We want to do this in either the 12665 * read or the write case. The read case is 12666 * straightforward. In the write case, we want to 12667 * make sure nothing is in the local cache that 12668 * could overwrite the DMAed data. 12669 */ 12670 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12671 /* 12672 * XXX KDM use bus_dmamap_sync() here. 12673 */ 12674 } 12675 12676 /* 12677 * Convert to a physical address if this is a 12678 * virtual address. 12679 */ 12680 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12681 msg.dt.sg_list[0].addr = 12682 io->scsiio.kern_data_ptr; 12683 } else { 12684 /* 12685 * XXX KDM use busdma here! 12686 */ 12687 #if 0 12688 msg.dt.sg_list[0].addr = (void *) 12689 vtophys(io->scsiio.kern_data_ptr); 12690 #endif 12691 } 12692 12693 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12694 do_sg_copy = 0; 12695 } else { 12696 struct ctl_sg_entry *sgl; 12697 12698 do_sg_copy = 1; 12699 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12700 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12701 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12702 /* 12703 * XXX KDM use bus_dmamap_sync() here. 12704 */ 12705 } 12706 } 12707 12708 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12709 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12710 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12711 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12712 msg.dt.sg_sequence = 0; 12713 12714 /* 12715 * Loop until we've sent all of the S/G entries. 
On the 12716 * other end, we'll recompose these S/G entries into one 12717 * contiguous list before passing it to the data movement code. 12718 */ 12719 for (sg_entries_sent = 0; sg_entries_sent < 12720 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12721 msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/ 12722 sizeof(msg.dt.sg_list[0])), 12723 msg.dt.kern_sg_entries - sg_entries_sent); 12724 12725 if (do_sg_copy != 0) { 12726 struct ctl_sg_entry *sgl; 12727 int j; 12728 12729 sgl = (struct ctl_sg_entry *) 12730 io->scsiio.kern_data_ptr; 12731 /* 12732 * If this is in cached memory, flush the cache 12733 * before we send the DMA request to the other 12734 * controller. We want to do this in either 12735 * the read or the write case. The read 12736 * case is straightforward. In the write 12737 * case, we want to make sure nothing is 12738 * in the local cache that could overwrite 12739 * the DMAed data. 12740 */ 12741 12742 for (i = sg_entries_sent, j = 0; 12743 i < msg.dt.cur_sg_entries; i++, j++) { 12744 if ((io->io_hdr.flags & 12745 CTL_FLAG_NO_DATASYNC) == 0) { 12746 /* 12747 * XXX KDM use bus_dmamap_sync() 12748 */ 12749 } 12750 if ((io->io_hdr.flags & 12751 CTL_FLAG_BUS_ADDR) == 0) { 12752 /* 12753 * XXX KDM use busdma. 12754 */ 12755 #if 0 12756 msg.dt.sg_list[j].addr =(void *) 12757 vtophys(sgl[i].addr); 12758 #endif 12759 } else { 12760 msg.dt.sg_list[j].addr = 12761 sgl[i].addr; 12762 } 12763 msg.dt.sg_list[j].len = sgl[i].len; 12764 } 12765 } 12766 12767 sg_entries_sent += msg.dt.cur_sg_entries; 12768 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12769 msg.dt.sg_last = 1; 12770 else 12771 msg.dt.sg_last = 0; 12772 12773 /* 12774 * XXX KDM drop and reacquire the lock here? 12775 */ 12776 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12777 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12778 /* 12779 * XXX do something here. 12780 */ 12781 } 12782 12783 msg.dt.sent_sg_entries = sg_entries_sent; 12784 } 12785 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12786 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 12787 ctl_failover_io(io, /*have_lock*/ 0); 12788 12789 } else { 12790 12791 /* 12792 * Look up the fe_datamove() function for this particular 12793 * front end.
12794 */ 12795 fe_datamove = 12796 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12797 12798 fe_datamove(io); 12799 } 12800 } 12801 12802 static void 12803 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12804 { 12805 union ctl_ha_msg msg; 12806 int isc_status; 12807 12808 memset(&msg, 0, sizeof(msg)); 12809 12810 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12811 msg.hdr.original_sc = io; 12812 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12813 msg.hdr.nexus = io->io_hdr.nexus; 12814 msg.hdr.status = io->io_hdr.status; 12815 msg.scsi.tag_num = io->scsiio.tag_num; 12816 msg.scsi.tag_type = io->scsiio.tag_type; 12817 msg.scsi.scsi_status = io->scsiio.scsi_status; 12818 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12819 sizeof(io->scsiio.sense_data)); 12820 msg.scsi.sense_len = io->scsiio.sense_len; 12821 msg.scsi.sense_residual = io->scsiio.sense_residual; 12822 msg.scsi.fetd_status = io->io_hdr.port_status; 12823 msg.scsi.residual = io->scsiio.residual; 12824 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12825 12826 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12827 ctl_failover_io(io, /*have_lock*/ have_lock); 12828 return; 12829 } 12830 12831 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 12832 if (isc_status > CTL_HA_STATUS_SUCCESS) { 12833 /* XXX do something if this fails */ 12834 } 12835 12836 } 12837 12838 /* 12839 * The DMA to the remote side is done; now we need to tell the other side 12840 * we're done so it can continue with its data movement. 12841 */ 12842 static void 12843 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12844 { 12845 union ctl_io *io; 12846 12847 io = rq->context; 12848 12849 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12850 printf("%s: ISC DMA write failed with error %d", __func__, 12851 rq->ret); 12852 ctl_set_internal_failure(&io->scsiio, 12853 /*sks_valid*/ 1, 12854 /*retry_count*/ rq->ret); 12855 } 12856 12857 ctl_dt_req_free(rq); 12858 12859 /* 12860 * In this case, we had to malloc the memory locally. Free it. 12861 */ 12862 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12863 int i; 12864 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12865 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12866 } 12867 /* 12868 * The data is in local and remote memory, so now we need to send 12869 * status (good or bad) back to the other side. 12870 */ 12871 ctl_send_datamove_done(io, /*have_lock*/ 0); 12872 } 12873 12874 /* 12875 * We've moved the data from the host/controller into local memory. Now we 12876 * need to push it over to the remote controller's memory. 12877 */ 12878 static int 12879 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12880 { 12881 int retval; 12882 12883 retval = 0; 12884 12885 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12886 ctl_datamove_remote_write_cb); 12887 12888 return (retval); 12889 } 12890 12891 static void 12892 ctl_datamove_remote_write(union ctl_io *io) 12893 { 12894 int retval; 12895 void (*fe_datamove)(union ctl_io *io); 12896 12897 /* 12898 * - Get the data from the host/HBA into local memory. 12899 * - DMA memory from the local controller to the remote controller. 12900 * - Send status back to the remote controller.
12901 */ 12902 12903 retval = ctl_datamove_remote_sgl_setup(io); 12904 if (retval != 0) 12905 return; 12906 12907 /* Switch the pointer over so the FETD knows what to do */ 12908 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12909 12910 /* 12911 * Use a custom move done callback, since we need to send completion 12912 * back to the other controller, not to the backend on this side. 12913 */ 12914 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12915 12916 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12917 12918 fe_datamove(io); 12919 12920 return; 12921 12922 } 12923 12924 static int 12925 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12926 { 12927 #if 0 12928 char str[256]; 12929 char path_str[64]; 12930 struct sbuf sb; 12931 #endif 12932 12933 /* 12934 * In this case, we had to malloc the memory locally. Free it. 12935 */ 12936 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12937 int i; 12938 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12939 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12940 } 12941 12942 #if 0 12943 scsi_path_string(io, path_str, sizeof(path_str)); 12944 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12945 sbuf_cat(&sb, path_str); 12946 scsi_command_string(&io->scsiio, NULL, &sb); 12947 sbuf_printf(&sb, "\n"); 12948 sbuf_cat(&sb, path_str); 12949 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12950 io->scsiio.tag_num, io->scsiio.tag_type); 12951 sbuf_cat(&sb, path_str); 12952 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12953 io->io_hdr.flags, io->io_hdr.status); 12954 sbuf_finish(&sb); 12955 printk("%s", sbuf_data(&sb)); 12956 #endif 12957 12958 12959 /* 12960 * The read is done, now we need to send status (good or bad) back 12961 * to the other side. 12962 */ 12963 ctl_send_datamove_done(io, /*have_lock*/ 0); 12964 12965 return (0); 12966 } 12967 12968 static void 12969 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12970 { 12971 union ctl_io *io; 12972 void (*fe_datamove)(union ctl_io *io); 12973 12974 io = rq->context; 12975 12976 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12977 printf("%s: ISC DMA read failed with error %d", __func__, 12978 rq->ret); 12979 ctl_set_internal_failure(&io->scsiio, 12980 /*sks_valid*/ 1, 12981 /*retry_count*/ rq->ret); 12982 } 12983 12984 ctl_dt_req_free(rq); 12985 12986 /* Switch the pointer over so the FETD knows what to do */ 12987 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12988 12989 /* 12990 * Use a custom move done callback, since we need to send completion 12991 * back to the other controller, not to the backend on this side. 12992 */ 12993 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12994 12995 /* XXX KDM add checks like the ones in ctl_datamove? 
*/ 12996 12997 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12998 12999 fe_datamove(io); 13000 } 13001 13002 static int 13003 ctl_datamove_remote_sgl_setup(union ctl_io *io) 13004 { 13005 struct ctl_sg_entry *local_sglist, *remote_sglist; 13006 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 13007 struct ctl_softc *softc; 13008 int retval; 13009 int i; 13010 13011 retval = 0; 13012 softc = control_softc; 13013 13014 local_sglist = io->io_hdr.local_sglist; 13015 local_dma_sglist = io->io_hdr.local_dma_sglist; 13016 remote_sglist = io->io_hdr.remote_sglist; 13017 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13018 13019 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 13020 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 13021 local_sglist[i].len = remote_sglist[i].len; 13022 13023 /* 13024 * XXX Detect the situation where the RS-level I/O 13025 * redirector on the other side has already read the 13026 * data off of the AOR RS on this side, and 13027 * transferred it to remote (mirror) memory on the 13028 * other side. Since we already have the data in 13029 * memory here, we just need to use it. 13030 * 13031 * XXX KDM this can probably be removed once we 13032 * get the cache device code in and take the 13033 * current AOR implementation out. 13034 */ 13035 #ifdef NEEDTOPORT 13036 if ((remote_sglist[i].addr >= 13037 (void *)vtophys(softc->mirr->addr)) 13038 && (remote_sglist[i].addr < 13039 ((void *)vtophys(softc->mirr->addr) + 13040 CacheMirrorOffset))) { 13041 local_sglist[i].addr = remote_sglist[i].addr - 13042 CacheMirrorOffset; 13043 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13044 CTL_FLAG_DATA_IN) 13045 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 13046 } else { 13047 local_sglist[i].addr = remote_sglist[i].addr + 13048 CacheMirrorOffset; 13049 } 13050 #endif 13051 #if 0 13052 printf("%s: local %p, remote %p, len %d\n", 13053 __func__, local_sglist[i].addr, 13054 remote_sglist[i].addr, local_sglist[i].len); 13055 #endif 13056 } 13057 } else { 13058 uint32_t len_to_go; 13059 13060 /* 13061 * In this case, we don't have automatically allocated 13062 * memory for this I/O on this controller. This typically 13063 * happens with internal CTL I/O -- e.g. inquiry, mode 13064 * sense, etc. Anything coming from RAIDCore will have 13065 * a mirror area available. 13066 */ 13067 len_to_go = io->scsiio.kern_data_len; 13068 13069 /* 13070 * Clear the no datasync flag, we have to use malloced 13071 * buffers. 13072 */ 13073 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 13074 13075 /* 13076 * The difficult thing here is that the size of the various 13077 * S/G segments may be different than the size from the 13078 * remote controller. That'll make it harder when DMAing 13079 * the data back to the other side. 
13080 */ 13081 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 13082 sizeof(io->io_hdr.remote_sglist[0])) && 13083 (len_to_go > 0); i++) { 13084 local_sglist[i].len = ctl_min(len_to_go, 131072); 13085 CTL_SIZE_8B(local_dma_sglist[i].len, 13086 local_sglist[i].len); 13087 local_sglist[i].addr = 13088 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 13089 13090 local_dma_sglist[i].addr = local_sglist[i].addr; 13091 13092 if (local_sglist[i].addr == NULL) { 13093 int j; 13094 13095 printf("malloc failed for %zd bytes!", 13096 local_dma_sglist[i].len); 13097 for (j = 0; j < i; j++) { 13098 free(local_sglist[j].addr, M_CTL); 13099 } 13100 ctl_set_internal_failure(&io->scsiio, 13101 /*sks_valid*/ 1, 13102 /*retry_count*/ 4857); 13103 retval = 1; 13104 goto bailout_error; 13105 13106 } 13107 /* XXX KDM do we need a sync here? */ 13108 13109 len_to_go -= local_sglist[i].len; 13110 } 13111 /* 13112 * Reset the number of S/G entries accordingly. The 13113 * original number of S/G entries is available in 13114 * rem_sg_entries. 13115 */ 13116 io->scsiio.kern_sg_entries = i; 13117 13118 #if 0 13119 printf("%s: kern_sg_entries = %d\n", __func__, 13120 io->scsiio.kern_sg_entries); 13121 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13122 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 13123 local_sglist[i].addr, local_sglist[i].len, 13124 local_dma_sglist[i].len); 13125 #endif 13126 } 13127 13128 13129 return (retval); 13130 13131 bailout_error: 13132 13133 ctl_send_datamove_done(io, /*have_lock*/ 0); 13134 13135 return (retval); 13136 } 13137 13138 static int 13139 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 13140 ctl_ha_dt_cb callback) 13141 { 13142 struct ctl_ha_dt_req *rq; 13143 struct ctl_sg_entry *remote_sglist, *local_sglist; 13144 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 13145 uint32_t local_used, remote_used, total_used; 13146 int retval; 13147 int i, j; 13148 13149 retval = 0; 13150 13151 rq = ctl_dt_req_alloc(); 13152 13153 /* 13154 * If we failed to allocate the request, and if the DMA didn't fail 13155 * anyway, set busy status. This is just a resource allocation 13156 * failure. 13157 */ 13158 if ((rq == NULL) 13159 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 13160 ctl_set_busy(&io->scsiio); 13161 13162 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 13163 13164 if (rq != NULL) 13165 ctl_dt_req_free(rq); 13166 13167 /* 13168 * The data move failed. We need to return status back 13169 * to the other controller. No point in trying to DMA 13170 * data to the remote controller. 13171 */ 13172 13173 ctl_send_datamove_done(io, /*have_lock*/ 0); 13174 13175 retval = 1; 13176 13177 goto bailout; 13178 } 13179 13180 local_sglist = io->io_hdr.local_sglist; 13181 local_dma_sglist = io->io_hdr.local_dma_sglist; 13182 remote_sglist = io->io_hdr.remote_sglist; 13183 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13184 local_used = 0; 13185 remote_used = 0; 13186 total_used = 0; 13187 13188 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 13189 rq->ret = CTL_HA_STATUS_SUCCESS; 13190 rq->context = io; 13191 callback(rq); 13192 goto bailout; 13193 } 13194 13195 /* 13196 * Pull/push the data over the wire from/to the other controller. 13197 * This takes into account the possibility that the local and 13198 * remote sglists may not be identical in terms of the size of 13199 * the elements and the number of elements. 
13200 * 13201 * One fundamental assumption here is that the length allocated for 13202 * both the local and remote sglists is identical. Otherwise, we've 13203 * essentially got a coding error of some sort. 13204 */ 13205 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 13206 int isc_ret; 13207 uint32_t cur_len, dma_length; 13208 uint8_t *tmp_ptr; 13209 13210 rq->id = CTL_HA_DATA_CTL; 13211 rq->command = command; 13212 rq->context = io; 13213 13214 /* 13215 * Both pointers should be aligned. But it is possible 13216 * that the allocation length is not. They should both 13217 * also have enough slack left over at the end, though, 13218 * to round up to the next 8 byte boundary. 13219 */ 13220 cur_len = ctl_min(local_sglist[i].len - local_used, 13221 remote_sglist[j].len - remote_used); 13222 13223 /* 13224 * In this case, we have a size issue and need to decrease 13225 * the size, except in the case where we actually have less 13226 * than 8 bytes left. In that case, we need to increase 13227 * the DMA length to get the last bit. 13228 */ 13229 if ((cur_len & 0x7) != 0) { 13230 if (cur_len > 0x7) { 13231 cur_len = cur_len - (cur_len & 0x7); 13232 dma_length = cur_len; 13233 } else { 13234 CTL_SIZE_8B(dma_length, cur_len); 13235 } 13236 13237 } else 13238 dma_length = cur_len; 13239 13240 /* 13241 * If we had to allocate memory for this I/O, instead of using 13242 * the non-cached mirror memory, we'll need to flush the cache 13243 * before trying to DMA to the other controller. 13244 * 13245 * We could end up doing this multiple times for the same 13246 * segment if we have a larger local segment than remote 13247 * segment. That shouldn't be an issue. 13248 */ 13249 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 13250 /* 13251 * XXX KDM use bus_dmamap_sync() here. 13252 */ 13253 } 13254 13255 rq->size = dma_length; 13256 13257 tmp_ptr = (uint8_t *)local_sglist[i].addr; 13258 tmp_ptr += local_used; 13259 13260 /* Use physical addresses when talking to ISC hardware */ 13261 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 13262 /* XXX KDM use busdma */ 13263 #if 0 13264 rq->local = vtophys(tmp_ptr); 13265 #endif 13266 } else 13267 rq->local = tmp_ptr; 13268 13269 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 13270 tmp_ptr += remote_used; 13271 rq->remote = tmp_ptr; 13272 13273 rq->callback = NULL; 13274 13275 local_used += cur_len; 13276 if (local_used >= local_sglist[i].len) { 13277 i++; 13278 local_used = 0; 13279 } 13280 13281 remote_used += cur_len; 13282 if (remote_used >= remote_sglist[j].len) { 13283 j++; 13284 remote_used = 0; 13285 } 13286 total_used += cur_len; 13287 13288 if (total_used >= io->scsiio.kern_data_len) 13289 rq->callback = callback; 13290 13291 if ((rq->size & 0x7) != 0) { 13292 printf("%s: warning: size %d is not on 8b boundary\n", 13293 __func__, rq->size); 13294 } 13295 if (((uintptr_t)rq->local & 0x7) != 0) { 13296 printf("%s: warning: local %p not on 8b boundary\n", 13297 __func__, rq->local); 13298 } 13299 if (((uintptr_t)rq->remote & 0x7) != 0) { 13300 printf("%s: warning: remote %p not on 8b boundary\n", 13301 __func__, rq->remote); 13302 } 13303 #if 0 13304 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 13305 (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 13306 rq->local, rq->remote, rq->size); 13307 #endif 13308 13309 isc_ret = ctl_dt_single(rq); 13310 if (isc_ret == CTL_HA_STATUS_WAIT) 13311 continue; 13312 13313 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 13314 rq->ret = CTL_HA_STATUS_SUCCESS; 13315 } else { 13316 rq->ret = isc_ret; 13317 } 13318 callback(rq); 13319 goto bailout; 13320 } 13321 13322 bailout: 13323 return (retval); 13324 13325 } 13326 13327 static void 13328 ctl_datamove_remote_read(union ctl_io *io) 13329 { 13330 int retval; 13331 int i; 13332 13333 /* 13334 * This will send an error to the other controller in the case of a 13335 * failure. 13336 */ 13337 retval = ctl_datamove_remote_sgl_setup(io); 13338 if (retval != 0) 13339 return; 13340 13341 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13342 ctl_datamove_remote_read_cb); 13343 if ((retval != 0) 13344 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 13345 /* 13346 * Make sure we free memory if there was an error.. The 13347 * ctl_datamove_remote_xfer() function will send the 13348 * datamove done message, or call the callback with an 13349 * error if there is a problem. 13350 */ 13351 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13352 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13353 } 13354 13355 return; 13356 } 13357 13358 /* 13359 * Process a datamove request from the other controller. This is used for 13360 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13361 * first. Once that is complete, the data gets DMAed into the remote 13362 * controller's memory. For reads, we DMA from the remote controller's 13363 * memory into our memory first, and then move it out to the FETD. 13364 */ 13365 static void 13366 ctl_datamove_remote(union ctl_io *io) 13367 { 13368 struct ctl_softc *softc; 13369 13370 softc = control_softc; 13371 13372 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 13373 13374 /* 13375 * Note that we look for an aborted I/O here, but don't do some of 13376 * the other checks that ctl_datamove() normally does. 13377 * We don't need to run the datamove delay code, since that should 13378 * have been done if need be on the other controller. 
13379 */ 13380 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13381 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 13382 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 13383 io->io_hdr.nexus.targ_port, 13384 io->io_hdr.nexus.targ_target.id, 13385 io->io_hdr.nexus.targ_lun); 13386 io->io_hdr.port_status = 31338; 13387 ctl_send_datamove_done(io, /*have_lock*/ 0); 13388 return; 13389 } 13390 13391 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 13392 ctl_datamove_remote_write(io); 13393 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 13394 ctl_datamove_remote_read(io); 13395 } else { 13396 union ctl_ha_msg msg; 13397 struct scsi_sense_data *sense; 13398 uint8_t sks[3]; 13399 int retry_count; 13400 13401 memset(&msg, 0, sizeof(msg)); 13402 13403 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 13404 msg.hdr.status = CTL_SCSI_ERROR; 13405 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 13406 13407 retry_count = 4243; 13408 13409 sense = &msg.scsi.sense_data; 13410 sks[0] = SSD_SCS_VALID; 13411 sks[1] = (retry_count >> 8) & 0xff; 13412 sks[2] = retry_count & 0xff; 13413 13414 /* "Internal target failure" */ 13415 scsi_set_sense_data(sense, 13416 /*sense_format*/ SSD_TYPE_NONE, 13417 /*current_error*/ 1, 13418 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 13419 /*asc*/ 0x44, 13420 /*ascq*/ 0x00, 13421 /*type*/ SSD_ELEM_SKS, 13422 /*size*/ sizeof(sks), 13423 /*data*/ sks, 13424 SSD_ELEM_NONE); 13425 13426 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13427 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13428 ctl_failover_io(io, /*have_lock*/ 1); 13429 return; 13430 } 13431 13432 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 13433 CTL_HA_STATUS_SUCCESS) { 13434 /* XXX KDM what to do if this fails? */ 13435 } 13436 return; 13437 } 13438 13439 } 13440 13441 static int 13442 ctl_process_done(union ctl_io *io) 13443 { 13444 struct ctl_lun *lun; 13445 struct ctl_softc *ctl_softc = control_softc; 13446 void (*fe_done)(union ctl_io *io); 13447 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 13448 13449 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13450 13451 fe_done = 13452 control_softc->ctl_ports[targ_port]->fe_done; 13453 13454 #ifdef CTL_TIME_IO 13455 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13456 char str[256]; 13457 char path_str[64]; 13458 struct sbuf sb; 13459 13460 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13461 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13462 13463 sbuf_cat(&sb, path_str); 13464 switch (io->io_hdr.io_type) { 13465 case CTL_IO_SCSI: 13466 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13467 sbuf_printf(&sb, "\n"); 13468 sbuf_cat(&sb, path_str); 13469 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13470 io->scsiio.tag_num, io->scsiio.tag_type); 13471 break; 13472 case CTL_IO_TASK: 13473 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13474 "Tag Type: %d\n", io->taskio.task_action, 13475 io->taskio.tag_num, io->taskio.tag_type); 13476 break; 13477 default: 13478 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13479 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13480 break; 13481 } 13482 sbuf_cat(&sb, path_str); 13483 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13484 (intmax_t)time_uptime - io->io_hdr.start_time); 13485 sbuf_finish(&sb); 13486 printf("%s", sbuf_data(&sb)); 13487 } 13488 #endif /* CTL_TIME_IO */ 13489 13490 switch (io->io_hdr.io_type) { 13491 case CTL_IO_SCSI: 13492 break; 13493 case CTL_IO_TASK: 13494 if (bootverbose || (ctl_debug & 
CTL_DEBUG_INFO)) 13495 ctl_io_error_print(io, NULL); 13496 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13497 ctl_free_io(io); 13498 else 13499 fe_done(io); 13500 return (CTL_RETVAL_COMPLETE); 13501 default: 13502 panic("ctl_process_done: invalid io type %d\n", 13503 io->io_hdr.io_type); 13504 break; /* NOTREACHED */ 13505 } 13506 13507 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13508 if (lun == NULL) { 13509 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13510 io->io_hdr.nexus.targ_mapped_lun)); 13511 goto bailout; 13512 } 13513 13514 mtx_lock(&lun->lun_lock); 13515 13516 /* 13517 * Check to see if we have any errors to inject here. We only 13518 * inject errors for commands that don't already have errors set. 13519 */ 13520 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 13521 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13522 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13523 ctl_inject_error(lun, io); 13524 13525 /* 13526 * XXX KDM how do we treat commands that aren't completed 13527 * successfully? 13528 * 13529 * XXX KDM should we also track I/O latency? 13530 */ 13531 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13532 io->io_hdr.io_type == CTL_IO_SCSI) { 13533 #ifdef CTL_TIME_IO 13534 struct bintime cur_bt; 13535 #endif 13536 int type; 13537 13538 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13539 CTL_FLAG_DATA_IN) 13540 type = CTL_STATS_READ; 13541 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13542 CTL_FLAG_DATA_OUT) 13543 type = CTL_STATS_WRITE; 13544 else 13545 type = CTL_STATS_NO_IO; 13546 13547 lun->stats.ports[targ_port].bytes[type] += 13548 io->scsiio.kern_total_len; 13549 lun->stats.ports[targ_port].operations[type]++; 13550 #ifdef CTL_TIME_IO 13551 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13552 &io->io_hdr.dma_bt); 13553 lun->stats.ports[targ_port].num_dmas[type] += 13554 io->io_hdr.num_dmas; 13555 getbintime(&cur_bt); 13556 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13557 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13558 #endif 13559 } 13560 13561 /* 13562 * Remove this from the OOA queue. 13563 */ 13564 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13565 13566 /* 13567 * Run through the blocked queue on this LUN and see if anything 13568 * has become unblocked, now that this transaction is done. 13569 */ 13570 ctl_check_blocked(lun); 13571 13572 /* 13573 * If the LUN has been invalidated, free it if there is nothing 13574 * left on its OOA queue. 13575 */ 13576 if ((lun->flags & CTL_LUN_INVALID) 13577 && TAILQ_EMPTY(&lun->ooa_queue)) { 13578 mtx_unlock(&lun->lun_lock); 13579 mtx_lock(&ctl_softc->ctl_lock); 13580 ctl_free_lun(lun); 13581 mtx_unlock(&ctl_softc->ctl_lock); 13582 } else 13583 mtx_unlock(&lun->lun_lock); 13584 13585 bailout: 13586 13587 /* 13588 * If this command has been aborted, make sure we set the status 13589 * properly. The FETD is responsible for freeing the I/O and doing 13590 * whatever it needs to do to clean up its state. 13591 */ 13592 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13593 ctl_set_task_aborted(&io->scsiio); 13594 13595 /* 13596 * If enabled, print command error status. 13597 * We don't print UAs unless debugging was enabled explicitly. 
13598 	 */
13599 	do {
13600 		if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
13601 			break;
13602 		if (!bootverbose && (ctl_debug & CTL_DEBUG_INFO) == 0)
13603 			break;
13604 		if ((ctl_debug & CTL_DEBUG_INFO) == 0 &&
13605 		    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) &&
13606 		    (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
13607 			int error_code, sense_key, asc, ascq;
13608 
13609 			scsi_extract_sense_len(&io->scsiio.sense_data,
13610 			    io->scsiio.sense_len, &error_code, &sense_key,
13611 			    &asc, &ascq, /*show_errors*/ 0);
13612 			if (sense_key == SSD_KEY_UNIT_ATTENTION)
13613 				break;
13614 		}
13615 
13616 		ctl_io_error_print(io, NULL);
13617 	} while (0);
13618 
13619 	/*
13620 	 * Tell the FETD or the other shelf controller we're done with this
13621 	 * command.  Note that only SCSI commands get to this point.  Task
13622 	 * management commands are completed above.
13623 	 *
13624 	 * We only send status to the other controller if we're in XFER
13625 	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
13626 	 * received the I/O (from CTL's perspective), and so the status is
13627 	 * generated there.
13628 	 *
13629 	 * XXX KDM if we hold the lock here, we could cause a deadlock
13630 	 * if the frontend comes back into CTL in this context to queue
13631 	 * something.
13632 	 */
13633 	if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
13634 	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
13635 		union ctl_ha_msg msg;
13636 
13637 		memset(&msg, 0, sizeof(msg));
13638 		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
13639 		msg.hdr.original_sc = io->io_hdr.original_sc;
13640 		msg.hdr.nexus = io->io_hdr.nexus;
13641 		msg.hdr.status = io->io_hdr.status;
13642 		msg.scsi.scsi_status = io->scsiio.scsi_status;
13643 		msg.scsi.tag_num = io->scsiio.tag_num;
13644 		msg.scsi.tag_type = io->scsiio.tag_type;
13645 		msg.scsi.sense_len = io->scsiio.sense_len;
13646 		msg.scsi.sense_residual = io->scsiio.sense_residual;
13647 		msg.scsi.residual = io->scsiio.residual;
13648 		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
13649 		       sizeof(io->scsiio.sense_data));
13650 		/*
13651 		 * We copy this whether or not this is an I/O-related
13652 		 * command.  Otherwise, we'd have to go and check to see
13653 		 * whether it's a read/write command, and it really isn't
13654 		 * worth it.
13655 		 */
13656 		memcpy(&msg.scsi.lbalen,
13657 		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
13658 		       sizeof(msg.scsi.lbalen));
13659 
13660 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13661 				sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
13662 			/* XXX do something here */
13663 		}
13664 
13665 		ctl_free_io(io);
13666 	} else
13667 		fe_done(io);
13668 
13669 	return (CTL_RETVAL_COMPLETE);
13670 }
13671 
13672 #ifdef CTL_WITH_CA
13673 /*
13674  * Front end should call this if it doesn't do autosense.  When the request
13675  * sense comes back in from the initiator, we'll dequeue this and send it.
13676  */
13677 int
13678 ctl_queue_sense(union ctl_io *io)
13679 {
13680 	struct ctl_lun *lun;
13681 	struct ctl_softc *ctl_softc;
13682 	uint32_t initidx, targ_lun;
13683 
13684 	ctl_softc = control_softc;
13685 
13686 	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13687 
13688 	/*
13689 	 * LUN lookup will likely move to the ctl_work_thread() once we
13690 	 * have our new queueing infrastructure (that doesn't put things on
13691 	 * a per-LUN queue initially).  That is so that we can handle
13692 	 * things like an INQUIRY to a LUN that we don't have enabled.  We
13693 	 * can't deal with that right now.
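	 * Below we stash the sense data per-initiator in lun->pending_sense[]
	 * and set the matching bit in have_ca; if the LUN lookup fails, or a
	 * contingent allegiance is already pending for this initiator, the
	 * sense data is simply dropped.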
13694 */ 13695 mtx_lock(&ctl_softc->ctl_lock); 13696 13697 /* 13698 * If we don't have a LUN for this, just toss the sense 13699 * information. 13700 */ 13701 targ_lun = io->io_hdr.nexus.targ_lun; 13702 targ_lun = ctl_map_lun(io->io_hdr.nexus.targ_port, targ_lun); 13703 if ((targ_lun < CTL_MAX_LUNS) 13704 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 13705 lun = ctl_softc->ctl_luns[targ_lun]; 13706 else 13707 goto bailout; 13708 13709 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13710 13711 mtx_lock(&lun->lun_lock); 13712 /* 13713 * Already have CA set for this LUN...toss the sense information. 13714 */ 13715 if (ctl_is_set(lun->have_ca, initidx)) { 13716 mtx_unlock(&lun->lun_lock); 13717 goto bailout; 13718 } 13719 13720 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13721 ctl_min(sizeof(lun->pending_sense[initidx]), 13722 sizeof(io->scsiio.sense_data))); 13723 ctl_set_mask(lun->have_ca, initidx); 13724 mtx_unlock(&lun->lun_lock); 13725 13726 bailout: 13727 mtx_unlock(&ctl_softc->ctl_lock); 13728 13729 ctl_free_io(io); 13730 13731 return (CTL_RETVAL_COMPLETE); 13732 } 13733 #endif 13734 13735 /* 13736 * Primary command inlet from frontend ports. All SCSI and task I/O 13737 * requests must go through this function. 13738 */ 13739 int 13740 ctl_queue(union ctl_io *io) 13741 { 13742 struct ctl_softc *ctl_softc; 13743 13744 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13745 13746 ctl_softc = control_softc; 13747 13748 #ifdef CTL_TIME_IO 13749 io->io_hdr.start_time = time_uptime; 13750 getbintime(&io->io_hdr.start_bt); 13751 #endif /* CTL_TIME_IO */ 13752 13753 /* Map FE-specific LUN ID into global one. */ 13754 io->io_hdr.nexus.targ_mapped_lun = 13755 ctl_map_lun(io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); 13756 13757 switch (io->io_hdr.io_type) { 13758 case CTL_IO_SCSI: 13759 case CTL_IO_TASK: 13760 if (ctl_debug & CTL_DEBUG_CDB) 13761 ctl_io_print(io); 13762 ctl_enqueue_incoming(io); 13763 break; 13764 default: 13765 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13766 return (EINVAL); 13767 } 13768 13769 return (CTL_RETVAL_COMPLETE); 13770 } 13771 13772 #ifdef CTL_IO_DELAY 13773 static void 13774 ctl_done_timer_wakeup(void *arg) 13775 { 13776 union ctl_io *io; 13777 13778 io = (union ctl_io *)arg; 13779 ctl_done(io); 13780 } 13781 #endif /* CTL_IO_DELAY */ 13782 13783 void 13784 ctl_done(union ctl_io *io) 13785 { 13786 struct ctl_softc *ctl_softc; 13787 13788 ctl_softc = control_softc; 13789 13790 /* 13791 * Enable this to catch duplicate completion issues. 13792 */ 13793 #if 0 13794 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13795 printf("%s: type %d msg %d cdb %x iptl: " 13796 "%d:%d:%d:%d tag 0x%04x " 13797 "flag %#x status %x\n", 13798 __func__, 13799 io->io_hdr.io_type, 13800 io->io_hdr.msg_type, 13801 io->scsiio.cdb[0], 13802 io->io_hdr.nexus.initid.id, 13803 io->io_hdr.nexus.targ_port, 13804 io->io_hdr.nexus.targ_target.id, 13805 io->io_hdr.nexus.targ_lun, 13806 (io->io_hdr.io_type == 13807 CTL_IO_TASK) ? 13808 io->taskio.tag_num : 13809 io->scsiio.tag_num, 13810 io->io_hdr.flags, 13811 io->io_hdr.status); 13812 } else 13813 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13814 #endif 13815 13816 /* 13817 * This is an internal copy of an I/O, and should not go through 13818 * the normal done processing logic. 13819 */ 13820 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13821 return; 13822 13823 /* 13824 * We need to send a msg to the serializing shelf to finish the IO 13825 * as well. 
We don't send a finish message to the other shelf if 13826 * this is a task management command. Task management commands 13827 * aren't serialized in the OOA queue, but rather just executed on 13828 * both shelf controllers for commands that originated on that 13829 * controller. 13830 */ 13831 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 13832 && (io->io_hdr.io_type != CTL_IO_TASK)) { 13833 union ctl_ha_msg msg_io; 13834 13835 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 13836 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 13837 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 13838 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 13839 } 13840 /* continue on to finish IO */ 13841 } 13842 #ifdef CTL_IO_DELAY 13843 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13844 struct ctl_lun *lun; 13845 13846 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13847 13848 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13849 } else { 13850 struct ctl_lun *lun; 13851 13852 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13853 13854 if ((lun != NULL) 13855 && (lun->delay_info.done_delay > 0)) { 13856 struct callout *callout; 13857 13858 callout = (struct callout *)&io->io_hdr.timer_bytes; 13859 callout_init(callout, /*mpsafe*/ 1); 13860 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13861 callout_reset(callout, 13862 lun->delay_info.done_delay * hz, 13863 ctl_done_timer_wakeup, io); 13864 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13865 lun->delay_info.done_delay = 0; 13866 return; 13867 } 13868 } 13869 #endif /* CTL_IO_DELAY */ 13870 13871 ctl_enqueue_done(io); 13872 } 13873 13874 int 13875 ctl_isc(struct ctl_scsiio *ctsio) 13876 { 13877 struct ctl_lun *lun; 13878 int retval; 13879 13880 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13881 13882 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 13883 13884 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 13885 13886 retval = lun->backend->data_submit((union ctl_io *)ctsio); 13887 13888 return (retval); 13889 } 13890 13891 13892 static void 13893 ctl_work_thread(void *arg) 13894 { 13895 struct ctl_thread *thr = (struct ctl_thread *)arg; 13896 struct ctl_softc *softc = thr->ctl_softc; 13897 union ctl_io *io; 13898 int retval; 13899 13900 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13901 13902 for (;;) { 13903 retval = 0; 13904 13905 /* 13906 * We handle the queues in this order: 13907 * - ISC 13908 * - done queue (to free up resources, unblock other commands) 13909 * - RtR queue 13910 * - incoming queue 13911 * 13912 * If those queues are empty, we break out of the loop and 13913 * go to sleep. 
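		 * After servicing an entry from any queue we continue back to
		 * the top of the loop, so the higher-priority queues are
		 * always re-checked before lower-priority work is started.
		 * RtR processing can additionally be held off via
		 * ctl_pause_rtr (set by ctl_isc_quiesce()).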
13914 */ 13915 mtx_lock(&thr->queue_lock); 13916 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13917 if (io != NULL) { 13918 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13919 mtx_unlock(&thr->queue_lock); 13920 ctl_handle_isc(io); 13921 continue; 13922 } 13923 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13924 if (io != NULL) { 13925 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13926 /* clear any blocked commands, call fe_done */ 13927 mtx_unlock(&thr->queue_lock); 13928 retval = ctl_process_done(io); 13929 continue; 13930 } 13931 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13932 if (io != NULL) { 13933 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13934 mtx_unlock(&thr->queue_lock); 13935 if (io->io_hdr.io_type == CTL_IO_TASK) 13936 ctl_run_task(io); 13937 else 13938 ctl_scsiio_precheck(softc, &io->scsiio); 13939 continue; 13940 } 13941 if (!ctl_pause_rtr) { 13942 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13943 if (io != NULL) { 13944 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13945 mtx_unlock(&thr->queue_lock); 13946 retval = ctl_scsiio(&io->scsiio); 13947 if (retval != CTL_RETVAL_COMPLETE) 13948 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13949 continue; 13950 } 13951 } 13952 13953 /* Sleep until we have something to do. */ 13954 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13955 } 13956 } 13957 13958 static void 13959 ctl_lun_thread(void *arg) 13960 { 13961 struct ctl_softc *softc = (struct ctl_softc *)arg; 13962 struct ctl_be_lun *be_lun; 13963 int retval; 13964 13965 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13966 13967 for (;;) { 13968 retval = 0; 13969 mtx_lock(&softc->ctl_lock); 13970 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13971 if (be_lun != NULL) { 13972 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13973 mtx_unlock(&softc->ctl_lock); 13974 ctl_create_lun(be_lun); 13975 continue; 13976 } 13977 13978 /* Sleep until we have something to do. 
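		 * Note that mtx_sleep() with PDROP drops ctl_lock and does
		 * not reacquire it on wakeup; the top of the loop takes it
		 * again.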
*/ 13979 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13980 PDROP | PRIBIO, "-", 0); 13981 } 13982 } 13983 13984 static void 13985 ctl_thresh_thread(void *arg) 13986 { 13987 struct ctl_softc *softc = (struct ctl_softc *)arg; 13988 struct ctl_lun *lun; 13989 struct ctl_be_lun *be_lun; 13990 struct scsi_da_rw_recovery_page *rwpage; 13991 struct ctl_logical_block_provisioning_page *page; 13992 const char *attr; 13993 uint64_t thres, val; 13994 int i, e; 13995 13996 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13997 13998 for (;;) { 13999 mtx_lock(&softc->ctl_lock); 14000 STAILQ_FOREACH(lun, &softc->lun_list, links) { 14001 be_lun = lun->be_lun; 14002 if ((lun->flags & CTL_LUN_DISABLED) || 14003 (lun->flags & CTL_LUN_OFFLINE) || 14004 lun->backend->lun_attr == NULL) 14005 continue; 14006 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 14007 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 14008 continue; 14009 e = 0; 14010 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 14011 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 14012 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 14013 continue; 14014 thres = scsi_4btoul(page->descr[i].count); 14015 thres <<= CTL_LBP_EXPONENT; 14016 switch (page->descr[i].resource) { 14017 case 0x01: 14018 attr = "blocksavail"; 14019 break; 14020 case 0x02: 14021 attr = "blocksused"; 14022 break; 14023 case 0xf1: 14024 attr = "poolblocksavail"; 14025 break; 14026 case 0xf2: 14027 attr = "poolblocksused"; 14028 break; 14029 default: 14030 continue; 14031 } 14032 mtx_unlock(&softc->ctl_lock); // XXX 14033 val = lun->backend->lun_attr( 14034 lun->be_lun->be_lun, attr); 14035 mtx_lock(&softc->ctl_lock); 14036 if (val == UINT64_MAX) 14037 continue; 14038 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 14039 == SLBPPD_ARMING_INC) 14040 e |= (val >= thres); 14041 else 14042 e |= (val <= thres); 14043 } 14044 mtx_lock(&lun->lun_lock); 14045 if (e) { 14046 if (lun->lasttpt == 0 || 14047 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 14048 lun->lasttpt = time_uptime; 14049 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 14050 } 14051 } else { 14052 lun->lasttpt = 0; 14053 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 14054 } 14055 mtx_unlock(&lun->lun_lock); 14056 } 14057 mtx_unlock(&softc->ctl_lock); 14058 pause("-", CTL_LBP_PERIOD * hz); 14059 } 14060 } 14061 14062 static void 14063 ctl_enqueue_incoming(union ctl_io *io) 14064 { 14065 struct ctl_softc *softc = control_softc; 14066 struct ctl_thread *thr; 14067 u_int idx; 14068 14069 idx = (io->io_hdr.nexus.targ_port * 127 + 14070 io->io_hdr.nexus.initid.id) % worker_threads; 14071 thr = &softc->threads[idx]; 14072 mtx_lock(&thr->queue_lock); 14073 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 14074 mtx_unlock(&thr->queue_lock); 14075 wakeup(thr); 14076 } 14077 14078 static void 14079 ctl_enqueue_rtr(union ctl_io *io) 14080 { 14081 struct ctl_softc *softc = control_softc; 14082 struct ctl_thread *thr; 14083 14084 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 14085 mtx_lock(&thr->queue_lock); 14086 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 14087 mtx_unlock(&thr->queue_lock); 14088 wakeup(thr); 14089 } 14090 14091 static void 14092 ctl_enqueue_done(union ctl_io *io) 14093 { 14094 struct ctl_softc *softc = control_softc; 14095 struct ctl_thread *thr; 14096 14097 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 14098 mtx_lock(&thr->queue_lock); 14099 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 14100 
	mtx_unlock(&thr->queue_lock);
14101 	wakeup(thr);
14102 }
14103 
14104 static void
14105 ctl_enqueue_isc(union ctl_io *io)
14106 {
14107 	struct ctl_softc *softc = control_softc;
14108 	struct ctl_thread *thr;
14109 
14110 	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14111 	mtx_lock(&thr->queue_lock);
14112 	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
14113 	mtx_unlock(&thr->queue_lock);
14114 	wakeup(thr);
14115 }
14116 
14117 /* Initialization and failover */
14118 
14119 void
14120 ctl_init_isc_msg(void)
14121 {
14122 	printf("CTL: Still calling this thing\n");
14123 }
14124 
14125 /*
14126  * Init component
14127  * Initializes component into configuration defined by bootMode
14128  * (see hasc-sv.c)
14129  * returns ctl_ha_comp_status:
14130  *	OK
14131  *	ERROR - fatal error
14132  */
14133 static ctl_ha_comp_status
14134 ctl_isc_init(struct ctl_ha_component *c)
14135 {
14136 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
14137 
14138 	c->status = ret;
14139 	return ret;
14140 }
14141 
14142 /* Start component
14143  * Starts component in state requested. If component starts successfully,
14144  * it must set its own state to the requested state.
14145  * When requested state is HASC_STATE_HA, the component may refine it
14146  * by adding _SLAVE or _MASTER flags.
14147  * Currently allowed state transitions are:
14148  *	UNKNOWN->HA	- initial startup
14149  *	UNKNOWN->SINGLE	- initial startup when no partner detected
14150  *	HA->SINGLE	- failover
14151  * returns ctl_ha_comp_status:
14152  *	OK	- component successfully started in requested state
14153  *	FAILED	- could not start the requested state, failover may
14154  *		  be possible
14155  *	ERROR	- fatal error detected, no future startup possible
14156  */
14157 static ctl_ha_comp_status
14158 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
14159 {
14160 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
14161 
14162 	printf("%s: go\n", __func__);
14163 
14164 	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
14165 	if (c->state == CTL_HA_STATE_UNKNOWN) {
14166 		control_softc->is_single = 0;
14167 		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
14168 		    != CTL_HA_STATUS_SUCCESS) {
14169 			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
14170 			ret = CTL_HA_COMP_STATUS_ERROR;
14171 		}
14172 	} else if (CTL_HA_STATE_IS_HA(c->state)
14173 		&& CTL_HA_STATE_IS_SINGLE(state)) {
14174 		// HA->SINGLE transition
14175 		ctl_failover();
14176 		control_softc->is_single = 1;
14177 	} else {
14178 		printf("ctl_isc_start: Invalid state transition %X->%X\n",
14179 		       c->state, state);
14180 		ret = CTL_HA_COMP_STATUS_ERROR;
14181 	}
14182 	if (CTL_HA_STATE_IS_SINGLE(state))
14183 		control_softc->is_single = 1;
14184 
14185 	c->state = state;
14186 	c->status = ret;
14187 	return ret;
14188 }
14189 
14190 /*
14191  * Quiesce component
14192  * The component must clear any error conditions (set status to OK) and
14193  * prepare itself for another Start call
14194  * returns ctl_ha_comp_status:
14195  *	OK
14196  *	ERROR
14197  */
14198 static ctl_ha_comp_status
14199 ctl_isc_quiesce(struct ctl_ha_component *c)
14200 {
14201 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
14202 
14203 	ctl_pause_rtr = 1;
14204 	c->status = ret;
14205 	return ret;
14206 }
14207 
14208 struct ctl_ha_component ctl_ha_component_ctlisc =
14209 {
14210 	.name = "CTL ISC",
14211 	.state = CTL_HA_STATE_UNKNOWN,
14212 	.init = ctl_isc_init,
14213 	.start = ctl_isc_start,
14214 	.quiesce = ctl_isc_quiesce
14215 };
14216 
14217 /*
14218  * vim: ts=8
14219  */
14220 