/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware. These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
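 *
 * (Illustrative example, not taken from this file: CTL_SIZE_8B(target, 13)
 * leaves target at 16, and CTL_ALIGN_8B(target, 0x1003, uint8_t *) rounds
 * the address up to 0x1008; values already on an 8-byte boundary pass
 * through unchanged.)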
 */
#define CTL_ALIGN_8B(target, source, type)			\
	if (((uint32_t)source & 0x7) != 0)			\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else							\
		target = (type)source;

#define CTL_SIZE_8B(target, size)				\
	if ((size & 0x7) != 0)					\
		target = size + (0x8 - (size & 0x7));		\
	else							\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only. The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},	/* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

const static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},	/* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/0,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0,
		       0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_DEXCPT,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static uint8_t ctl_pause_rtr;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

#ifdef notyet
static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
    int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
#endif
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr,
    struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
    struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
    struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
    uint64_t res_key, uint64_t sa_res_key,
    uint8_t type, uint32_t residx,
    struct ctl_scsiio *ctsio,
    struct scsi_per_res_out *cdb,
    struct scsi_per_res_out_parms*
    param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
    union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
    int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
    int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry,
    struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
#ifdef notyet
static void ctl_failover(void);
#endif
static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
    struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
    ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void
ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
#ifdef notyet
static void ctl_enqueue_isc(union ctl_io *io);
#endif
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table. This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

#ifdef notyet
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	    sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	    &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		    ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%d:%d:%d:%d tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    tmp_io->io_hdr.io_type,
		    tmp_io->io_hdr.msg_type,
		    tmp_io->scsiio.cdb[0],
		    tmp_io->io_hdr.nexus.initid.id,
		    tmp_io->io_hdr.nexus.targ_port,
		    tmp_io->io_hdr.nexus.targ_target.id,
		    tmp_io->io_hdr.nexus.targ_lun,
		    (tmp_io->io_hdr.io_type ==
		    CTL_IO_TASK) ?
584 tmp_io->taskio.tag_num : 585 tmp_io->scsiio.tag_num, 586 tmp_io->io_hdr.flags, 587 tmp_io->io_hdr.status); 588 } 589 #endif 590 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 591 ctl_enqueue_isc((union ctl_io *)ctsio); 592 } 593 594 /* 595 * ISC (Inter Shelf Communication) event handler. Events from the HA 596 * subsystem come in here. 597 */ 598 static void 599 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 600 { 601 struct ctl_softc *softc; 602 union ctl_io *io; 603 struct ctl_prio *presio; 604 ctl_ha_status isc_status; 605 606 softc = control_softc; 607 io = NULL; 608 609 610 #if 0 611 printf("CTL: Isc Msg event %d\n", event); 612 #endif 613 if (event == CTL_HA_EVT_MSG_RECV) { 614 union ctl_ha_msg msg_info; 615 616 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 617 sizeof(msg_info), /*wait*/ 0); 618 #if 0 619 printf("CTL: msg_type %d\n", msg_info.msg_type); 620 #endif 621 if (isc_status != 0) { 622 printf("Error receiving message, status = %d\n", 623 isc_status); 624 return; 625 } 626 627 switch (msg_info.hdr.msg_type) { 628 case CTL_MSG_SERIALIZE: 629 #if 0 630 printf("Serialize\n"); 631 #endif 632 io = ctl_alloc_io_nowait(softc->othersc_pool); 633 if (io == NULL) { 634 printf("ctl_isc_event_handler: can't allocate " 635 "ctl_io!\n"); 636 /* Bad Juju */ 637 /* Need to set busy and send msg back */ 638 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 639 msg_info.hdr.status = CTL_SCSI_ERROR; 640 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; 641 msg_info.scsi.sense_len = 0; 642 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 643 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ 644 } 645 goto bailout; 646 } 647 ctl_zero_io(io); 648 // populate ctsio from msg_info 649 io->io_hdr.io_type = CTL_IO_SCSI; 650 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 651 io->io_hdr.original_sc = msg_info.hdr.original_sc; 652 #if 0 653 printf("pOrig %x\n", (int)msg_info.original_sc); 654 #endif 655 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 656 CTL_FLAG_IO_ACTIVE; 657 /* 658 * If we're in serialization-only mode, we don't 659 * want to go through full done processing. Thus 660 * the COPY flag. 661 * 662 * XXX KDM add another flag that is more specific. 663 */ 664 if (softc->ha_mode == CTL_HA_MODE_SER_ONLY) 665 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 666 io->io_hdr.nexus = msg_info.hdr.nexus; 667 #if 0 668 printf("targ %d, port %d, iid %d, lun %d\n", 669 io->io_hdr.nexus.targ_target.id, 670 io->io_hdr.nexus.targ_port, 671 io->io_hdr.nexus.initid.id, 672 io->io_hdr.nexus.targ_lun); 673 #endif 674 io->scsiio.tag_num = msg_info.scsi.tag_num; 675 io->scsiio.tag_type = msg_info.scsi.tag_type; 676 memcpy(io->scsiio.cdb, msg_info.scsi.cdb, 677 CTL_MAX_CDBLEN); 678 if (softc->ha_mode == CTL_HA_MODE_XFER) { 679 const struct ctl_cmd_entry *entry; 680 681 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 682 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 683 io->io_hdr.flags |= 684 entry->flags & CTL_FLAG_DATA_MASK; 685 } 686 ctl_enqueue_isc(io); 687 break; 688 689 /* Performed on the Originating SC, XFER mode only */ 690 case CTL_MSG_DATAMOVE: { 691 struct ctl_sg_entry *sgl; 692 int i, j; 693 694 io = msg_info.hdr.original_sc; 695 if (io == NULL) { 696 printf("%s: original_sc == NULL!\n", __func__); 697 /* XXX KDM do something here */ 698 break; 699 } 700 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 701 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 702 /* 703 * Keep track of this, we need to send it back over 704 * when the datamove is complete. 
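			 * (Descriptive note, inferred from the
			 * CTL_MSG_DATAMOVE_DONE handling below: the
			 * serializing_sc pointer saved here is echoed back
			 * to the other SC in the datamove-done message so
			 * that side can locate its own copy of the command.)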
705 */ 706 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 707 708 if (msg_info.dt.sg_sequence == 0) { 709 /* 710 * XXX KDM we use the preallocated S/G list 711 * here, but we'll need to change this to 712 * dynamic allocation if we need larger S/G 713 * lists. 714 */ 715 if (msg_info.dt.kern_sg_entries > 716 sizeof(io->io_hdr.remote_sglist) / 717 sizeof(io->io_hdr.remote_sglist[0])) { 718 printf("%s: number of S/G entries " 719 "needed %u > allocated num %zd\n", 720 __func__, 721 msg_info.dt.kern_sg_entries, 722 sizeof(io->io_hdr.remote_sglist)/ 723 sizeof(io->io_hdr.remote_sglist[0])); 724 725 /* 726 * XXX KDM send a message back to 727 * the other side to shut down the 728 * DMA. The error will come back 729 * through via the normal channel. 730 */ 731 break; 732 } 733 sgl = io->io_hdr.remote_sglist; 734 memset(sgl, 0, 735 sizeof(io->io_hdr.remote_sglist)); 736 737 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 738 739 io->scsiio.kern_sg_entries = 740 msg_info.dt.kern_sg_entries; 741 io->scsiio.rem_sg_entries = 742 msg_info.dt.kern_sg_entries; 743 io->scsiio.kern_data_len = 744 msg_info.dt.kern_data_len; 745 io->scsiio.kern_total_len = 746 msg_info.dt.kern_total_len; 747 io->scsiio.kern_data_resid = 748 msg_info.dt.kern_data_resid; 749 io->scsiio.kern_rel_offset = 750 msg_info.dt.kern_rel_offset; 751 /* 752 * Clear out per-DMA flags. 753 */ 754 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK; 755 /* 756 * Add per-DMA flags that are set for this 757 * particular DMA request. 758 */ 759 io->io_hdr.flags |= msg_info.dt.flags & 760 CTL_FLAG_RDMA_MASK; 761 } else 762 sgl = (struct ctl_sg_entry *) 763 io->scsiio.kern_data_ptr; 764 765 for (i = msg_info.dt.sent_sg_entries, j = 0; 766 i < (msg_info.dt.sent_sg_entries + 767 msg_info.dt.cur_sg_entries); i++, j++) { 768 sgl[i].addr = msg_info.dt.sg_list[j].addr; 769 sgl[i].len = msg_info.dt.sg_list[j].len; 770 771 #if 0 772 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 773 __func__, 774 msg_info.dt.sg_list[j].addr, 775 msg_info.dt.sg_list[j].len, 776 sgl[i].addr, sgl[i].len, j, i); 777 #endif 778 } 779 #if 0 780 memcpy(&sgl[msg_info.dt.sent_sg_entries], 781 msg_info.dt.sg_list, 782 sizeof(*sgl) * msg_info.dt.cur_sg_entries); 783 #endif 784 785 /* 786 * If this is the last piece of the I/O, we've got 787 * the full S/G list. Queue processing in the thread. 788 * Otherwise wait for the next piece. 789 */ 790 if (msg_info.dt.sg_last != 0) 791 ctl_enqueue_isc(io); 792 break; 793 } 794 /* Performed on the Serializing (primary) SC, XFER mode only */ 795 case CTL_MSG_DATAMOVE_DONE: { 796 if (msg_info.hdr.serializing_sc == NULL) { 797 printf("%s: serializing_sc == NULL!\n", 798 __func__); 799 /* XXX KDM now what? */ 800 break; 801 } 802 /* 803 * We grab the sense information here in case 804 * there was a failure, so we can return status 805 * back to the initiator. 
806 */ 807 io = msg_info.hdr.serializing_sc; 808 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 809 io->io_hdr.status = msg_info.hdr.status; 810 io->scsiio.scsi_status = msg_info.scsi.scsi_status; 811 io->scsiio.sense_len = msg_info.scsi.sense_len; 812 io->scsiio.sense_residual =msg_info.scsi.sense_residual; 813 io->io_hdr.port_status = msg_info.scsi.fetd_status; 814 io->scsiio.residual = msg_info.scsi.residual; 815 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data, 816 sizeof(io->scsiio.sense_data)); 817 ctl_enqueue_isc(io); 818 break; 819 } 820 821 /* Preformed on Originating SC, SER_ONLY mode */ 822 case CTL_MSG_R2R: 823 io = msg_info.hdr.original_sc; 824 if (io == NULL) { 825 printf("%s: Major Bummer\n", __func__); 826 return; 827 } else { 828 #if 0 829 printf("pOrig %x\n",(int) ctsio); 830 #endif 831 } 832 io->io_hdr.msg_type = CTL_MSG_R2R; 833 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 834 ctl_enqueue_isc(io); 835 break; 836 837 /* 838 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 839 * mode. 840 * Performed on the Originating (i.e. secondary) SC in XFER 841 * mode 842 */ 843 case CTL_MSG_FINISH_IO: 844 if (softc->ha_mode == CTL_HA_MODE_XFER) 845 ctl_isc_handler_finish_xfer(softc, 846 &msg_info); 847 else 848 ctl_isc_handler_finish_ser_only(softc, 849 &msg_info); 850 break; 851 852 /* Preformed on Originating SC */ 853 case CTL_MSG_BAD_JUJU: 854 io = msg_info.hdr.original_sc; 855 if (io == NULL) { 856 printf("%s: Bad JUJU!, original_sc is NULL!\n", 857 __func__); 858 break; 859 } 860 ctl_copy_sense_data(&msg_info, io); 861 /* 862 * IO should have already been cleaned up on other 863 * SC so clear this flag so we won't send a message 864 * back to finish the IO there. 865 */ 866 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 867 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 868 869 /* io = msg_info.hdr.serializing_sc; */ 870 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 871 ctl_enqueue_isc(io); 872 break; 873 874 /* Handle resets sent from the other side */ 875 case CTL_MSG_MANAGE_TASKS: { 876 struct ctl_taskio *taskio; 877 taskio = (struct ctl_taskio *)ctl_alloc_io_nowait( 878 softc->othersc_pool); 879 if (taskio == NULL) { 880 printf("ctl_isc_event_handler: can't allocate " 881 "ctl_io!\n"); 882 /* Bad Juju */ 883 /* should I just call the proper reset func 884 here??? 
*/ 885 goto bailout; 886 } 887 ctl_zero_io((union ctl_io *)taskio); 888 taskio->io_hdr.io_type = CTL_IO_TASK; 889 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 890 taskio->io_hdr.nexus = msg_info.hdr.nexus; 891 taskio->task_action = msg_info.task.task_action; 892 taskio->tag_num = msg_info.task.tag_num; 893 taskio->tag_type = msg_info.task.tag_type; 894 #ifdef CTL_TIME_IO 895 taskio->io_hdr.start_time = time_uptime; 896 getbintime(&taskio->io_hdr.start_bt); 897 #if 0 898 cs_prof_gettime(&taskio->io_hdr.start_ticks); 899 #endif 900 #endif /* CTL_TIME_IO */ 901 ctl_run_task((union ctl_io *)taskio); 902 break; 903 } 904 /* Persistent Reserve action which needs attention */ 905 case CTL_MSG_PERS_ACTION: 906 presio = (struct ctl_prio *)ctl_alloc_io_nowait( 907 softc->othersc_pool); 908 if (presio == NULL) { 909 printf("ctl_isc_event_handler: can't allocate " 910 "ctl_io!\n"); 911 /* Bad Juju */ 912 /* Need to set busy and send msg back */ 913 goto bailout; 914 } 915 ctl_zero_io((union ctl_io *)presio); 916 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 917 presio->pr_msg = msg_info.pr; 918 ctl_enqueue_isc((union ctl_io *)presio); 919 break; 920 case CTL_MSG_SYNC_FE: 921 rcv_sync_msg = 1; 922 break; 923 default: 924 printf("How did I get here?\n"); 925 } 926 } else if (event == CTL_HA_EVT_MSG_SENT) { 927 if (param != CTL_HA_STATUS_SUCCESS) { 928 printf("Bad status from ctl_ha_msg_send status %d\n", 929 param); 930 } 931 return; 932 } else if (event == CTL_HA_EVT_DISCONNECT) { 933 printf("CTL: Got a disconnect from Isc\n"); 934 return; 935 } else { 936 printf("ctl_isc_event_handler: Unknown event %d\n", event); 937 return; 938 } 939 940 bailout: 941 return; 942 } 943 944 static void 945 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 946 { 947 struct scsi_sense_data *sense; 948 949 sense = &dest->scsiio.sense_data; 950 bcopy(&src->scsi.sense_data, sense, sizeof(*sense)); 951 dest->scsiio.scsi_status = src->scsi.scsi_status; 952 dest->scsiio.sense_len = src->scsi.sense_len; 953 dest->io_hdr.status = src->hdr.status; 954 } 955 #endif 956 957 static void 958 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 959 { 960 ctl_ua_type *pu; 961 962 mtx_assert(&lun->lun_lock, MA_OWNED); 963 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 964 if (pu == NULL) 965 return; 966 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 967 } 968 969 static void 970 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 971 { 972 int i, j; 973 974 mtx_assert(&lun->lun_lock, MA_OWNED); 975 for (i = 0; i < CTL_MAX_PORTS; i++) { 976 if (lun->pending_ua[i] == NULL) 977 continue; 978 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 979 if (i * CTL_MAX_INIT_PER_PORT + j == except) 980 continue; 981 lun->pending_ua[i][j] |= ua; 982 } 983 } 984 } 985 986 static void 987 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 988 { 989 ctl_ua_type *pu; 990 991 mtx_assert(&lun->lun_lock, MA_OWNED); 992 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 993 if (pu == NULL) 994 return; 995 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 996 } 997 998 static void 999 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1000 { 1001 int i, j; 1002 1003 mtx_assert(&lun->lun_lock, MA_OWNED); 1004 for (i = 0; i < CTL_MAX_PORTS; i++) { 1005 if (lun->pending_ua[i] == NULL) 1006 continue; 1007 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1008 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1009 continue; 1010 lun->pending_ua[i][j] &= ~ua; 1011 } 1012 } 1013 } 1014 1015 static 
void 1016 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1017 ctl_ua_type ua_type) 1018 { 1019 struct ctl_lun *lun; 1020 1021 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1022 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1023 mtx_lock(&lun->lun_lock); 1024 ctl_clr_ua(lun, initidx, ua_type); 1025 mtx_unlock(&lun->lun_lock); 1026 } 1027 } 1028 1029 static int 1030 ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) 1031 { 1032 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1033 struct ctl_lun *lun; 1034 int error, value; 1035 1036 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) 1037 value = 0; 1038 else 1039 value = 1; 1040 1041 error = sysctl_handle_int(oidp, &value, 0, req); 1042 if ((error != 0) || (req->newptr == NULL)) 1043 return (error); 1044 1045 mtx_lock(&softc->ctl_lock); 1046 if (value == 0) 1047 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1048 else 1049 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1050 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1051 mtx_lock(&lun->lun_lock); 1052 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1053 mtx_unlock(&lun->lun_lock); 1054 } 1055 mtx_unlock(&softc->ctl_lock); 1056 return (0); 1057 } 1058 1059 static int 1060 ctl_init(void) 1061 { 1062 struct ctl_softc *softc; 1063 void *other_pool; 1064 int i, error, retval; 1065 //int isc_retval; 1066 1067 retval = 0; 1068 ctl_pause_rtr = 0; 1069 rcv_sync_msg = 0; 1070 1071 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1072 M_WAITOK | M_ZERO); 1073 softc = control_softc; 1074 1075 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1076 "cam/ctl"); 1077 1078 softc->dev->si_drv1 = softc; 1079 1080 /* 1081 * By default, return a "bad LUN" peripheral qualifier for unknown 1082 * LUNs. The user can override this default using the tunable or 1083 * sysctl. See the comment in ctl_inquiry_std() for more details. 1084 */ 1085 softc->inquiry_pq_no_lun = 1; 1086 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", 1087 &softc->inquiry_pq_no_lun); 1088 sysctl_ctx_init(&softc->sysctl_ctx); 1089 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1090 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1091 CTLFLAG_RD, 0, "CAM Target Layer"); 1092 1093 if (softc->sysctl_tree == NULL) { 1094 printf("%s: unable to allocate sysctl tree\n", __func__); 1095 destroy_dev(softc->dev); 1096 free(control_softc, M_DEVBUF); 1097 control_softc = NULL; 1098 return (ENOMEM); 1099 } 1100 1101 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1102 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1103 "inquiry_pq_no_lun", CTLFLAG_RW, 1104 &softc->inquiry_pq_no_lun, 0, 1105 "Report no lun possible for invalid LUNs"); 1106 1107 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1108 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1109 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1110 softc->open_count = 0; 1111 1112 /* 1113 * Default to actually sending a SYNCHRONIZE CACHE command down to 1114 * the drive. 1115 */ 1116 softc->flags = CTL_FLAG_REAL_SYNC; 1117 1118 /* 1119 * In Copan's HA scheme, the "master" and "slave" roles are 1120 * figured out through the slot the controller is in. Although it 1121 * is an active/active system, someone has to be in charge. 
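	 * (Summary of the ha_id handling just below: an ha_id of 0 means
	 * no HA at all, so this head is marked active and single; a
	 * non-zero ha_id N places this head's target ports at
	 * port_offset (N - 1) * CTL_MAX_PORTS, and persis_offset scales
	 * that by CTL_MAX_INIT_PER_PORT.)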
1122 */ 1123 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1124 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1125 "HA head ID (0 - no HA)"); 1126 if (softc->ha_id == 0) { 1127 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1128 softc->is_single = 1; 1129 softc->port_offset = 0; 1130 } else 1131 softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS; 1132 softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT; 1133 1134 STAILQ_INIT(&softc->lun_list); 1135 STAILQ_INIT(&softc->pending_lun_queue); 1136 STAILQ_INIT(&softc->fe_list); 1137 STAILQ_INIT(&softc->port_list); 1138 STAILQ_INIT(&softc->be_list); 1139 ctl_tpc_init(softc); 1140 1141 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1142 &other_pool) != 0) 1143 { 1144 printf("ctl: can't allocate %d entry other SC pool, " 1145 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1146 return (ENOMEM); 1147 } 1148 softc->othersc_pool = other_pool; 1149 1150 if (worker_threads <= 0) 1151 worker_threads = max(1, mp_ncpus / 4); 1152 if (worker_threads > CTL_MAX_THREADS) 1153 worker_threads = CTL_MAX_THREADS; 1154 1155 for (i = 0; i < worker_threads; i++) { 1156 struct ctl_thread *thr = &softc->threads[i]; 1157 1158 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1159 thr->ctl_softc = softc; 1160 STAILQ_INIT(&thr->incoming_queue); 1161 STAILQ_INIT(&thr->rtr_queue); 1162 STAILQ_INIT(&thr->done_queue); 1163 STAILQ_INIT(&thr->isc_queue); 1164 1165 error = kproc_kthread_add(ctl_work_thread, thr, 1166 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1167 if (error != 0) { 1168 printf("error creating CTL work thread!\n"); 1169 ctl_pool_free(other_pool); 1170 return (error); 1171 } 1172 } 1173 error = kproc_kthread_add(ctl_lun_thread, softc, 1174 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1175 if (error != 0) { 1176 printf("error creating CTL lun thread!\n"); 1177 ctl_pool_free(other_pool); 1178 return (error); 1179 } 1180 error = kproc_kthread_add(ctl_thresh_thread, softc, 1181 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1182 if (error != 0) { 1183 printf("error creating CTL threshold thread!\n"); 1184 ctl_pool_free(other_pool); 1185 return (error); 1186 } 1187 1188 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1189 OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN, 1190 softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head"); 1191 1192 #ifdef CTL_IO_DELAY 1193 if (sizeof(struct callout) > CTL_TIMER_BYTES) { 1194 printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n", 1195 sizeof(struct callout), CTL_TIMER_BYTES); 1196 return (EINVAL); 1197 } 1198 #endif /* CTL_IO_DELAY */ 1199 1200 return (0); 1201 } 1202 1203 void 1204 ctl_shutdown(void) 1205 { 1206 struct ctl_softc *softc; 1207 struct ctl_lun *lun, *next_lun; 1208 1209 softc = (struct ctl_softc *)control_softc; 1210 1211 mtx_lock(&softc->ctl_lock); 1212 1213 /* 1214 * Free up each LUN. 
1215 */ 1216 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1217 next_lun = STAILQ_NEXT(lun, links); 1218 ctl_free_lun(lun); 1219 } 1220 1221 mtx_unlock(&softc->ctl_lock); 1222 1223 #if 0 1224 ctl_shutdown_thread(softc->work_thread); 1225 mtx_destroy(&softc->queue_lock); 1226 #endif 1227 1228 ctl_tpc_shutdown(softc); 1229 uma_zdestroy(softc->io_zone); 1230 mtx_destroy(&softc->ctl_lock); 1231 1232 destroy_dev(softc->dev); 1233 1234 sysctl_ctx_free(&softc->sysctl_ctx); 1235 1236 free(control_softc, M_DEVBUF); 1237 control_softc = NULL; 1238 } 1239 1240 static int 1241 ctl_module_event_handler(module_t mod, int what, void *arg) 1242 { 1243 1244 switch (what) { 1245 case MOD_LOAD: 1246 return (ctl_init()); 1247 case MOD_UNLOAD: 1248 return (EBUSY); 1249 default: 1250 return (EOPNOTSUPP); 1251 } 1252 } 1253 1254 /* 1255 * XXX KDM should we do some access checks here? Bump a reference count to 1256 * prevent a CTL module from being unloaded while someone has it open? 1257 */ 1258 static int 1259 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1260 { 1261 return (0); 1262 } 1263 1264 static int 1265 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1266 { 1267 return (0); 1268 } 1269 1270 int 1271 ctl_port_enable(ctl_port_type port_type) 1272 { 1273 struct ctl_softc *softc = control_softc; 1274 struct ctl_port *port; 1275 1276 if (softc->is_single == 0) { 1277 union ctl_ha_msg msg_info; 1278 int isc_retval; 1279 1280 #if 0 1281 printf("%s: HA mode, synchronizing frontend enable\n", 1282 __func__); 1283 #endif 1284 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE; 1285 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1286 sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) { 1287 printf("Sync msg send error retval %d\n", isc_retval); 1288 } 1289 if (!rcv_sync_msg) { 1290 isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 1291 sizeof(msg_info), 1); 1292 } 1293 #if 0 1294 printf("CTL:Frontend Enable\n"); 1295 } else { 1296 printf("%s: single mode, skipping frontend synchronization\n", 1297 __func__); 1298 #endif 1299 } 1300 1301 STAILQ_FOREACH(port, &softc->port_list, links) { 1302 if (port_type & port->port_type) 1303 { 1304 #if 0 1305 printf("port %d\n", port->targ_port); 1306 #endif 1307 ctl_port_online(port); 1308 } 1309 } 1310 1311 return (0); 1312 } 1313 1314 int 1315 ctl_port_disable(ctl_port_type port_type) 1316 { 1317 struct ctl_softc *softc; 1318 struct ctl_port *port; 1319 1320 softc = control_softc; 1321 1322 STAILQ_FOREACH(port, &softc->port_list, links) { 1323 if (port_type & port->port_type) 1324 ctl_port_offline(port); 1325 } 1326 1327 return (0); 1328 } 1329 1330 /* 1331 * Returns 0 for success, 1 for failure. 1332 * Currently the only failure mode is if there aren't enough entries 1333 * allocated. So, in case of a failure, look at num_entries_dropped, 1334 * reallocate and try again. 
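 *
 * A minimal caller sketch (illustrative only; the buffer name, size handling
 * and malloc type are assumptions, not taken from this file):
 *
 *	struct ctl_port_entry *entries;
 *	int filled, dropped;
 *
 *	entries = malloc(n * sizeof(*entries), M_TEMP, M_WAITOK);
 *	if (ctl_port_list(entries, n, &filled, &dropped,
 *	    CTL_PORT_ALL, 0) != 0) {
 *		free(entries, M_TEMP);
 *		n += dropped;		(grow the array and call again)
 *	}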
1335 */ 1336 int 1337 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1338 int *num_entries_filled, int *num_entries_dropped, 1339 ctl_port_type port_type, int no_virtual) 1340 { 1341 struct ctl_softc *softc; 1342 struct ctl_port *port; 1343 int entries_dropped, entries_filled; 1344 int retval; 1345 int i; 1346 1347 softc = control_softc; 1348 1349 retval = 0; 1350 entries_filled = 0; 1351 entries_dropped = 0; 1352 1353 i = 0; 1354 mtx_lock(&softc->ctl_lock); 1355 STAILQ_FOREACH(port, &softc->port_list, links) { 1356 struct ctl_port_entry *entry; 1357 1358 if ((port->port_type & port_type) == 0) 1359 continue; 1360 1361 if ((no_virtual != 0) 1362 && (port->virtual_port != 0)) 1363 continue; 1364 1365 if (entries_filled >= num_entries_alloced) { 1366 entries_dropped++; 1367 continue; 1368 } 1369 entry = &entries[i]; 1370 1371 entry->port_type = port->port_type; 1372 strlcpy(entry->port_name, port->port_name, 1373 sizeof(entry->port_name)); 1374 entry->physical_port = port->physical_port; 1375 entry->virtual_port = port->virtual_port; 1376 entry->wwnn = port->wwnn; 1377 entry->wwpn = port->wwpn; 1378 1379 i++; 1380 entries_filled++; 1381 } 1382 1383 mtx_unlock(&softc->ctl_lock); 1384 1385 if (entries_dropped > 0) 1386 retval = 1; 1387 1388 *num_entries_dropped = entries_dropped; 1389 *num_entries_filled = entries_filled; 1390 1391 return (retval); 1392 } 1393 1394 /* 1395 * Remove an initiator by port number and initiator ID. 1396 * Returns 0 for success, -1 for failure. 1397 */ 1398 int 1399 ctl_remove_initiator(struct ctl_port *port, int iid) 1400 { 1401 struct ctl_softc *softc = control_softc; 1402 1403 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1404 1405 if (iid > CTL_MAX_INIT_PER_PORT) { 1406 printf("%s: initiator ID %u > maximun %u!\n", 1407 __func__, iid, CTL_MAX_INIT_PER_PORT); 1408 return (-1); 1409 } 1410 1411 mtx_lock(&softc->ctl_lock); 1412 port->wwpn_iid[iid].in_use--; 1413 port->wwpn_iid[iid].last_use = time_uptime; 1414 mtx_unlock(&softc->ctl_lock); 1415 1416 return (0); 1417 } 1418 1419 /* 1420 * Add an initiator to the initiator map. 1421 * Returns iid for success, < 0 for failure. 
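 *
 * (Summary of the lookup below: a negative iid asks this function to pick a
 * slot; it first tries to match an existing entry by WWPN or name, then a
 * completely unused slot, then the least recently used idle slot, and
 * returns -2 if none of those are available.)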
1422 */ 1423 int 1424 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1425 { 1426 struct ctl_softc *softc = control_softc; 1427 time_t best_time; 1428 int i, best; 1429 1430 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1431 1432 if (iid >= CTL_MAX_INIT_PER_PORT) { 1433 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1434 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1435 free(name, M_CTL); 1436 return (-1); 1437 } 1438 1439 mtx_lock(&softc->ctl_lock); 1440 1441 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1442 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1443 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1444 iid = i; 1445 break; 1446 } 1447 if (name != NULL && port->wwpn_iid[i].name != NULL && 1448 strcmp(name, port->wwpn_iid[i].name) == 0) { 1449 iid = i; 1450 break; 1451 } 1452 } 1453 } 1454 1455 if (iid < 0) { 1456 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1457 if (port->wwpn_iid[i].in_use == 0 && 1458 port->wwpn_iid[i].wwpn == 0 && 1459 port->wwpn_iid[i].name == NULL) { 1460 iid = i; 1461 break; 1462 } 1463 } 1464 } 1465 1466 if (iid < 0) { 1467 best = -1; 1468 best_time = INT32_MAX; 1469 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1470 if (port->wwpn_iid[i].in_use == 0) { 1471 if (port->wwpn_iid[i].last_use < best_time) { 1472 best = i; 1473 best_time = port->wwpn_iid[i].last_use; 1474 } 1475 } 1476 } 1477 iid = best; 1478 } 1479 1480 if (iid < 0) { 1481 mtx_unlock(&softc->ctl_lock); 1482 free(name, M_CTL); 1483 return (-2); 1484 } 1485 1486 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1487 /* 1488 * This is not an error yet. 1489 */ 1490 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1491 #if 0 1492 printf("%s: port %d iid %u WWPN %#jx arrived" 1493 " again\n", __func__, port->targ_port, 1494 iid, (uintmax_t)wwpn); 1495 #endif 1496 goto take; 1497 } 1498 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1499 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1500 #if 0 1501 printf("%s: port %d iid %u name '%s' arrived" 1502 " again\n", __func__, port->targ_port, 1503 iid, name); 1504 #endif 1505 goto take; 1506 } 1507 1508 /* 1509 * This is an error, but what do we do about it? The 1510 * driver is telling us we have a new WWPN for this 1511 * initiator ID, so we pretty much need to use it. 1512 */ 1513 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1514 " but WWPN %#jx '%s' is still at that address\n", 1515 __func__, port->targ_port, iid, wwpn, name, 1516 (uintmax_t)port->wwpn_iid[iid].wwpn, 1517 port->wwpn_iid[iid].name); 1518 1519 /* 1520 * XXX KDM clear have_ca and ua_pending on each LUN for 1521 * this initiator. 
1522 */ 1523 } 1524 take: 1525 free(port->wwpn_iid[iid].name, M_CTL); 1526 port->wwpn_iid[iid].name = name; 1527 port->wwpn_iid[iid].wwpn = wwpn; 1528 port->wwpn_iid[iid].in_use++; 1529 mtx_unlock(&softc->ctl_lock); 1530 1531 return (iid); 1532 } 1533 1534 static int 1535 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1536 { 1537 int len; 1538 1539 switch (port->port_type) { 1540 case CTL_PORT_FC: 1541 { 1542 struct scsi_transportid_fcp *id = 1543 (struct scsi_transportid_fcp *)buf; 1544 if (port->wwpn_iid[iid].wwpn == 0) 1545 return (0); 1546 memset(id, 0, sizeof(*id)); 1547 id->format_protocol = SCSI_PROTO_FC; 1548 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1549 return (sizeof(*id)); 1550 } 1551 case CTL_PORT_ISCSI: 1552 { 1553 struct scsi_transportid_iscsi_port *id = 1554 (struct scsi_transportid_iscsi_port *)buf; 1555 if (port->wwpn_iid[iid].name == NULL) 1556 return (0); 1557 memset(id, 0, 256); 1558 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1559 SCSI_PROTO_ISCSI; 1560 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1561 len = roundup2(min(len, 252), 4); 1562 scsi_ulto2b(len, id->additional_length); 1563 return (sizeof(*id) + len); 1564 } 1565 case CTL_PORT_SAS: 1566 { 1567 struct scsi_transportid_sas *id = 1568 (struct scsi_transportid_sas *)buf; 1569 if (port->wwpn_iid[iid].wwpn == 0) 1570 return (0); 1571 memset(id, 0, sizeof(*id)); 1572 id->format_protocol = SCSI_PROTO_SAS; 1573 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1574 return (sizeof(*id)); 1575 } 1576 default: 1577 { 1578 struct scsi_transportid_spi *id = 1579 (struct scsi_transportid_spi *)buf; 1580 memset(id, 0, sizeof(*id)); 1581 id->format_protocol = SCSI_PROTO_SPI; 1582 scsi_ulto2b(iid, id->scsi_addr); 1583 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1584 return (sizeof(*id)); 1585 } 1586 } 1587 } 1588 1589 /* 1590 * Serialize a command that went down the "wrong" side, and so was sent to 1591 * this controller for execution. The logic is a little different than the 1592 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1593 * sent back to the other side, but in the success case, we execute the 1594 * command on this side (XFER mode) or tell the other side to execute it 1595 * (SER_ONLY mode). 1596 */ 1597 static int 1598 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1599 { 1600 struct ctl_softc *softc; 1601 union ctl_ha_msg msg_info; 1602 struct ctl_lun *lun; 1603 int retval = 0; 1604 uint32_t targ_lun; 1605 1606 softc = control_softc; 1607 1608 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 1609 lun = softc->ctl_luns[targ_lun]; 1610 if (lun==NULL) 1611 { 1612 /* 1613 * Why isn't LUN defined? The other side wouldn't 1614 * send a cmd if the LUN is undefined. 
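		 * (We answer with CHECK CONDITION, ILLEGAL REQUEST,
		 * ASC/ASCQ 0x25/0x00 "Logical unit not supported", carried
		 * back to the other SC in a CTL_MSG_BAD_JUJU message below.)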
1615 */ 1616 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1617 1618 /* "Logical unit not supported" */ 1619 ctl_set_sense_data(&msg_info.scsi.sense_data, 1620 lun, 1621 /*sense_format*/SSD_TYPE_NONE, 1622 /*current_error*/ 1, 1623 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1624 /*asc*/ 0x25, 1625 /*ascq*/ 0x00, 1626 SSD_ELEM_NONE); 1627 1628 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1629 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1630 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1631 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1632 msg_info.hdr.serializing_sc = NULL; 1633 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1634 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1635 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1636 } 1637 return(1); 1638 1639 } 1640 1641 mtx_lock(&lun->lun_lock); 1642 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1643 1644 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1645 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1646 ooa_links))) { 1647 case CTL_ACTION_BLOCK: 1648 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1649 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1650 blocked_links); 1651 break; 1652 case CTL_ACTION_PASS: 1653 case CTL_ACTION_SKIP: 1654 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1655 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1656 ctl_enqueue_rtr((union ctl_io *)ctsio); 1657 } else { 1658 1659 /* send msg back to other side */ 1660 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1661 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1662 msg_info.hdr.msg_type = CTL_MSG_R2R; 1663 #if 0 1664 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc); 1665 #endif 1666 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1667 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1668 } 1669 } 1670 break; 1671 case CTL_ACTION_OVERLAP: 1672 /* OVERLAPPED COMMANDS ATTEMPTED */ 1673 ctl_set_sense_data(&msg_info.scsi.sense_data, 1674 lun, 1675 /*sense_format*/SSD_TYPE_NONE, 1676 /*current_error*/ 1, 1677 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1678 /*asc*/ 0x4E, 1679 /*ascq*/ 0x00, 1680 SSD_ELEM_NONE); 1681 1682 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1683 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1684 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1685 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1686 msg_info.hdr.serializing_sc = NULL; 1687 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1688 #if 0 1689 printf("BAD JUJU:Major Bummer Overlap\n"); 1690 #endif 1691 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1692 retval = 1; 1693 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1694 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1695 } 1696 break; 1697 case CTL_ACTION_OVERLAP_TAG: 1698 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1699 ctl_set_sense_data(&msg_info.scsi.sense_data, 1700 lun, 1701 /*sense_format*/SSD_TYPE_NONE, 1702 /*current_error*/ 1, 1703 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1704 /*asc*/ 0x4D, 1705 /*ascq*/ ctsio->tag_num & 0xff, 1706 SSD_ELEM_NONE); 1707 1708 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1709 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1710 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1711 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1712 msg_info.hdr.serializing_sc = NULL; 1713 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1714 #if 0 1715 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1716 #endif 1717 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1718 retval = 1; 1719 if 
(ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1720 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1721 } 1722 break; 1723 case CTL_ACTION_ERROR: 1724 default: 1725 /* "Internal target failure" */ 1726 ctl_set_sense_data(&msg_info.scsi.sense_data, 1727 lun, 1728 /*sense_format*/SSD_TYPE_NONE, 1729 /*current_error*/ 1, 1730 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 1731 /*asc*/ 0x44, 1732 /*ascq*/ 0x00, 1733 SSD_ELEM_NONE); 1734 1735 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1736 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1737 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1738 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1739 msg_info.hdr.serializing_sc = NULL; 1740 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1741 #if 0 1742 printf("BAD JUJU:Major Bummer HW Error\n"); 1743 #endif 1744 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1745 retval = 1; 1746 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1747 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1748 } 1749 break; 1750 } 1751 mtx_unlock(&lun->lun_lock); 1752 return (retval); 1753 } 1754 1755 /* 1756 * Returns 0 for success, errno for failure. 1757 */ 1758 static int 1759 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 1760 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 1761 { 1762 union ctl_io *io; 1763 int retval; 1764 1765 retval = 0; 1766 1767 mtx_lock(&lun->lun_lock); 1768 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 1769 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 1770 ooa_links)) { 1771 struct ctl_ooa_entry *entry; 1772 1773 /* 1774 * If we've got more than we can fit, just count the 1775 * remaining entries. 1776 */ 1777 if (*cur_fill_num >= ooa_hdr->alloc_num) 1778 continue; 1779 1780 entry = &kern_entries[*cur_fill_num]; 1781 1782 entry->tag_num = io->scsiio.tag_num; 1783 entry->lun_num = lun->lun; 1784 #ifdef CTL_TIME_IO 1785 entry->start_bt = io->io_hdr.start_bt; 1786 #endif 1787 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 1788 entry->cdb_len = io->scsiio.cdb_len; 1789 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1790 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1791 1792 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1793 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 1794 1795 if (io->io_hdr.flags & CTL_FLAG_ABORT) 1796 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 1797 1798 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 1799 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 1800 1801 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 1802 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 1803 } 1804 mtx_unlock(&lun->lun_lock); 1805 1806 return (retval); 1807 } 1808 1809 static void * 1810 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 1811 size_t error_str_len) 1812 { 1813 void *kptr; 1814 1815 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 1816 1817 if (copyin(user_addr, kptr, len) != 0) { 1818 snprintf(error_str, error_str_len, "Error copying %d bytes " 1819 "from user address %p to kernel address %p", len, 1820 user_addr, kptr); 1821 free(kptr, M_CTL); 1822 return (NULL); 1823 } 1824 1825 return (kptr); 1826 } 1827 1828 static void 1829 ctl_free_args(int num_args, struct ctl_be_arg *args) 1830 { 1831 int i; 1832 1833 if (args == NULL) 1834 return; 1835 1836 for (i = 0; i < num_args; i++) { 1837 free(args[i].kname, M_CTL); 1838 free(args[i].kvalue, M_CTL); 1839 } 1840 1841 free(args, M_CTL); 1842 } 1843 1844 static struct ctl_be_arg * 1845 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 1846 char *error_str, size_t 
error_str_len) 1847 { 1848 struct ctl_be_arg *args; 1849 int i; 1850 1851 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 1852 error_str, error_str_len); 1853 1854 if (args == NULL) 1855 goto bailout; 1856 1857 for (i = 0; i < num_args; i++) { 1858 args[i].kname = NULL; 1859 args[i].kvalue = NULL; 1860 } 1861 1862 for (i = 0; i < num_args; i++) { 1863 uint8_t *tmpptr; 1864 1865 args[i].kname = ctl_copyin_alloc(args[i].name, 1866 args[i].namelen, error_str, error_str_len); 1867 if (args[i].kname == NULL) 1868 goto bailout; 1869 1870 if (args[i].kname[args[i].namelen - 1] != '\0') { 1871 snprintf(error_str, error_str_len, "Argument %d " 1872 "name is not NUL-terminated", i); 1873 goto bailout; 1874 } 1875 1876 if (args[i].flags & CTL_BEARG_RD) { 1877 tmpptr = ctl_copyin_alloc(args[i].value, 1878 args[i].vallen, error_str, error_str_len); 1879 if (tmpptr == NULL) 1880 goto bailout; 1881 if ((args[i].flags & CTL_BEARG_ASCII) 1882 && (tmpptr[args[i].vallen - 1] != '\0')) { 1883 snprintf(error_str, error_str_len, "Argument " 1884 "%d value is not NUL-terminated", i); 1885 goto bailout; 1886 } 1887 args[i].kvalue = tmpptr; 1888 } else { 1889 args[i].kvalue = malloc(args[i].vallen, 1890 M_CTL, M_WAITOK | M_ZERO); 1891 } 1892 } 1893 1894 return (args); 1895 bailout: 1896 1897 ctl_free_args(num_args, args); 1898 1899 return (NULL); 1900 } 1901 1902 static void 1903 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 1904 { 1905 int i; 1906 1907 for (i = 0; i < num_args; i++) { 1908 if (args[i].flags & CTL_BEARG_WR) 1909 copyout(args[i].kvalue, args[i].value, args[i].vallen); 1910 } 1911 } 1912 1913 /* 1914 * Escape characters that are illegal or not recommended in XML. 1915 */ 1916 int 1917 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 1918 { 1919 char *end = str + size; 1920 int retval; 1921 1922 retval = 0; 1923 1924 for (; *str && str < end; str++) { 1925 switch (*str) { 1926 case '&': 1927 retval = sbuf_printf(sb, "&"); 1928 break; 1929 case '>': 1930 retval = sbuf_printf(sb, ">"); 1931 break; 1932 case '<': 1933 retval = sbuf_printf(sb, "<"); 1934 break; 1935 default: 1936 retval = sbuf_putc(sb, *str); 1937 break; 1938 } 1939 1940 if (retval != 0) 1941 break; 1942 1943 } 1944 1945 return (retval); 1946 } 1947 1948 static void 1949 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 1950 { 1951 struct scsi_vpd_id_descriptor *desc; 1952 int i; 1953 1954 if (id == NULL || id->len < 4) 1955 return; 1956 desc = (struct scsi_vpd_id_descriptor *)id->data; 1957 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 1958 case SVPD_ID_TYPE_T10: 1959 sbuf_printf(sb, "t10."); 1960 break; 1961 case SVPD_ID_TYPE_EUI64: 1962 sbuf_printf(sb, "eui."); 1963 break; 1964 case SVPD_ID_TYPE_NAA: 1965 sbuf_printf(sb, "naa."); 1966 break; 1967 case SVPD_ID_TYPE_SCSI_NAME: 1968 break; 1969 } 1970 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 1971 case SVPD_ID_CODESET_BINARY: 1972 for (i = 0; i < desc->length; i++) 1973 sbuf_printf(sb, "%02x", desc->identifier[i]); 1974 break; 1975 case SVPD_ID_CODESET_ASCII: 1976 sbuf_printf(sb, "%.*s", (int)desc->length, 1977 (char *)desc->identifier); 1978 break; 1979 case SVPD_ID_CODESET_UTF8: 1980 sbuf_printf(sb, "%s", (char *)desc->identifier); 1981 break; 1982 } 1983 } 1984 1985 static int 1986 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 1987 struct thread *td) 1988 { 1989 struct ctl_softc *softc; 1990 int retval; 1991 1992 softc = control_softc; 1993 1994 retval = 0; 1995 1996 switch (cmd) { 1997 case CTL_IO: 1998 retval = 
ctl_ioctl_io(dev, cmd, addr, flag, td); 1999 break; 2000 case CTL_ENABLE_PORT: 2001 case CTL_DISABLE_PORT: 2002 case CTL_SET_PORT_WWNS: { 2003 struct ctl_port *port; 2004 struct ctl_port_entry *entry; 2005 2006 entry = (struct ctl_port_entry *)addr; 2007 2008 mtx_lock(&softc->ctl_lock); 2009 STAILQ_FOREACH(port, &softc->port_list, links) { 2010 int action, done; 2011 2012 action = 0; 2013 done = 0; 2014 2015 if ((entry->port_type == CTL_PORT_NONE) 2016 && (entry->targ_port == port->targ_port)) { 2017 /* 2018 * If the user only wants to enable or 2019 * disable or set WWNs on a specific port, 2020 * do the operation and we're done. 2021 */ 2022 action = 1; 2023 done = 1; 2024 } else if (entry->port_type & port->port_type) { 2025 /* 2026 * Compare the user's type mask with the 2027 * particular frontend type to see if we 2028 * have a match. 2029 */ 2030 action = 1; 2031 done = 0; 2032 2033 /* 2034 * Make sure the user isn't trying to set 2035 * WWNs on multiple ports at the same time. 2036 */ 2037 if (cmd == CTL_SET_PORT_WWNS) { 2038 printf("%s: Can't set WWNs on " 2039 "multiple ports\n", __func__); 2040 retval = EINVAL; 2041 break; 2042 } 2043 } 2044 if (action != 0) { 2045 /* 2046 * XXX KDM we have to drop the lock here, 2047 * because the online/offline operations 2048 * can potentially block. We need to 2049 * reference count the frontends so they 2050 * can't go away, 2051 */ 2052 mtx_unlock(&softc->ctl_lock); 2053 2054 if (cmd == CTL_ENABLE_PORT) { 2055 ctl_port_online(port); 2056 } else if (cmd == CTL_DISABLE_PORT) { 2057 ctl_port_offline(port); 2058 } 2059 2060 mtx_lock(&softc->ctl_lock); 2061 2062 if (cmd == CTL_SET_PORT_WWNS) 2063 ctl_port_set_wwns(port, 2064 (entry->flags & CTL_PORT_WWNN_VALID) ? 2065 1 : 0, entry->wwnn, 2066 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2067 1 : 0, entry->wwpn); 2068 } 2069 if (done != 0) 2070 break; 2071 } 2072 mtx_unlock(&softc->ctl_lock); 2073 break; 2074 } 2075 case CTL_GET_PORT_LIST: { 2076 struct ctl_port *port; 2077 struct ctl_port_list *list; 2078 int i; 2079 2080 list = (struct ctl_port_list *)addr; 2081 2082 if (list->alloc_len != (list->alloc_num * 2083 sizeof(struct ctl_port_entry))) { 2084 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2085 "alloc_num %u * sizeof(struct ctl_port_entry) " 2086 "%zu\n", __func__, list->alloc_len, 2087 list->alloc_num, sizeof(struct ctl_port_entry)); 2088 retval = EINVAL; 2089 break; 2090 } 2091 list->fill_len = 0; 2092 list->fill_num = 0; 2093 list->dropped_num = 0; 2094 i = 0; 2095 mtx_lock(&softc->ctl_lock); 2096 STAILQ_FOREACH(port, &softc->port_list, links) { 2097 struct ctl_port_entry entry, *list_entry; 2098 2099 if (list->fill_num >= list->alloc_num) { 2100 list->dropped_num++; 2101 continue; 2102 } 2103 2104 entry.port_type = port->port_type; 2105 strlcpy(entry.port_name, port->port_name, 2106 sizeof(entry.port_name)); 2107 entry.targ_port = port->targ_port; 2108 entry.physical_port = port->physical_port; 2109 entry.virtual_port = port->virtual_port; 2110 entry.wwnn = port->wwnn; 2111 entry.wwpn = port->wwpn; 2112 if (port->status & CTL_PORT_STATUS_ONLINE) 2113 entry.online = 1; 2114 else 2115 entry.online = 0; 2116 2117 list_entry = &list->entries[i]; 2118 2119 retval = copyout(&entry, list_entry, sizeof(entry)); 2120 if (retval != 0) { 2121 printf("%s: CTL_GET_PORT_LIST: copyout " 2122 "returned %d\n", __func__, retval); 2123 break; 2124 } 2125 i++; 2126 list->fill_num++; 2127 list->fill_len += sizeof(entry); 2128 } 2129 mtx_unlock(&softc->ctl_lock); 2130 2131 /* 2132 * If this is non-zero, we had a copyout fault, so there's 2133 * probably no point in attempting to set the status inside 2134 * the structure. 2135 */ 2136 if (retval != 0) 2137 break; 2138 2139 if (list->dropped_num > 0) 2140 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2141 else 2142 list->status = CTL_PORT_LIST_OK; 2143 break; 2144 } 2145 case CTL_DUMP_OOA: { 2146 struct ctl_lun *lun; 2147 union ctl_io *io; 2148 char printbuf[128]; 2149 struct sbuf sb; 2150 2151 mtx_lock(&softc->ctl_lock); 2152 printf("Dumping OOA queues:\n"); 2153 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2154 mtx_lock(&lun->lun_lock); 2155 for (io = (union ctl_io *)TAILQ_FIRST( 2156 &lun->ooa_queue); io != NULL; 2157 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2158 ooa_links)) { 2159 sbuf_new(&sb, printbuf, sizeof(printbuf), 2160 SBUF_FIXEDLEN); 2161 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2162 (intmax_t)lun->lun, 2163 io->scsiio.tag_num, 2164 (io->io_hdr.flags & 2165 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2166 (io->io_hdr.flags & 2167 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2168 (io->io_hdr.flags & 2169 CTL_FLAG_ABORT) ? " ABORT" : "", 2170 (io->io_hdr.flags & 2171 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2172 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2173 sbuf_finish(&sb); 2174 printf("%s\n", sbuf_data(&sb)); 2175 } 2176 mtx_unlock(&lun->lun_lock); 2177 } 2178 printf("OOA queues dump done\n"); 2179 mtx_unlock(&softc->ctl_lock); 2180 break; 2181 } 2182 case CTL_GET_OOA: { 2183 struct ctl_lun *lun; 2184 struct ctl_ooa *ooa_hdr; 2185 struct ctl_ooa_entry *entries; 2186 uint32_t cur_fill_num; 2187 2188 ooa_hdr = (struct ctl_ooa *)addr; 2189 2190 if ((ooa_hdr->alloc_len == 0) 2191 || (ooa_hdr->alloc_num == 0)) { 2192 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2193 "must be non-zero\n", __func__, 2194 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2195 retval = EINVAL; 2196 break; 2197 } 2198 2199 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2200 sizeof(struct ctl_ooa_entry))) { 2201 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2202 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2203 __func__, ooa_hdr->alloc_len, 2204 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2205 retval = EINVAL; 2206 break; 2207 } 2208 2209 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2210 if (entries == NULL) { 2211 printf("%s: could not allocate %d bytes for OOA " 2212 "dump\n", __func__, ooa_hdr->alloc_len); 2213 retval = ENOMEM; 2214 break; 2215 } 2216 2217 mtx_lock(&softc->ctl_lock); 2218 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2219 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2220 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2221 mtx_unlock(&softc->ctl_lock); 2222 free(entries, M_CTL); 2223 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2224 __func__, (uintmax_t)ooa_hdr->lun_num); 2225 retval = EINVAL; 2226 break; 2227 } 2228 2229 cur_fill_num = 0; 2230 2231 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2232 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2233 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2234 ooa_hdr, entries); 2235 if (retval != 0) 2236 break; 2237 } 2238 if (retval != 0) { 2239 mtx_unlock(&softc->ctl_lock); 2240 free(entries, M_CTL); 2241 break; 2242 } 2243 } else { 2244 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2245 2246 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2247 entries); 2248 } 2249 mtx_unlock(&softc->ctl_lock); 2250 2251 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2252 ooa_hdr->fill_len = ooa_hdr->fill_num * 2253 sizeof(struct ctl_ooa_entry); 2254 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2255 if (retval != 0) { 2256 printf("%s: error copying out %d bytes for OOA dump\n", 2257 __func__, ooa_hdr->fill_len); 2258 } 2259 2260 getbintime(&ooa_hdr->cur_bt); 2261 2262 if (cur_fill_num > ooa_hdr->alloc_num) { 2263 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2264 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2265 } else { 2266 ooa_hdr->dropped_num = 0; 2267 ooa_hdr->status = CTL_OOA_OK; 2268 } 2269 2270 free(entries, M_CTL); 2271 break; 2272 } 2273 case CTL_CHECK_OOA: { 2274 union ctl_io *io; 2275 struct ctl_lun *lun; 2276 struct ctl_ooa_info *ooa_info; 2277 2278 2279 ooa_info = (struct ctl_ooa_info *)addr; 2280 2281 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2282 ooa_info->status = CTL_OOA_INVALID_LUN; 2283 break; 2284 } 2285 mtx_lock(&softc->ctl_lock); 2286 lun = softc->ctl_luns[ooa_info->lun_id]; 2287 if (lun == NULL) { 2288 mtx_unlock(&softc->ctl_lock); 2289 ooa_info->status = CTL_OOA_INVALID_LUN; 2290 break; 2291 } 2292 mtx_lock(&lun->lun_lock); 2293 mtx_unlock(&softc->ctl_lock); 2294 ooa_info->num_entries = 0; 2295 for (io = (union ctl_io 
*)TAILQ_FIRST(&lun->ooa_queue); 2296 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2297 &io->io_hdr, ooa_links)) { 2298 ooa_info->num_entries++; 2299 } 2300 mtx_unlock(&lun->lun_lock); 2301 2302 ooa_info->status = CTL_OOA_SUCCESS; 2303 2304 break; 2305 } 2306 case CTL_DELAY_IO: { 2307 struct ctl_io_delay_info *delay_info; 2308 #ifdef CTL_IO_DELAY 2309 struct ctl_lun *lun; 2310 #endif /* CTL_IO_DELAY */ 2311 2312 delay_info = (struct ctl_io_delay_info *)addr; 2313 2314 #ifdef CTL_IO_DELAY 2315 mtx_lock(&softc->ctl_lock); 2316 2317 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2318 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2319 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2320 } else { 2321 lun = softc->ctl_luns[delay_info->lun_id]; 2322 mtx_lock(&lun->lun_lock); 2323 2324 delay_info->status = CTL_DELAY_STATUS_OK; 2325 2326 switch (delay_info->delay_type) { 2327 case CTL_DELAY_TYPE_CONT: 2328 break; 2329 case CTL_DELAY_TYPE_ONESHOT: 2330 break; 2331 default: 2332 delay_info->status = 2333 CTL_DELAY_STATUS_INVALID_TYPE; 2334 break; 2335 } 2336 2337 switch (delay_info->delay_loc) { 2338 case CTL_DELAY_LOC_DATAMOVE: 2339 lun->delay_info.datamove_type = 2340 delay_info->delay_type; 2341 lun->delay_info.datamove_delay = 2342 delay_info->delay_secs; 2343 break; 2344 case CTL_DELAY_LOC_DONE: 2345 lun->delay_info.done_type = 2346 delay_info->delay_type; 2347 lun->delay_info.done_delay = 2348 delay_info->delay_secs; 2349 break; 2350 default: 2351 delay_info->status = 2352 CTL_DELAY_STATUS_INVALID_LOC; 2353 break; 2354 } 2355 mtx_unlock(&lun->lun_lock); 2356 } 2357 2358 mtx_unlock(&softc->ctl_lock); 2359 #else 2360 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2361 #endif /* CTL_IO_DELAY */ 2362 break; 2363 } 2364 case CTL_REALSYNC_SET: { 2365 int *syncstate; 2366 2367 syncstate = (int *)addr; 2368 2369 mtx_lock(&softc->ctl_lock); 2370 switch (*syncstate) { 2371 case 0: 2372 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2373 break; 2374 case 1: 2375 softc->flags |= CTL_FLAG_REAL_SYNC; 2376 break; 2377 default: 2378 retval = EINVAL; 2379 break; 2380 } 2381 mtx_unlock(&softc->ctl_lock); 2382 break; 2383 } 2384 case CTL_REALSYNC_GET: { 2385 int *syncstate; 2386 2387 syncstate = (int*)addr; 2388 2389 mtx_lock(&softc->ctl_lock); 2390 if (softc->flags & CTL_FLAG_REAL_SYNC) 2391 *syncstate = 1; 2392 else 2393 *syncstate = 0; 2394 mtx_unlock(&softc->ctl_lock); 2395 2396 break; 2397 } 2398 case CTL_SETSYNC: 2399 case CTL_GETSYNC: { 2400 struct ctl_sync_info *sync_info; 2401 struct ctl_lun *lun; 2402 2403 sync_info = (struct ctl_sync_info *)addr; 2404 2405 mtx_lock(&softc->ctl_lock); 2406 lun = softc->ctl_luns[sync_info->lun_id]; 2407 if (lun == NULL) { 2408 mtx_unlock(&softc->ctl_lock); 2409 sync_info->status = CTL_GS_SYNC_NO_LUN; 2410 } 2411 /* 2412 * Get or set the sync interval. We're not bounds checking 2413 * in the set case, hopefully the user won't do something 2414 * silly. 
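		 * XXX: Note that there is no bounds check on lun_id above, and
		 * when the LUN lookup fails we only set CTL_GS_SYNC_NO_LUN
		 * without breaking out, so the lun_lock acquisition below
		 * would dereference a NULL lun.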
2415 */ 2416 mtx_lock(&lun->lun_lock); 2417 mtx_unlock(&softc->ctl_lock); 2418 if (cmd == CTL_GETSYNC) 2419 sync_info->sync_interval = lun->sync_interval; 2420 else 2421 lun->sync_interval = sync_info->sync_interval; 2422 mtx_unlock(&lun->lun_lock); 2423 2424 sync_info->status = CTL_GS_SYNC_OK; 2425 2426 break; 2427 } 2428 case CTL_GETSTATS: { 2429 struct ctl_stats *stats; 2430 struct ctl_lun *lun; 2431 int i; 2432 2433 stats = (struct ctl_stats *)addr; 2434 2435 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2436 stats->alloc_len) { 2437 stats->status = CTL_SS_NEED_MORE_SPACE; 2438 stats->num_luns = softc->num_luns; 2439 break; 2440 } 2441 /* 2442 * XXX KDM no locking here. If the LUN list changes, 2443 * things can blow up. 2444 */ 2445 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2446 i++, lun = STAILQ_NEXT(lun, links)) { 2447 retval = copyout(&lun->stats, &stats->lun_stats[i], 2448 sizeof(lun->stats)); 2449 if (retval != 0) 2450 break; 2451 } 2452 stats->num_luns = softc->num_luns; 2453 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2454 softc->num_luns; 2455 stats->status = CTL_SS_OK; 2456 #ifdef CTL_TIME_IO 2457 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2458 #else 2459 stats->flags = CTL_STATS_FLAG_NONE; 2460 #endif 2461 getnanouptime(&stats->timestamp); 2462 break; 2463 } 2464 case CTL_ERROR_INJECT: { 2465 struct ctl_error_desc *err_desc, *new_err_desc; 2466 struct ctl_lun *lun; 2467 2468 err_desc = (struct ctl_error_desc *)addr; 2469 2470 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2471 M_WAITOK | M_ZERO); 2472 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2473 2474 mtx_lock(&softc->ctl_lock); 2475 lun = softc->ctl_luns[err_desc->lun_id]; 2476 if (lun == NULL) { 2477 mtx_unlock(&softc->ctl_lock); 2478 free(new_err_desc, M_CTL); 2479 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2480 __func__, (uintmax_t)err_desc->lun_id); 2481 retval = EINVAL; 2482 break; 2483 } 2484 mtx_lock(&lun->lun_lock); 2485 mtx_unlock(&softc->ctl_lock); 2486 2487 /* 2488 * We could do some checking here to verify the validity 2489 * of the request, but given the complexity of error 2490 * injection requests, the checking logic would be fairly 2491 * complex. 2492 * 2493 * For now, if the request is invalid, it just won't get 2494 * executed and might get deleted. 2495 */ 2496 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2497 2498 /* 2499 * XXX KDM check to make sure the serial number is unique, 2500 * in case we somehow manage to wrap. That shouldn't 2501 * happen for a very long time, but it's the right thing to 2502 * do. 
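		 * The serial number assigned below is also copied back out to
		 * the caller, who can later pass it to CTL_ERROR_INJECT_DELETE
		 * to remove this descriptor.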
2503 */ 2504 new_err_desc->serial = lun->error_serial; 2505 err_desc->serial = lun->error_serial; 2506 lun->error_serial++; 2507 2508 mtx_unlock(&lun->lun_lock); 2509 break; 2510 } 2511 case CTL_ERROR_INJECT_DELETE: { 2512 struct ctl_error_desc *delete_desc, *desc, *desc2; 2513 struct ctl_lun *lun; 2514 int delete_done; 2515 2516 delete_desc = (struct ctl_error_desc *)addr; 2517 delete_done = 0; 2518 2519 mtx_lock(&softc->ctl_lock); 2520 lun = softc->ctl_luns[delete_desc->lun_id]; 2521 if (lun == NULL) { 2522 mtx_unlock(&softc->ctl_lock); 2523 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2524 __func__, (uintmax_t)delete_desc->lun_id); 2525 retval = EINVAL; 2526 break; 2527 } 2528 mtx_lock(&lun->lun_lock); 2529 mtx_unlock(&softc->ctl_lock); 2530 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2531 if (desc->serial != delete_desc->serial) 2532 continue; 2533 2534 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2535 links); 2536 free(desc, M_CTL); 2537 delete_done = 1; 2538 } 2539 mtx_unlock(&lun->lun_lock); 2540 if (delete_done == 0) { 2541 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2542 "error serial %ju on LUN %u\n", __func__, 2543 delete_desc->serial, delete_desc->lun_id); 2544 retval = EINVAL; 2545 break; 2546 } 2547 break; 2548 } 2549 case CTL_DUMP_STRUCTS: { 2550 int i, j, k; 2551 struct ctl_port *port; 2552 struct ctl_frontend *fe; 2553 2554 mtx_lock(&softc->ctl_lock); 2555 printf("CTL Persistent Reservation information start:\n"); 2556 for (i = 0; i < CTL_MAX_LUNS; i++) { 2557 struct ctl_lun *lun; 2558 2559 lun = softc->ctl_luns[i]; 2560 2561 if ((lun == NULL) 2562 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2563 continue; 2564 2565 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2566 if (lun->pr_keys[j] == NULL) 2567 continue; 2568 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2569 if (lun->pr_keys[j][k] == 0) 2570 continue; 2571 printf(" LUN %d port %d iid %d key " 2572 "%#jx\n", i, j, k, 2573 (uintmax_t)lun->pr_keys[j][k]); 2574 } 2575 } 2576 } 2577 printf("CTL Persistent Reservation information end\n"); 2578 printf("CTL Ports:\n"); 2579 STAILQ_FOREACH(port, &softc->port_list, links) { 2580 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2581 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2582 port->frontend->name, port->port_type, 2583 port->physical_port, port->virtual_port, 2584 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2585 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2586 if (port->wwpn_iid[j].in_use == 0 && 2587 port->wwpn_iid[j].wwpn == 0 && 2588 port->wwpn_iid[j].name == NULL) 2589 continue; 2590 2591 printf(" iid %u use %d WWPN %#jx '%s'\n", 2592 j, port->wwpn_iid[j].in_use, 2593 (uintmax_t)port->wwpn_iid[j].wwpn, 2594 port->wwpn_iid[j].name); 2595 } 2596 } 2597 printf("CTL Port information end\n"); 2598 mtx_unlock(&softc->ctl_lock); 2599 /* 2600 * XXX KDM calling this without a lock. We'd likely want 2601 * to drop the lock before calling the frontend's dump 2602 * routine anyway. 
2603 */ 2604 printf("CTL Frontends:\n"); 2605 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2606 printf(" Frontend '%s'\n", fe->name); 2607 if (fe->fe_dump != NULL) 2608 fe->fe_dump(); 2609 } 2610 printf("CTL Frontend information end\n"); 2611 break; 2612 } 2613 case CTL_LUN_REQ: { 2614 struct ctl_lun_req *lun_req; 2615 struct ctl_backend_driver *backend; 2616 2617 lun_req = (struct ctl_lun_req *)addr; 2618 2619 backend = ctl_backend_find(lun_req->backend); 2620 if (backend == NULL) { 2621 lun_req->status = CTL_LUN_ERROR; 2622 snprintf(lun_req->error_str, 2623 sizeof(lun_req->error_str), 2624 "Backend \"%s\" not found.", 2625 lun_req->backend); 2626 break; 2627 } 2628 if (lun_req->num_be_args > 0) { 2629 lun_req->kern_be_args = ctl_copyin_args( 2630 lun_req->num_be_args, 2631 lun_req->be_args, 2632 lun_req->error_str, 2633 sizeof(lun_req->error_str)); 2634 if (lun_req->kern_be_args == NULL) { 2635 lun_req->status = CTL_LUN_ERROR; 2636 break; 2637 } 2638 } 2639 2640 retval = backend->ioctl(dev, cmd, addr, flag, td); 2641 2642 if (lun_req->num_be_args > 0) { 2643 ctl_copyout_args(lun_req->num_be_args, 2644 lun_req->kern_be_args); 2645 ctl_free_args(lun_req->num_be_args, 2646 lun_req->kern_be_args); 2647 } 2648 break; 2649 } 2650 case CTL_LUN_LIST: { 2651 struct sbuf *sb; 2652 struct ctl_lun *lun; 2653 struct ctl_lun_list *list; 2654 struct ctl_option *opt; 2655 2656 list = (struct ctl_lun_list *)addr; 2657 2658 /* 2659 * Allocate a fixed length sbuf here, based on the length 2660 * of the user's buffer. We could allocate an auto-extending 2661 * buffer, and then tell the user how much larger our 2662 * amount of data is than his buffer, but that presents 2663 * some problems: 2664 * 2665 * 1. The sbuf(9) routines use a blocking malloc, and so 2666 * we can't hold a lock while calling them with an 2667 * auto-extending buffer. 2668 * 2669 * 2. There is not currently a LUN reference counting 2670 * mechanism, outside of outstanding transactions on 2671 * the LUN's OOA queue. So a LUN could go away on us 2672 * while we're getting the LUN number, backend-specific 2673 * information, etc. Thus, given the way things 2674 * currently work, we need to hold the CTL lock while 2675 * grabbing LUN information. 2676 * 2677 * So, from the user's standpoint, the best thing to do is 2678 * allocate what he thinks is a reasonable buffer length, 2679 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2680 * double the buffer length and try again. (And repeat 2681 * that until he succeeds.) 2682 */ 2683 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2684 if (sb == NULL) { 2685 list->status = CTL_LUN_LIST_ERROR; 2686 snprintf(list->error_str, sizeof(list->error_str), 2687 "Unable to allocate %d bytes for LUN list", 2688 list->alloc_len); 2689 break; 2690 } 2691 2692 sbuf_printf(sb, "<ctllunlist>\n"); 2693 2694 mtx_lock(&softc->ctl_lock); 2695 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2696 mtx_lock(&lun->lun_lock); 2697 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2698 (uintmax_t)lun->lun); 2699 2700 /* 2701 * Bail out as soon as we see that we've overfilled 2702 * the buffer. 2703 */ 2704 if (retval != 0) 2705 break; 2706 2707 retval = sbuf_printf(sb, "\t<backend_type>%s" 2708 "</backend_type>\n", 2709 (lun->backend == NULL) ? 
"none" : 2710 lun->backend->name); 2711 2712 if (retval != 0) 2713 break; 2714 2715 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2716 lun->be_lun->lun_type); 2717 2718 if (retval != 0) 2719 break; 2720 2721 if (lun->backend == NULL) { 2722 retval = sbuf_printf(sb, "</lun>\n"); 2723 if (retval != 0) 2724 break; 2725 continue; 2726 } 2727 2728 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2729 (lun->be_lun->maxlba > 0) ? 2730 lun->be_lun->maxlba + 1 : 0); 2731 2732 if (retval != 0) 2733 break; 2734 2735 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2736 lun->be_lun->blocksize); 2737 2738 if (retval != 0) 2739 break; 2740 2741 retval = sbuf_printf(sb, "\t<serial_number>"); 2742 2743 if (retval != 0) 2744 break; 2745 2746 retval = ctl_sbuf_printf_esc(sb, 2747 lun->be_lun->serial_num, 2748 sizeof(lun->be_lun->serial_num)); 2749 2750 if (retval != 0) 2751 break; 2752 2753 retval = sbuf_printf(sb, "</serial_number>\n"); 2754 2755 if (retval != 0) 2756 break; 2757 2758 retval = sbuf_printf(sb, "\t<device_id>"); 2759 2760 if (retval != 0) 2761 break; 2762 2763 retval = ctl_sbuf_printf_esc(sb, 2764 lun->be_lun->device_id, 2765 sizeof(lun->be_lun->device_id)); 2766 2767 if (retval != 0) 2768 break; 2769 2770 retval = sbuf_printf(sb, "</device_id>\n"); 2771 2772 if (retval != 0) 2773 break; 2774 2775 if (lun->backend->lun_info != NULL) { 2776 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 2777 if (retval != 0) 2778 break; 2779 } 2780 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 2781 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2782 opt->name, opt->value, opt->name); 2783 if (retval != 0) 2784 break; 2785 } 2786 2787 retval = sbuf_printf(sb, "</lun>\n"); 2788 2789 if (retval != 0) 2790 break; 2791 mtx_unlock(&lun->lun_lock); 2792 } 2793 if (lun != NULL) 2794 mtx_unlock(&lun->lun_lock); 2795 mtx_unlock(&softc->ctl_lock); 2796 2797 if ((retval != 0) 2798 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 2799 retval = 0; 2800 sbuf_delete(sb); 2801 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 2802 snprintf(list->error_str, sizeof(list->error_str), 2803 "Out of space, %d bytes is too small", 2804 list->alloc_len); 2805 break; 2806 } 2807 2808 sbuf_finish(sb); 2809 2810 retval = copyout(sbuf_data(sb), list->lun_xml, 2811 sbuf_len(sb) + 1); 2812 2813 list->fill_len = sbuf_len(sb) + 1; 2814 list->status = CTL_LUN_LIST_OK; 2815 sbuf_delete(sb); 2816 break; 2817 } 2818 case CTL_ISCSI: { 2819 struct ctl_iscsi *ci; 2820 struct ctl_frontend *fe; 2821 2822 ci = (struct ctl_iscsi *)addr; 2823 2824 fe = ctl_frontend_find("iscsi"); 2825 if (fe == NULL) { 2826 ci->status = CTL_ISCSI_ERROR; 2827 snprintf(ci->error_str, sizeof(ci->error_str), 2828 "Frontend \"iscsi\" not found."); 2829 break; 2830 } 2831 2832 retval = fe->ioctl(dev, cmd, addr, flag, td); 2833 break; 2834 } 2835 case CTL_PORT_REQ: { 2836 struct ctl_req *req; 2837 struct ctl_frontend *fe; 2838 2839 req = (struct ctl_req *)addr; 2840 2841 fe = ctl_frontend_find(req->driver); 2842 if (fe == NULL) { 2843 req->status = CTL_LUN_ERROR; 2844 snprintf(req->error_str, sizeof(req->error_str), 2845 "Frontend \"%s\" not found.", req->driver); 2846 break; 2847 } 2848 if (req->num_args > 0) { 2849 req->kern_args = ctl_copyin_args(req->num_args, 2850 req->args, req->error_str, sizeof(req->error_str)); 2851 if (req->kern_args == NULL) { 2852 req->status = CTL_LUN_ERROR; 2853 break; 2854 } 2855 } 2856 2857 retval = fe->ioctl(dev, cmd, addr, flag, td); 2858 2859 if (req->num_args > 0) { 2860 ctl_copyout_args(req->num_args, 
req->kern_args); 2861 ctl_free_args(req->num_args, req->kern_args); 2862 } 2863 break; 2864 } 2865 case CTL_PORT_LIST: { 2866 struct sbuf *sb; 2867 struct ctl_port *port; 2868 struct ctl_lun_list *list; 2869 struct ctl_option *opt; 2870 int j; 2871 uint32_t plun; 2872 2873 list = (struct ctl_lun_list *)addr; 2874 2875 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2876 if (sb == NULL) { 2877 list->status = CTL_LUN_LIST_ERROR; 2878 snprintf(list->error_str, sizeof(list->error_str), 2879 "Unable to allocate %d bytes for LUN list", 2880 list->alloc_len); 2881 break; 2882 } 2883 2884 sbuf_printf(sb, "<ctlportlist>\n"); 2885 2886 mtx_lock(&softc->ctl_lock); 2887 STAILQ_FOREACH(port, &softc->port_list, links) { 2888 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 2889 (uintmax_t)port->targ_port); 2890 2891 /* 2892 * Bail out as soon as we see that we've overfilled 2893 * the buffer. 2894 */ 2895 if (retval != 0) 2896 break; 2897 2898 retval = sbuf_printf(sb, "\t<frontend_type>%s" 2899 "</frontend_type>\n", port->frontend->name); 2900 if (retval != 0) 2901 break; 2902 2903 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 2904 port->port_type); 2905 if (retval != 0) 2906 break; 2907 2908 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 2909 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 2910 if (retval != 0) 2911 break; 2912 2913 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 2914 port->port_name); 2915 if (retval != 0) 2916 break; 2917 2918 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 2919 port->physical_port); 2920 if (retval != 0) 2921 break; 2922 2923 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 2924 port->virtual_port); 2925 if (retval != 0) 2926 break; 2927 2928 if (port->target_devid != NULL) { 2929 sbuf_printf(sb, "\t<target>"); 2930 ctl_id_sbuf(port->target_devid, sb); 2931 sbuf_printf(sb, "</target>\n"); 2932 } 2933 2934 if (port->port_devid != NULL) { 2935 sbuf_printf(sb, "\t<port>"); 2936 ctl_id_sbuf(port->port_devid, sb); 2937 sbuf_printf(sb, "</port>\n"); 2938 } 2939 2940 if (port->port_info != NULL) { 2941 retval = port->port_info(port->onoff_arg, sb); 2942 if (retval != 0) 2943 break; 2944 } 2945 STAILQ_FOREACH(opt, &port->options, links) { 2946 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2947 opt->name, opt->value, opt->name); 2948 if (retval != 0) 2949 break; 2950 } 2951 2952 if (port->lun_map != NULL) { 2953 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 2954 for (j = 0; j < CTL_MAX_LUNS; j++) { 2955 plun = ctl_lun_map_from_port(port, j); 2956 if (plun >= CTL_MAX_LUNS) 2957 continue; 2958 sbuf_printf(sb, 2959 "\t<lun id=\"%u\">%u</lun>\n", 2960 j, plun); 2961 } 2962 } 2963 2964 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2965 if (port->wwpn_iid[j].in_use == 0 || 2966 (port->wwpn_iid[j].wwpn == 0 && 2967 port->wwpn_iid[j].name == NULL)) 2968 continue; 2969 2970 if (port->wwpn_iid[j].name != NULL) 2971 retval = sbuf_printf(sb, 2972 "\t<initiator id=\"%u\">%s</initiator>\n", 2973 j, port->wwpn_iid[j].name); 2974 else 2975 retval = sbuf_printf(sb, 2976 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 2977 j, port->wwpn_iid[j].wwpn); 2978 if (retval != 0) 2979 break; 2980 } 2981 if (retval != 0) 2982 break; 2983 2984 retval = sbuf_printf(sb, "</targ_port>\n"); 2985 if (retval != 0) 2986 break; 2987 } 2988 mtx_unlock(&softc->ctl_lock); 2989 2990 if ((retval != 0) 2991 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 2992 retval = 0; 2993 sbuf_delete(sb); 2994 list->status = 
CTL_LUN_LIST_NEED_MORE_SPACE; 2995 snprintf(list->error_str, sizeof(list->error_str), 2996 "Out of space, %d bytes is too small", 2997 list->alloc_len); 2998 break; 2999 } 3000 3001 sbuf_finish(sb); 3002 3003 retval = copyout(sbuf_data(sb), list->lun_xml, 3004 sbuf_len(sb) + 1); 3005 3006 list->fill_len = sbuf_len(sb) + 1; 3007 list->status = CTL_LUN_LIST_OK; 3008 sbuf_delete(sb); 3009 break; 3010 } 3011 case CTL_LUN_MAP: { 3012 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3013 struct ctl_port *port; 3014 3015 mtx_lock(&softc->ctl_lock); 3016 if (lm->port >= CTL_MAX_PORTS || 3017 (port = softc->ctl_ports[lm->port]) == NULL) { 3018 mtx_unlock(&softc->ctl_lock); 3019 return (ENXIO); 3020 } 3021 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3022 if (lm->plun < CTL_MAX_LUNS) { 3023 if (lm->lun == UINT32_MAX) 3024 retval = ctl_lun_map_unset(port, lm->plun); 3025 else if (lm->lun < CTL_MAX_LUNS && 3026 softc->ctl_luns[lm->lun] != NULL) 3027 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3028 else 3029 return (ENXIO); 3030 } else if (lm->plun == UINT32_MAX) { 3031 if (lm->lun == UINT32_MAX) 3032 retval = ctl_lun_map_deinit(port); 3033 else 3034 retval = ctl_lun_map_init(port); 3035 } else 3036 return (ENXIO); 3037 break; 3038 } 3039 default: { 3040 /* XXX KDM should we fix this? */ 3041 #if 0 3042 struct ctl_backend_driver *backend; 3043 unsigned int type; 3044 int found; 3045 3046 found = 0; 3047 3048 /* 3049 * We encode the backend type as the ioctl type for backend 3050 * ioctls. So parse it out here, and then search for a 3051 * backend of this type. 3052 */ 3053 type = _IOC_TYPE(cmd); 3054 3055 STAILQ_FOREACH(backend, &softc->be_list, links) { 3056 if (backend->type == type) { 3057 found = 1; 3058 break; 3059 } 3060 } 3061 if (found == 0) { 3062 printf("ctl: unknown ioctl command %#lx or backend " 3063 "%d\n", cmd, type); 3064 retval = EINVAL; 3065 break; 3066 } 3067 retval = backend->ioctl(dev, cmd, addr, flag, td); 3068 #endif 3069 retval = ENOTTY; 3070 break; 3071 } 3072 } 3073 return (retval); 3074 } 3075 3076 uint32_t 3077 ctl_get_initindex(struct ctl_nexus *nexus) 3078 { 3079 if (nexus->targ_port < CTL_MAX_PORTS) 3080 return (nexus->initid.id + 3081 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3082 else 3083 return (nexus->initid.id + 3084 ((nexus->targ_port - CTL_MAX_PORTS) * 3085 CTL_MAX_INIT_PER_PORT)); 3086 } 3087 3088 uint32_t 3089 ctl_get_resindex(struct ctl_nexus *nexus) 3090 { 3091 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3092 } 3093 3094 uint32_t 3095 ctl_port_idx(int port_num) 3096 { 3097 if (port_num < CTL_MAX_PORTS) 3098 return(port_num); 3099 else 3100 return(port_num - CTL_MAX_PORTS); 3101 } 3102 3103 int 3104 ctl_lun_map_init(struct ctl_port *port) 3105 { 3106 struct ctl_softc *softc = control_softc; 3107 struct ctl_lun *lun; 3108 uint32_t i; 3109 3110 if (port->lun_map == NULL) 3111 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3112 M_CTL, M_NOWAIT); 3113 if (port->lun_map == NULL) 3114 return (ENOMEM); 3115 for (i = 0; i < CTL_MAX_LUNS; i++) 3116 port->lun_map[i] = UINT32_MAX; 3117 if (port->status & CTL_PORT_STATUS_ONLINE && 3118 port->lun_disable != NULL) { 3119 STAILQ_FOREACH(lun, &softc->lun_list, links) 3120 port->lun_disable(port->targ_lun_arg, lun->lun); 3121 } 3122 return (0); 3123 } 3124 3125 int 3126 ctl_lun_map_deinit(struct ctl_port *port) 3127 { 3128 struct ctl_softc *softc = control_softc; 3129 struct ctl_lun *lun; 3130 3131 if (port->lun_map == NULL) 3132 return (0); 3133 free(port->lun_map, 
M_CTL); 3134 port->lun_map = NULL; 3135 if (port->status & CTL_PORT_STATUS_ONLINE && 3136 port->lun_enable != NULL) { 3137 STAILQ_FOREACH(lun, &softc->lun_list, links) 3138 port->lun_enable(port->targ_lun_arg, lun->lun); 3139 } 3140 return (0); 3141 } 3142 3143 int 3144 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3145 { 3146 int status; 3147 uint32_t old; 3148 3149 if (port->lun_map == NULL) { 3150 status = ctl_lun_map_init(port); 3151 if (status != 0) 3152 return (status); 3153 } 3154 old = port->lun_map[plun]; 3155 port->lun_map[plun] = glun; 3156 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS && 3157 port->lun_enable != NULL) 3158 port->lun_enable(port->targ_lun_arg, plun); 3159 return (0); 3160 } 3161 3162 int 3163 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3164 { 3165 uint32_t old; 3166 3167 if (port->lun_map == NULL) 3168 return (0); 3169 old = port->lun_map[plun]; 3170 port->lun_map[plun] = UINT32_MAX; 3171 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS && 3172 port->lun_disable != NULL) 3173 port->lun_disable(port->targ_lun_arg, plun); 3174 return (0); 3175 } 3176 3177 uint32_t 3178 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3179 { 3180 3181 if (port == NULL) 3182 return (UINT32_MAX); 3183 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3184 return (lun_id); 3185 return (port->lun_map[lun_id]); 3186 } 3187 3188 uint32_t 3189 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3190 { 3191 uint32_t i; 3192 3193 if (port == NULL) 3194 return (UINT32_MAX); 3195 if (port->lun_map == NULL) 3196 return (lun_id); 3197 for (i = 0; i < CTL_MAX_LUNS; i++) { 3198 if (port->lun_map[i] == lun_id) 3199 return (i); 3200 } 3201 return (UINT32_MAX); 3202 } 3203 3204 static struct ctl_port * 3205 ctl_io_port(struct ctl_io_hdr *io_hdr) 3206 { 3207 int port_num; 3208 3209 port_num = io_hdr->nexus.targ_port; 3210 return (control_softc->ctl_ports[ctl_port_idx(port_num)]); 3211 } 3212 3213 /* 3214 * Note: This only works for bitmask sizes that are at least 32 bits, and 3215 * that are a power of 2. 
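 * The mask is scanned 32 bits at a time; the index of the first zero bit
 * found is returned, or -1 if every bit in the mask is set.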
3216 */ 3217 int 3218 ctl_ffz(uint32_t *mask, uint32_t size) 3219 { 3220 uint32_t num_chunks, num_pieces; 3221 int i, j; 3222 3223 num_chunks = (size >> 5); 3224 if (num_chunks == 0) 3225 num_chunks++; 3226 num_pieces = MIN((sizeof(uint32_t) * 8), size); 3227 3228 for (i = 0; i < num_chunks; i++) { 3229 for (j = 0; j < num_pieces; j++) { 3230 if ((mask[i] & (1 << j)) == 0) 3231 return ((i << 5) + j); 3232 } 3233 } 3234 3235 return (-1); 3236 } 3237 3238 int 3239 ctl_set_mask(uint32_t *mask, uint32_t bit) 3240 { 3241 uint32_t chunk, piece; 3242 3243 chunk = bit >> 5; 3244 piece = bit % (sizeof(uint32_t) * 8); 3245 3246 if ((mask[chunk] & (1 << piece)) != 0) 3247 return (-1); 3248 else 3249 mask[chunk] |= (1 << piece); 3250 3251 return (0); 3252 } 3253 3254 int 3255 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3256 { 3257 uint32_t chunk, piece; 3258 3259 chunk = bit >> 5; 3260 piece = bit % (sizeof(uint32_t) * 8); 3261 3262 if ((mask[chunk] & (1 << piece)) == 0) 3263 return (-1); 3264 else 3265 mask[chunk] &= ~(1 << piece); 3266 3267 return (0); 3268 } 3269 3270 int 3271 ctl_is_set(uint32_t *mask, uint32_t bit) 3272 { 3273 uint32_t chunk, piece; 3274 3275 chunk = bit >> 5; 3276 piece = bit % (sizeof(uint32_t) * 8); 3277 3278 if ((mask[chunk] & (1 << piece)) == 0) 3279 return (0); 3280 else 3281 return (1); 3282 } 3283 3284 static uint64_t 3285 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3286 { 3287 uint64_t *t; 3288 3289 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3290 if (t == NULL) 3291 return (0); 3292 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3293 } 3294 3295 static void 3296 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3297 { 3298 uint64_t *t; 3299 3300 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3301 if (t == NULL) 3302 return; 3303 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3304 } 3305 3306 static void 3307 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3308 { 3309 uint64_t *p; 3310 u_int i; 3311 3312 i = residx/CTL_MAX_INIT_PER_PORT; 3313 if (lun->pr_keys[i] != NULL) 3314 return; 3315 mtx_unlock(&lun->lun_lock); 3316 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3317 M_WAITOK | M_ZERO); 3318 mtx_lock(&lun->lun_lock); 3319 if (lun->pr_keys[i] == NULL) 3320 lun->pr_keys[i] = p; 3321 else 3322 free(p, M_CTL); 3323 } 3324 3325 static void 3326 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3327 { 3328 uint64_t *t; 3329 3330 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3331 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3332 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3333 } 3334 3335 /* 3336 * ctl_softc, pool_name, total_ctl_io are passed in. 3337 * npool is passed out. 
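 * When IO_POOLS is defined a dedicated secondary uma zone is created for the
 * pool; otherwise the softc-wide io_zone is handed back directly and
 * total_ctl_io is effectively unused.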
3338 */ 3339 int 3340 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3341 uint32_t total_ctl_io, void **npool) 3342 { 3343 #ifdef IO_POOLS 3344 struct ctl_io_pool *pool; 3345 3346 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3347 M_NOWAIT | M_ZERO); 3348 if (pool == NULL) 3349 return (ENOMEM); 3350 3351 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3352 pool->ctl_softc = ctl_softc; 3353 pool->zone = uma_zsecond_create(pool->name, NULL, 3354 NULL, NULL, NULL, ctl_softc->io_zone); 3355 /* uma_prealloc(pool->zone, total_ctl_io); */ 3356 3357 *npool = pool; 3358 #else 3359 *npool = ctl_softc->io_zone; 3360 #endif 3361 return (0); 3362 } 3363 3364 void 3365 ctl_pool_free(struct ctl_io_pool *pool) 3366 { 3367 3368 if (pool == NULL) 3369 return; 3370 3371 #ifdef IO_POOLS 3372 uma_zdestroy(pool->zone); 3373 free(pool, M_CTL); 3374 #endif 3375 } 3376 3377 union ctl_io * 3378 ctl_alloc_io(void *pool_ref) 3379 { 3380 union ctl_io *io; 3381 #ifdef IO_POOLS 3382 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3383 3384 io = uma_zalloc(pool->zone, M_WAITOK); 3385 #else 3386 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3387 #endif 3388 if (io != NULL) 3389 io->io_hdr.pool = pool_ref; 3390 return (io); 3391 } 3392 3393 union ctl_io * 3394 ctl_alloc_io_nowait(void *pool_ref) 3395 { 3396 union ctl_io *io; 3397 #ifdef IO_POOLS 3398 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3399 3400 io = uma_zalloc(pool->zone, M_NOWAIT); 3401 #else 3402 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3403 #endif 3404 if (io != NULL) 3405 io->io_hdr.pool = pool_ref; 3406 return (io); 3407 } 3408 3409 void 3410 ctl_free_io(union ctl_io *io) 3411 { 3412 #ifdef IO_POOLS 3413 struct ctl_io_pool *pool; 3414 #endif 3415 3416 if (io == NULL) 3417 return; 3418 3419 #ifdef IO_POOLS 3420 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3421 uma_zfree(pool->zone, io); 3422 #else 3423 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3424 #endif 3425 } 3426 3427 void 3428 ctl_zero_io(union ctl_io *io) 3429 { 3430 void *pool_ref; 3431 3432 if (io == NULL) 3433 return; 3434 3435 /* 3436 * May need to preserve linked list pointers at some point too. 3437 */ 3438 pool_ref = io->io_hdr.pool; 3439 memset(io, 0, sizeof(*io)); 3440 io->io_hdr.pool = pool_ref; 3441 } 3442 3443 /* 3444 * This routine is currently used for internal copies of ctl_ios that need 3445 * to persist for some reason after we've already returned status to the 3446 * FETD. (Thus the flag set.) 3447 * 3448 * XXX XXX 3449 * Note that this makes a blind copy of all fields in the ctl_io, except 3450 * for the pool reference. This includes any memory that has been 3451 * allocated! That memory will no longer be valid after done has been 3452 * called, so this would be VERY DANGEROUS for command that actually does 3453 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3454 * start and stop commands, which don't transfer any data, so this is not a 3455 * problem. If it is used for anything else, the caller would also need to 3456 * allocate data buffer space and this routine would need to be modified to 3457 * copy the data buffer(s) as well. 3458 */ 3459 void 3460 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3461 { 3462 void *pool_ref; 3463 3464 if ((src == NULL) 3465 || (dest == NULL)) 3466 return; 3467 3468 /* 3469 * May need to preserve linked list pointers at some point too. 
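	 * For now only the destination's pool reference is carried across
	 * the blind copy below.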
3470 */ 3471 pool_ref = dest->io_hdr.pool; 3472 3473 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3474 3475 dest->io_hdr.pool = pool_ref; 3476 /* 3477 * We need to know that this is an internal copy, and doesn't need 3478 * to get passed back to the FETD that allocated it. 3479 */ 3480 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3481 } 3482 3483 int 3484 ctl_expand_number(const char *buf, uint64_t *num) 3485 { 3486 char *endptr; 3487 uint64_t number; 3488 unsigned shift; 3489 3490 number = strtoq(buf, &endptr, 0); 3491 3492 switch (tolower((unsigned char)*endptr)) { 3493 case 'e': 3494 shift = 60; 3495 break; 3496 case 'p': 3497 shift = 50; 3498 break; 3499 case 't': 3500 shift = 40; 3501 break; 3502 case 'g': 3503 shift = 30; 3504 break; 3505 case 'm': 3506 shift = 20; 3507 break; 3508 case 'k': 3509 shift = 10; 3510 break; 3511 case 'b': 3512 case '\0': /* No unit. */ 3513 *num = number; 3514 return (0); 3515 default: 3516 /* Unrecognized unit. */ 3517 return (-1); 3518 } 3519 3520 if ((number << shift) >> shift != number) { 3521 /* Overflow */ 3522 return (-1); 3523 } 3524 *num = number << shift; 3525 return (0); 3526 } 3527 3528 3529 /* 3530 * This routine could be used in the future to load default and/or saved 3531 * mode page parameters for a particuar lun. 3532 */ 3533 static int 3534 ctl_init_page_index(struct ctl_lun *lun) 3535 { 3536 int i; 3537 struct ctl_page_index *page_index; 3538 const char *value; 3539 uint64_t ival; 3540 3541 memcpy(&lun->mode_pages.index, page_index_template, 3542 sizeof(page_index_template)); 3543 3544 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3545 3546 page_index = &lun->mode_pages.index[i]; 3547 /* 3548 * If this is a disk-only mode page, there's no point in 3549 * setting it up. For some pages, we have to have some 3550 * basic information about the disk in order to calculate the 3551 * mode page data. 3552 */ 3553 if ((lun->be_lun->lun_type != T_DIRECT) 3554 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3555 continue; 3556 3557 switch (page_index->page_code & SMPH_PC_MASK) { 3558 case SMS_RW_ERROR_RECOVERY_PAGE: { 3559 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3560 panic("subpage is incorrect!"); 3561 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3562 &rw_er_page_default, 3563 sizeof(rw_er_page_default)); 3564 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3565 &rw_er_page_changeable, 3566 sizeof(rw_er_page_changeable)); 3567 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3568 &rw_er_page_default, 3569 sizeof(rw_er_page_default)); 3570 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3571 &rw_er_page_default, 3572 sizeof(rw_er_page_default)); 3573 page_index->page_data = 3574 (uint8_t *)lun->mode_pages.rw_er_page; 3575 break; 3576 } 3577 case SMS_FORMAT_DEVICE_PAGE: { 3578 struct scsi_format_page *format_page; 3579 3580 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3581 panic("subpage is incorrect!"); 3582 3583 /* 3584 * Sectors per track are set above. Bytes per 3585 * sector need to be set here on a per-LUN basis. 
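			 * The CURRENT, DEFAULT and SAVED copies of the format
			 * page all receive the backend's block size below.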
3586 */ 3587 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3588 &format_page_default, 3589 sizeof(format_page_default)); 3590 memcpy(&lun->mode_pages.format_page[ 3591 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3592 sizeof(format_page_changeable)); 3593 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3594 &format_page_default, 3595 sizeof(format_page_default)); 3596 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3597 &format_page_default, 3598 sizeof(format_page_default)); 3599 3600 format_page = &lun->mode_pages.format_page[ 3601 CTL_PAGE_CURRENT]; 3602 scsi_ulto2b(lun->be_lun->blocksize, 3603 format_page->bytes_per_sector); 3604 3605 format_page = &lun->mode_pages.format_page[ 3606 CTL_PAGE_DEFAULT]; 3607 scsi_ulto2b(lun->be_lun->blocksize, 3608 format_page->bytes_per_sector); 3609 3610 format_page = &lun->mode_pages.format_page[ 3611 CTL_PAGE_SAVED]; 3612 scsi_ulto2b(lun->be_lun->blocksize, 3613 format_page->bytes_per_sector); 3614 3615 page_index->page_data = 3616 (uint8_t *)lun->mode_pages.format_page; 3617 break; 3618 } 3619 case SMS_RIGID_DISK_PAGE: { 3620 struct scsi_rigid_disk_page *rigid_disk_page; 3621 uint32_t sectors_per_cylinder; 3622 uint64_t cylinders; 3623 #ifndef __XSCALE__ 3624 int shift; 3625 #endif /* !__XSCALE__ */ 3626 3627 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3628 panic("invalid subpage value %d", 3629 page_index->subpage); 3630 3631 /* 3632 * Rotation rate and sectors per track are set 3633 * above. We calculate the cylinders here based on 3634 * capacity. Due to the number of heads and 3635 * sectors per track we're using, smaller arrays 3636 * may turn out to have 0 cylinders. Linux and 3637 * FreeBSD don't pay attention to these mode pages 3638 * to figure out capacity, but Solaris does. It 3639 * seems to deal with 0 cylinders just fine, and 3640 * works out a fake geometry based on the capacity. 3641 */ 3642 memcpy(&lun->mode_pages.rigid_disk_page[ 3643 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3644 sizeof(rigid_disk_page_default)); 3645 memcpy(&lun->mode_pages.rigid_disk_page[ 3646 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3647 sizeof(rigid_disk_page_changeable)); 3648 3649 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3650 CTL_DEFAULT_HEADS; 3651 3652 /* 3653 * The divide method here will be more accurate, 3654 * probably, but results in floating point being 3655 * used in the kernel on i386 (__udivdi3()). On the 3656 * XScale, though, __udivdi3() is implemented in 3657 * software. 3658 * 3659 * The shift method for cylinder calculation is 3660 * accurate if sectors_per_cylinder is a power of 3661 * 2. Otherwise it might be slightly off -- you 3662 * might have a bit of a truncation problem. 3663 */ 3664 #ifdef __XSCALE__ 3665 cylinders = (lun->be_lun->maxlba + 1) / 3666 sectors_per_cylinder; 3667 #else 3668 for (shift = 31; shift > 0; shift--) { 3669 if (sectors_per_cylinder & (1 << shift)) 3670 break; 3671 } 3672 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3673 #endif 3674 3675 /* 3676 * We've basically got 3 bytes, or 24 bits for the 3677 * cylinder size in the mode page. If we're over, 3678 * just round down to 2^24. 
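			 * (The clamp below is to 0xffffff, i.e. 2^24 - 1
			 * cylinders, the largest value the field can hold.)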
3679 */ 3680 if (cylinders > 0xffffff) 3681 cylinders = 0xffffff; 3682 3683 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3684 CTL_PAGE_DEFAULT]; 3685 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3686 3687 if ((value = ctl_get_opt(&lun->be_lun->options, 3688 "rpm")) != NULL) { 3689 scsi_ulto2b(strtol(value, NULL, 0), 3690 rigid_disk_page->rotation_rate); 3691 } 3692 3693 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 3694 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3695 sizeof(rigid_disk_page_default)); 3696 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 3697 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3698 sizeof(rigid_disk_page_default)); 3699 3700 page_index->page_data = 3701 (uint8_t *)lun->mode_pages.rigid_disk_page; 3702 break; 3703 } 3704 case SMS_CACHING_PAGE: { 3705 struct scsi_caching_page *caching_page; 3706 3707 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3708 panic("invalid subpage value %d", 3709 page_index->subpage); 3710 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3711 &caching_page_default, 3712 sizeof(caching_page_default)); 3713 memcpy(&lun->mode_pages.caching_page[ 3714 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3715 sizeof(caching_page_changeable)); 3716 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3717 &caching_page_default, 3718 sizeof(caching_page_default)); 3719 caching_page = &lun->mode_pages.caching_page[ 3720 CTL_PAGE_SAVED]; 3721 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 3722 if (value != NULL && strcmp(value, "off") == 0) 3723 caching_page->flags1 &= ~SCP_WCE; 3724 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 3725 if (value != NULL && strcmp(value, "off") == 0) 3726 caching_page->flags1 |= SCP_RCD; 3727 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3728 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3729 sizeof(caching_page_default)); 3730 page_index->page_data = 3731 (uint8_t *)lun->mode_pages.caching_page; 3732 break; 3733 } 3734 case SMS_CONTROL_MODE_PAGE: { 3735 struct scsi_control_page *control_page; 3736 3737 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3738 panic("invalid subpage value %d", 3739 page_index->subpage); 3740 3741 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 3742 &control_page_default, 3743 sizeof(control_page_default)); 3744 memcpy(&lun->mode_pages.control_page[ 3745 CTL_PAGE_CHANGEABLE], &control_page_changeable, 3746 sizeof(control_page_changeable)); 3747 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 3748 &control_page_default, 3749 sizeof(control_page_default)); 3750 control_page = &lun->mode_pages.control_page[ 3751 CTL_PAGE_SAVED]; 3752 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 3753 if (value != NULL && strcmp(value, "unrestricted") == 0) { 3754 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 3755 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 3756 } 3757 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 3758 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 3759 sizeof(control_page_default)); 3760 page_index->page_data = 3761 (uint8_t *)lun->mode_pages.control_page; 3762 break; 3763 3764 } 3765 case SMS_INFO_EXCEPTIONS_PAGE: { 3766 switch (page_index->subpage) { 3767 case SMS_SUBPAGE_PAGE_0: 3768 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 3769 &ie_page_default, 3770 sizeof(ie_page_default)); 3771 memcpy(&lun->mode_pages.ie_page[ 3772 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 3773 sizeof(ie_page_changeable)); 3774 
memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 3775 &ie_page_default, 3776 sizeof(ie_page_default)); 3777 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 3778 &ie_page_default, 3779 sizeof(ie_page_default)); 3780 page_index->page_data = 3781 (uint8_t *)lun->mode_pages.ie_page; 3782 break; 3783 case 0x02: { 3784 struct ctl_logical_block_provisioning_page *page; 3785 3786 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 3787 &lbp_page_default, 3788 sizeof(lbp_page_default)); 3789 memcpy(&lun->mode_pages.lbp_page[ 3790 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 3791 sizeof(lbp_page_changeable)); 3792 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3793 &lbp_page_default, 3794 sizeof(lbp_page_default)); 3795 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 3796 value = ctl_get_opt(&lun->be_lun->options, 3797 "avail-threshold"); 3798 if (value != NULL && 3799 ctl_expand_number(value, &ival) == 0) { 3800 page->descr[0].flags |= SLBPPD_ENABLED | 3801 SLBPPD_ARMING_DEC; 3802 if (lun->be_lun->blocksize) 3803 ival /= lun->be_lun->blocksize; 3804 else 3805 ival /= 512; 3806 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3807 page->descr[0].count); 3808 } 3809 value = ctl_get_opt(&lun->be_lun->options, 3810 "used-threshold"); 3811 if (value != NULL && 3812 ctl_expand_number(value, &ival) == 0) { 3813 page->descr[1].flags |= SLBPPD_ENABLED | 3814 SLBPPD_ARMING_INC; 3815 if (lun->be_lun->blocksize) 3816 ival /= lun->be_lun->blocksize; 3817 else 3818 ival /= 512; 3819 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3820 page->descr[1].count); 3821 } 3822 value = ctl_get_opt(&lun->be_lun->options, 3823 "pool-avail-threshold"); 3824 if (value != NULL && 3825 ctl_expand_number(value, &ival) == 0) { 3826 page->descr[2].flags |= SLBPPD_ENABLED | 3827 SLBPPD_ARMING_DEC; 3828 if (lun->be_lun->blocksize) 3829 ival /= lun->be_lun->blocksize; 3830 else 3831 ival /= 512; 3832 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3833 page->descr[2].count); 3834 } 3835 value = ctl_get_opt(&lun->be_lun->options, 3836 "pool-used-threshold"); 3837 if (value != NULL && 3838 ctl_expand_number(value, &ival) == 0) { 3839 page->descr[3].flags |= SLBPPD_ENABLED | 3840 SLBPPD_ARMING_INC; 3841 if (lun->be_lun->blocksize) 3842 ival /= lun->be_lun->blocksize; 3843 else 3844 ival /= 512; 3845 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3846 page->descr[3].count); 3847 } 3848 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 3849 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3850 sizeof(lbp_page_default)); 3851 page_index->page_data = 3852 (uint8_t *)lun->mode_pages.lbp_page; 3853 }} 3854 break; 3855 } 3856 case SMS_VENDOR_SPECIFIC_PAGE:{ 3857 switch (page_index->subpage) { 3858 case DBGCNF_SUBPAGE_CODE: { 3859 struct copan_debugconf_subpage *current_page, 3860 *saved_page; 3861 3862 memcpy(&lun->mode_pages.debugconf_subpage[ 3863 CTL_PAGE_CURRENT], 3864 &debugconf_page_default, 3865 sizeof(debugconf_page_default)); 3866 memcpy(&lun->mode_pages.debugconf_subpage[ 3867 CTL_PAGE_CHANGEABLE], 3868 &debugconf_page_changeable, 3869 sizeof(debugconf_page_changeable)); 3870 memcpy(&lun->mode_pages.debugconf_subpage[ 3871 CTL_PAGE_DEFAULT], 3872 &debugconf_page_default, 3873 sizeof(debugconf_page_default)); 3874 memcpy(&lun->mode_pages.debugconf_subpage[ 3875 CTL_PAGE_SAVED], 3876 &debugconf_page_default, 3877 sizeof(debugconf_page_default)); 3878 page_index->page_data = 3879 (uint8_t *)lun->mode_pages.debugconf_subpage; 3880 3881 current_page = (struct copan_debugconf_subpage *) 3882 (page_index->page_data + 3883 (page_index->page_len * 3884 CTL_PAGE_CURRENT)); 3885 
saved_page = (struct copan_debugconf_subpage *) 3886 (page_index->page_data + 3887 (page_index->page_len * 3888 CTL_PAGE_SAVED)); 3889 break; 3890 } 3891 default: 3892 panic("invalid subpage value %d", 3893 page_index->subpage); 3894 break; 3895 } 3896 break; 3897 } 3898 default: 3899 panic("invalid page value %d", 3900 page_index->page_code & SMPH_PC_MASK); 3901 break; 3902 } 3903 } 3904 3905 return (CTL_RETVAL_COMPLETE); 3906 } 3907 3908 static int 3909 ctl_init_log_page_index(struct ctl_lun *lun) 3910 { 3911 struct ctl_page_index *page_index; 3912 int i, j, k, prev; 3913 3914 memcpy(&lun->log_pages.index, log_page_index_template, 3915 sizeof(log_page_index_template)); 3916 3917 prev = -1; 3918 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 3919 3920 page_index = &lun->log_pages.index[i]; 3921 /* 3922 * If this is a disk-only mode page, there's no point in 3923 * setting it up. For some pages, we have to have some 3924 * basic information about the disk in order to calculate the 3925 * mode page data. 3926 */ 3927 if ((lun->be_lun->lun_type != T_DIRECT) 3928 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3929 continue; 3930 3931 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 3932 lun->backend->lun_attr == NULL) 3933 continue; 3934 3935 if (page_index->page_code != prev) { 3936 lun->log_pages.pages_page[j] = page_index->page_code; 3937 prev = page_index->page_code; 3938 j++; 3939 } 3940 lun->log_pages.subpages_page[k*2] = page_index->page_code; 3941 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 3942 k++; 3943 } 3944 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 3945 lun->log_pages.index[0].page_len = j; 3946 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 3947 lun->log_pages.index[1].page_len = k * 2; 3948 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 3949 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 3950 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 3951 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 3952 3953 return (CTL_RETVAL_COMPLETE); 3954 } 3955 3956 static int 3957 hex2bin(const char *str, uint8_t *buf, int buf_size) 3958 { 3959 int i; 3960 u_char c; 3961 3962 memset(buf, 0, buf_size); 3963 while (isspace(str[0])) 3964 str++; 3965 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 3966 str += 2; 3967 buf_size *= 2; 3968 for (i = 0; str[i] != 0 && i < buf_size; i++) { 3969 c = str[i]; 3970 if (isdigit(c)) 3971 c -= '0'; 3972 else if (isalpha(c)) 3973 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 3974 else 3975 break; 3976 if (c >= 16) 3977 break; 3978 if ((i & 1) == 0) 3979 buf[i / 2] |= (c << 4); 3980 else 3981 buf[i / 2] |= c; 3982 } 3983 return ((i + 1) / 2); 3984 } 3985 3986 /* 3987 * LUN allocation. 3988 * 3989 * Requirements: 3990 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 3991 * wants us to allocate the LUN and he can block. 3992 * - ctl_softc is always set 3993 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 3994 * 3995 * Returns 0 for success, non-zero (errno) for failure. 
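 * The backend is notified of the outcome through its lun_config_status
 * callback: CTL_LUN_CONFIG_OK on success, CTL_LUN_CONFIG_FAILURE on the
 * failure paths.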
3996 */ 3997 static int 3998 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 3999 struct ctl_be_lun *const be_lun) 4000 { 4001 struct ctl_lun *nlun, *lun; 4002 struct scsi_vpd_id_descriptor *desc; 4003 struct scsi_vpd_id_t10 *t10id; 4004 const char *eui, *naa, *scsiname, *vendor, *value; 4005 int lun_number, i, lun_malloced; 4006 int devidlen, idlen1, idlen2 = 0, len; 4007 4008 if (be_lun == NULL) 4009 return (EINVAL); 4010 4011 /* 4012 * We currently only support Direct Access or Processor LUN types. 4013 */ 4014 switch (be_lun->lun_type) { 4015 case T_DIRECT: 4016 break; 4017 case T_PROCESSOR: 4018 break; 4019 case T_SEQUENTIAL: 4020 case T_CHANGER: 4021 default: 4022 be_lun->lun_config_status(be_lun->be_lun, 4023 CTL_LUN_CONFIG_FAILURE); 4024 break; 4025 } 4026 if (ctl_lun == NULL) { 4027 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4028 lun_malloced = 1; 4029 } else { 4030 lun_malloced = 0; 4031 lun = ctl_lun; 4032 } 4033 4034 memset(lun, 0, sizeof(*lun)); 4035 if (lun_malloced) 4036 lun->flags = CTL_LUN_MALLOCED; 4037 4038 /* Generate LUN ID. */ 4039 devidlen = max(CTL_DEVID_MIN_LEN, 4040 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4041 idlen1 = sizeof(*t10id) + devidlen; 4042 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4043 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4044 if (scsiname != NULL) { 4045 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4046 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4047 } 4048 eui = ctl_get_opt(&be_lun->options, "eui"); 4049 if (eui != NULL) { 4050 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4051 } 4052 naa = ctl_get_opt(&be_lun->options, "naa"); 4053 if (naa != NULL) { 4054 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4055 } 4056 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4057 M_CTL, M_WAITOK | M_ZERO); 4058 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4059 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4060 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4061 desc->length = idlen1; 4062 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4063 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4064 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4065 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4066 } else { 4067 strncpy(t10id->vendor, vendor, 4068 min(sizeof(t10id->vendor), strlen(vendor))); 4069 } 4070 strncpy((char *)t10id->vendor_spec_id, 4071 (char *)be_lun->device_id, devidlen); 4072 if (scsiname != NULL) { 4073 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4074 desc->length); 4075 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4076 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4077 SVPD_ID_TYPE_SCSI_NAME; 4078 desc->length = idlen2; 4079 strlcpy(desc->identifier, scsiname, idlen2); 4080 } 4081 if (eui != NULL) { 4082 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4083 desc->length); 4084 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4085 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4086 SVPD_ID_TYPE_EUI64; 4087 desc->length = hex2bin(eui, desc->identifier, 16); 4088 desc->length = desc->length > 12 ? 16 : 4089 (desc->length > 8 ? 
12 : 8); 4090 len -= 16 - desc->length; 4091 } 4092 if (naa != NULL) { 4093 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4094 desc->length); 4095 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4096 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4097 SVPD_ID_TYPE_NAA; 4098 desc->length = hex2bin(naa, desc->identifier, 16); 4099 desc->length = desc->length > 8 ? 16 : 8; 4100 len -= 16 - desc->length; 4101 } 4102 lun->lun_devid->len = len; 4103 4104 mtx_lock(&ctl_softc->ctl_lock); 4105 /* 4106 * See if the caller requested a particular LUN number. If so, see 4107 * if it is available. Otherwise, allocate the first available LUN. 4108 */ 4109 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4110 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4111 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4112 mtx_unlock(&ctl_softc->ctl_lock); 4113 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4114 printf("ctl: requested LUN ID %d is higher " 4115 "than CTL_MAX_LUNS - 1 (%d)\n", 4116 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4117 } else { 4118 /* 4119 * XXX KDM return an error, or just assign 4120 * another LUN ID in this case?? 4121 */ 4122 printf("ctl: requested LUN ID %d is already " 4123 "in use\n", be_lun->req_lun_id); 4124 } 4125 if (lun->flags & CTL_LUN_MALLOCED) 4126 free(lun, M_CTL); 4127 be_lun->lun_config_status(be_lun->be_lun, 4128 CTL_LUN_CONFIG_FAILURE); 4129 return (ENOSPC); 4130 } 4131 lun_number = be_lun->req_lun_id; 4132 } else { 4133 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4134 if (lun_number == -1) { 4135 mtx_unlock(&ctl_softc->ctl_lock); 4136 printf("ctl: can't allocate LUN, out of LUNs\n"); 4137 if (lun->flags & CTL_LUN_MALLOCED) 4138 free(lun, M_CTL); 4139 be_lun->lun_config_status(be_lun->be_lun, 4140 CTL_LUN_CONFIG_FAILURE); 4141 return (ENOSPC); 4142 } 4143 } 4144 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4145 4146 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4147 lun->lun = lun_number; 4148 lun->be_lun = be_lun; 4149 /* 4150 * The processor LUN is always enabled. Disk LUNs come on line 4151 * disabled, and must be enabled by the backend. 
4152 */ 4153 lun->flags |= CTL_LUN_DISABLED; 4154 lun->backend = be_lun->be; 4155 be_lun->ctl_lun = lun; 4156 be_lun->lun_id = lun_number; 4157 atomic_add_int(&be_lun->be->num_luns, 1); 4158 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4159 lun->flags |= CTL_LUN_OFFLINE; 4160 4161 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4162 lun->flags |= CTL_LUN_STOPPED; 4163 4164 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4165 lun->flags |= CTL_LUN_INOPERABLE; 4166 4167 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4168 lun->flags |= CTL_LUN_PRIMARY_SC; 4169 4170 value = ctl_get_opt(&be_lun->options, "readonly"); 4171 if (value != NULL && strcmp(value, "on") == 0) 4172 lun->flags |= CTL_LUN_READONLY; 4173 4174 lun->serseq = CTL_LUN_SERSEQ_OFF; 4175 if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ) 4176 lun->serseq = CTL_LUN_SERSEQ_READ; 4177 value = ctl_get_opt(&be_lun->options, "serseq"); 4178 if (value != NULL && strcmp(value, "on") == 0) 4179 lun->serseq = CTL_LUN_SERSEQ_ON; 4180 else if (value != NULL && strcmp(value, "read") == 0) 4181 lun->serseq = CTL_LUN_SERSEQ_READ; 4182 else if (value != NULL && strcmp(value, "off") == 0) 4183 lun->serseq = CTL_LUN_SERSEQ_OFF; 4184 4185 lun->ctl_softc = ctl_softc; 4186 #ifdef CTL_TIME_IO 4187 lun->last_busy = getsbinuptime(); 4188 #endif 4189 TAILQ_INIT(&lun->ooa_queue); 4190 TAILQ_INIT(&lun->blocked_queue); 4191 STAILQ_INIT(&lun->error_list); 4192 ctl_tpc_lun_init(lun); 4193 4194 /* 4195 * Initialize the mode and log page index. 4196 */ 4197 ctl_init_page_index(lun); 4198 ctl_init_log_page_index(lun); 4199 4200 /* 4201 * Now, before we insert this lun on the lun list, set the lun 4202 * inventory changed UA for all other luns. 4203 */ 4204 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4205 mtx_lock(&nlun->lun_lock); 4206 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4207 mtx_unlock(&nlun->lun_lock); 4208 } 4209 4210 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4211 4212 ctl_softc->ctl_luns[lun_number] = lun; 4213 4214 ctl_softc->num_luns++; 4215 4216 /* Setup statistics gathering */ 4217 lun->stats.device_type = be_lun->lun_type; 4218 lun->stats.lun_number = lun_number; 4219 if (lun->stats.device_type == T_DIRECT) 4220 lun->stats.blocksize = be_lun->blocksize; 4221 else 4222 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4223 for (i = 0;i < CTL_MAX_PORTS;i++) 4224 lun->stats.ports[i].targ_port = i; 4225 4226 mtx_unlock(&ctl_softc->ctl_lock); 4227 4228 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4229 return (0); 4230 } 4231 4232 /* 4233 * Delete a LUN. 4234 * Assumptions: 4235 * - LUN has already been marked invalid and any pending I/O has been taken 4236 * care of. 4237 */ 4238 static int 4239 ctl_free_lun(struct ctl_lun *lun) 4240 { 4241 struct ctl_softc *softc; 4242 struct ctl_lun *nlun; 4243 int i; 4244 4245 softc = lun->ctl_softc; 4246 4247 mtx_assert(&softc->ctl_lock, MA_OWNED); 4248 4249 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4250 4251 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4252 4253 softc->ctl_luns[lun->lun] = NULL; 4254 4255 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4256 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4257 4258 softc->num_luns--; 4259 4260 /* 4261 * Tell the backend to free resources, if this LUN has a backend. 
4262 */ 4263 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4264 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4265 4266 ctl_tpc_lun_shutdown(lun); 4267 mtx_destroy(&lun->lun_lock); 4268 free(lun->lun_devid, M_CTL); 4269 for (i = 0; i < CTL_MAX_PORTS; i++) 4270 free(lun->pending_ua[i], M_CTL); 4271 for (i = 0; i < 2 * CTL_MAX_PORTS; i++) 4272 free(lun->pr_keys[i], M_CTL); 4273 free(lun->write_buffer, M_CTL); 4274 if (lun->flags & CTL_LUN_MALLOCED) 4275 free(lun, M_CTL); 4276 4277 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4278 mtx_lock(&nlun->lun_lock); 4279 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4280 mtx_unlock(&nlun->lun_lock); 4281 } 4282 4283 return (0); 4284 } 4285 4286 static void 4287 ctl_create_lun(struct ctl_be_lun *be_lun) 4288 { 4289 struct ctl_softc *softc; 4290 4291 softc = control_softc; 4292 4293 /* 4294 * ctl_alloc_lun() should handle all potential failure cases. 4295 */ 4296 ctl_alloc_lun(softc, NULL, be_lun); 4297 } 4298 4299 int 4300 ctl_add_lun(struct ctl_be_lun *be_lun) 4301 { 4302 struct ctl_softc *softc = control_softc; 4303 4304 mtx_lock(&softc->ctl_lock); 4305 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4306 mtx_unlock(&softc->ctl_lock); 4307 wakeup(&softc->pending_lun_queue); 4308 4309 return (0); 4310 } 4311 4312 int 4313 ctl_enable_lun(struct ctl_be_lun *be_lun) 4314 { 4315 struct ctl_softc *softc; 4316 struct ctl_port *port, *nport; 4317 struct ctl_lun *lun; 4318 int retval; 4319 4320 lun = (struct ctl_lun *)be_lun->ctl_lun; 4321 softc = lun->ctl_softc; 4322 4323 mtx_lock(&softc->ctl_lock); 4324 mtx_lock(&lun->lun_lock); 4325 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4326 /* 4327 * eh? Why did we get called if the LUN is already 4328 * enabled? 4329 */ 4330 mtx_unlock(&lun->lun_lock); 4331 mtx_unlock(&softc->ctl_lock); 4332 return (0); 4333 } 4334 lun->flags &= ~CTL_LUN_DISABLED; 4335 mtx_unlock(&lun->lun_lock); 4336 4337 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4338 nport = STAILQ_NEXT(port, links); 4339 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4340 port->lun_map != NULL || port->lun_enable == NULL) 4341 continue; 4342 4343 /* 4344 * Drop the lock while we call the FETD's enable routine. 4345 * This can lead to a callback into CTL (at least in the 4346 * case of the internal initiator frontend. 
4347 */ 4348 mtx_unlock(&softc->ctl_lock); 4349 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4350 mtx_lock(&softc->ctl_lock); 4351 if (retval != 0) { 4352 printf("%s: FETD %s port %d returned error " 4353 "%d for lun_enable on lun %jd\n", 4354 __func__, port->port_name, port->targ_port, 4355 retval, (intmax_t)lun->lun); 4356 } 4357 } 4358 4359 mtx_unlock(&softc->ctl_lock); 4360 4361 return (0); 4362 } 4363 4364 int 4365 ctl_disable_lun(struct ctl_be_lun *be_lun) 4366 { 4367 struct ctl_softc *softc; 4368 struct ctl_port *port; 4369 struct ctl_lun *lun; 4370 int retval; 4371 4372 lun = (struct ctl_lun *)be_lun->ctl_lun; 4373 softc = lun->ctl_softc; 4374 4375 mtx_lock(&softc->ctl_lock); 4376 mtx_lock(&lun->lun_lock); 4377 if (lun->flags & CTL_LUN_DISABLED) { 4378 mtx_unlock(&lun->lun_lock); 4379 mtx_unlock(&softc->ctl_lock); 4380 return (0); 4381 } 4382 lun->flags |= CTL_LUN_DISABLED; 4383 mtx_unlock(&lun->lun_lock); 4384 4385 STAILQ_FOREACH(port, &softc->port_list, links) { 4386 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4387 port->lun_map != NULL || port->lun_disable == NULL) 4388 continue; 4389 4390 /* 4391 * Drop the lock before we call the frontend's disable 4392 * routine, to avoid lock order reversals. 4393 * 4394 * XXX KDM what happens if the frontend list changes while 4395 * we're traversing it? It's unlikely, but should be handled. 4396 */ 4397 mtx_unlock(&softc->ctl_lock); 4398 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4399 mtx_lock(&softc->ctl_lock); 4400 if (retval != 0) { 4401 printf("%s: FETD %s port %d returned error " 4402 "%d for lun_disable on lun %jd\n", 4403 __func__, port->port_name, port->targ_port, 4404 retval, (intmax_t)lun->lun); 4405 } 4406 } 4407 4408 mtx_unlock(&softc->ctl_lock); 4409 4410 return (0); 4411 } 4412 4413 int 4414 ctl_start_lun(struct ctl_be_lun *be_lun) 4415 { 4416 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4417 4418 mtx_lock(&lun->lun_lock); 4419 lun->flags &= ~CTL_LUN_STOPPED; 4420 mtx_unlock(&lun->lun_lock); 4421 return (0); 4422 } 4423 4424 int 4425 ctl_stop_lun(struct ctl_be_lun *be_lun) 4426 { 4427 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4428 4429 mtx_lock(&lun->lun_lock); 4430 lun->flags |= CTL_LUN_STOPPED; 4431 mtx_unlock(&lun->lun_lock); 4432 return (0); 4433 } 4434 4435 int 4436 ctl_lun_offline(struct ctl_be_lun *be_lun) 4437 { 4438 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4439 4440 mtx_lock(&lun->lun_lock); 4441 lun->flags |= CTL_LUN_OFFLINE; 4442 mtx_unlock(&lun->lun_lock); 4443 return (0); 4444 } 4445 4446 int 4447 ctl_lun_online(struct ctl_be_lun *be_lun) 4448 { 4449 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4450 4451 mtx_lock(&lun->lun_lock); 4452 lun->flags &= ~CTL_LUN_OFFLINE; 4453 mtx_unlock(&lun->lun_lock); 4454 return (0); 4455 } 4456 4457 int 4458 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4459 { 4460 struct ctl_softc *softc; 4461 struct ctl_lun *lun; 4462 4463 lun = (struct ctl_lun *)be_lun->ctl_lun; 4464 softc = lun->ctl_softc; 4465 4466 mtx_lock(&lun->lun_lock); 4467 4468 /* 4469 * The LUN needs to be disabled before it can be marked invalid. 4470 */ 4471 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4472 mtx_unlock(&lun->lun_lock); 4473 return (-1); 4474 } 4475 /* 4476 * Mark the LUN invalid. 4477 */ 4478 lun->flags |= CTL_LUN_INVALID; 4479 4480 /* 4481 * If there is nothing in the OOA queue, go ahead and free the LUN. 4482 * If we have something in the OOA queue, we'll free it when the 4483 * last I/O completes. 
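	 *
	 * From a backend's point of view the teardown order is therefore
	 * roughly (sketch only, error handling omitted):
	 *
	 *	ctl_disable_lun(be_lun);
	 *	ctl_invalidate_lun(be_lun);
	 *	... wait for the lun_shutdown() callback made from
	 *	    ctl_free_lun(), either immediately or after the last
	 *	    outstanding I/O completes ...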
4484 */ 4485 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4486 mtx_unlock(&lun->lun_lock); 4487 mtx_lock(&softc->ctl_lock); 4488 ctl_free_lun(lun); 4489 mtx_unlock(&softc->ctl_lock); 4490 } else 4491 mtx_unlock(&lun->lun_lock); 4492 4493 return (0); 4494 } 4495 4496 int 4497 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4498 { 4499 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4500 4501 mtx_lock(&lun->lun_lock); 4502 lun->flags |= CTL_LUN_INOPERABLE; 4503 mtx_unlock(&lun->lun_lock); 4504 return (0); 4505 } 4506 4507 int 4508 ctl_lun_operable(struct ctl_be_lun *be_lun) 4509 { 4510 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4511 4512 mtx_lock(&lun->lun_lock); 4513 lun->flags &= ~CTL_LUN_INOPERABLE; 4514 mtx_unlock(&lun->lun_lock); 4515 return (0); 4516 } 4517 4518 void 4519 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4520 { 4521 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4522 4523 mtx_lock(&lun->lun_lock); 4524 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4525 mtx_unlock(&lun->lun_lock); 4526 } 4527 4528 /* 4529 * Backend "memory move is complete" callback for requests that never 4530 * make it down to say RAIDCore's configuration code. 4531 */ 4532 int 4533 ctl_config_move_done(union ctl_io *io) 4534 { 4535 int retval; 4536 4537 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4538 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4539 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4540 4541 if ((io->io_hdr.port_status != 0) && 4542 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4543 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4544 /* 4545 * For hardware error sense keys, the sense key 4546 * specific value is defined to be a retry count, 4547 * but we use it to pass back an internal FETD 4548 * error code. XXX KDM Hopefully the FETD is only 4549 * using 16 bits for an error code, since that's 4550 * all the space we have in the sks field. 4551 */ 4552 ctl_set_internal_failure(&io->scsiio, 4553 /*sks_valid*/ 1, 4554 /*retry_count*/ 4555 io->io_hdr.port_status); 4556 } 4557 4558 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4559 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4560 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4561 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4562 /* 4563 * XXX KDM just assuming a single pointer here, and not a 4564 * S/G list. If we start using S/G lists for config data, 4565 * we'll need to know how to clean them up here as well. 4566 */ 4567 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4568 free(io->scsiio.kern_data_ptr, M_CTL); 4569 ctl_done(io); 4570 retval = CTL_RETVAL_COMPLETE; 4571 } else { 4572 /* 4573 * XXX KDM now we need to continue data movement. Some 4574 * options: 4575 * - call ctl_scsiio() again? We don't do this for data 4576 * writes, because for those at least we know ahead of 4577 * time where the write will go and how long it is. For 4578 * config writes, though, that information is largely 4579 * contained within the write itself, thus we need to 4580 * parse out the data again. 4581 * 4582 * - Call some other function once the data is in? 4583 */ 4584 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4585 ctl_data_print(io); 4586 4587 /* 4588 * XXX KDM call ctl_scsiio() again for now, and check flag 4589 * bits to see whether we're allocated or not. 
4590 */ 4591 retval = ctl_scsiio(&io->scsiio); 4592 } 4593 return (retval); 4594 } 4595 4596 /* 4597 * This gets called by a backend driver when it is done with a 4598 * data_submit method. 4599 */ 4600 void 4601 ctl_data_submit_done(union ctl_io *io) 4602 { 4603 /* 4604 * If the IO_CONT flag is set, we need to call the supplied 4605 * function to continue processing the I/O, instead of completing 4606 * the I/O just yet. 4607 * 4608 * If there is an error, though, we don't want to keep processing. 4609 * Instead, just send status back to the initiator. 4610 */ 4611 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4612 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4613 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4614 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4615 io->scsiio.io_cont(io); 4616 return; 4617 } 4618 ctl_done(io); 4619 } 4620 4621 /* 4622 * This gets called by a backend driver when it is done with a 4623 * configuration write. 4624 */ 4625 void 4626 ctl_config_write_done(union ctl_io *io) 4627 { 4628 uint8_t *buf; 4629 4630 /* 4631 * If the IO_CONT flag is set, we need to call the supplied 4632 * function to continue processing the I/O, instead of completing 4633 * the I/O just yet. 4634 * 4635 * If there is an error, though, we don't want to keep processing. 4636 * Instead, just send status back to the initiator. 4637 */ 4638 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4639 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4640 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4641 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4642 io->scsiio.io_cont(io); 4643 return; 4644 } 4645 /* 4646 * Since a configuration write can be done for commands that actually 4647 * have data allocated, like write buffer, and commands that have 4648 * no data, like start/stop unit, we need to check here. 4649 */ 4650 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4651 buf = io->scsiio.kern_data_ptr; 4652 else 4653 buf = NULL; 4654 ctl_done(io); 4655 if (buf) 4656 free(buf, M_CTL); 4657 } 4658 4659 void 4660 ctl_config_read_done(union ctl_io *io) 4661 { 4662 uint8_t *buf; 4663 4664 /* 4665 * If there is some error -- we are done, skip data transfer. 4666 */ 4667 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 4668 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4669 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 4670 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4671 buf = io->scsiio.kern_data_ptr; 4672 else 4673 buf = NULL; 4674 ctl_done(io); 4675 if (buf) 4676 free(buf, M_CTL); 4677 return; 4678 } 4679 4680 /* 4681 * If the IO_CONT flag is set, we need to call the supplied 4682 * function to continue processing the I/O, instead of completing 4683 * the I/O just yet. 4684 */ 4685 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 4686 io->scsiio.io_cont(io); 4687 return; 4688 } 4689 4690 ctl_datamove(io); 4691 } 4692 4693 /* 4694 * SCSI release command. 
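 * Only the RELEASE(10) CDB fields are decoded in the switch below; the
 * matching RESERVE(10) handling is in ctl_scsi_reserve() further down.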
4695 */ 4696 int 4697 ctl_scsi_release(struct ctl_scsiio *ctsio) 4698 { 4699 int length, longid, thirdparty_id, resv_id; 4700 struct ctl_lun *lun; 4701 uint32_t residx; 4702 4703 length = 0; 4704 resv_id = 0; 4705 4706 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4707 4708 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4709 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4710 4711 switch (ctsio->cdb[0]) { 4712 case RELEASE_10: { 4713 struct scsi_release_10 *cdb; 4714 4715 cdb = (struct scsi_release_10 *)ctsio->cdb; 4716 4717 if (cdb->byte2 & SR10_LONGID) 4718 longid = 1; 4719 else 4720 thirdparty_id = cdb->thirdparty_id; 4721 4722 resv_id = cdb->resv_id; 4723 length = scsi_2btoul(cdb->length); 4724 break; 4725 } 4726 } 4727 4728 4729 /* 4730 * XXX KDM right now, we only support LUN reservation. We don't 4731 * support 3rd party reservations, or extent reservations, which 4732 * might actually need the parameter list. If we've gotten this 4733 * far, we've got a LUN reservation. Anything else got kicked out 4734 * above. So, according to SPC, ignore the length. 4735 */ 4736 length = 0; 4737 4738 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4739 && (length > 0)) { 4740 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4741 ctsio->kern_data_len = length; 4742 ctsio->kern_total_len = length; 4743 ctsio->kern_data_resid = 0; 4744 ctsio->kern_rel_offset = 0; 4745 ctsio->kern_sg_entries = 0; 4746 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4747 ctsio->be_move_done = ctl_config_move_done; 4748 ctl_datamove((union ctl_io *)ctsio); 4749 4750 return (CTL_RETVAL_COMPLETE); 4751 } 4752 4753 if (length > 0) 4754 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4755 4756 mtx_lock(&lun->lun_lock); 4757 4758 /* 4759 * According to SPC, it is not an error for an intiator to attempt 4760 * to release a reservation on a LUN that isn't reserved, or that 4761 * is reserved by another initiator. The reservation can only be 4762 * released, though, by the initiator who made it or by one of 4763 * several reset type events. 4764 */ 4765 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 4766 lun->flags &= ~CTL_LUN_RESERVED; 4767 4768 mtx_unlock(&lun->lun_lock); 4769 4770 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4771 free(ctsio->kern_data_ptr, M_CTL); 4772 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4773 } 4774 4775 ctl_set_success(ctsio); 4776 ctl_done((union ctl_io *)ctsio); 4777 return (CTL_RETVAL_COMPLETE); 4778 } 4779 4780 int 4781 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 4782 { 4783 int extent, thirdparty, longid; 4784 int resv_id, length; 4785 uint64_t thirdparty_id; 4786 struct ctl_lun *lun; 4787 uint32_t residx; 4788 4789 extent = 0; 4790 thirdparty = 0; 4791 longid = 0; 4792 resv_id = 0; 4793 length = 0; 4794 thirdparty_id = 0; 4795 4796 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4797 4798 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4799 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4800 4801 switch (ctsio->cdb[0]) { 4802 case RESERVE_10: { 4803 struct scsi_reserve_10 *cdb; 4804 4805 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4806 4807 if (cdb->byte2 & SR10_LONGID) 4808 longid = 1; 4809 else 4810 thirdparty_id = cdb->thirdparty_id; 4811 4812 resv_id = cdb->resv_id; 4813 length = scsi_2btoul(cdb->length); 4814 break; 4815 } 4816 } 4817 4818 /* 4819 * XXX KDM right now, we only support LUN reservation. We don't 4820 * support 3rd party reservations, or extent reservations, which 4821 * might actually need the parameter list. 
If we've gotten this 4822 * far, we've got a LUN reservation. Anything else got kicked out 4823 * above. So, according to SPC, ignore the length. 4824 */ 4825 length = 0; 4826 4827 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4828 && (length > 0)) { 4829 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4830 ctsio->kern_data_len = length; 4831 ctsio->kern_total_len = length; 4832 ctsio->kern_data_resid = 0; 4833 ctsio->kern_rel_offset = 0; 4834 ctsio->kern_sg_entries = 0; 4835 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4836 ctsio->be_move_done = ctl_config_move_done; 4837 ctl_datamove((union ctl_io *)ctsio); 4838 4839 return (CTL_RETVAL_COMPLETE); 4840 } 4841 4842 if (length > 0) 4843 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4844 4845 mtx_lock(&lun->lun_lock); 4846 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 4847 ctl_set_reservation_conflict(ctsio); 4848 goto bailout; 4849 } 4850 4851 lun->flags |= CTL_LUN_RESERVED; 4852 lun->res_idx = residx; 4853 4854 ctl_set_success(ctsio); 4855 4856 bailout: 4857 mtx_unlock(&lun->lun_lock); 4858 4859 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4860 free(ctsio->kern_data_ptr, M_CTL); 4861 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4862 } 4863 4864 ctl_done((union ctl_io *)ctsio); 4865 return (CTL_RETVAL_COMPLETE); 4866 } 4867 4868 int 4869 ctl_start_stop(struct ctl_scsiio *ctsio) 4870 { 4871 struct scsi_start_stop_unit *cdb; 4872 struct ctl_lun *lun; 4873 int retval; 4874 4875 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 4876 4877 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4878 retval = 0; 4879 4880 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 4881 4882 /* 4883 * XXX KDM 4884 * We don't support the immediate bit on a stop unit. In order to 4885 * do that, we would need to code up a way to know that a stop is 4886 * pending, and hold off any new commands until it completes, one 4887 * way or another. Then we could accept or reject those commands 4888 * depending on its status. We would almost need to do the reverse 4889 * of what we do below for an immediate start -- return the copy of 4890 * the ctl_io to the FETD with status to send to the host (and to 4891 * free the copy!) and then free the original I/O once the stop 4892 * actually completes. That way, the OOA queue mechanism can work 4893 * to block commands that shouldn't proceed. Another alternative 4894 * would be to put the copy in the queue in place of the original, 4895 * and return the original back to the caller. That could be 4896 * slightly safer.. 4897 */ 4898 if ((cdb->byte2 & SSS_IMMED) 4899 && ((cdb->how & SSS_START) == 0)) { 4900 ctl_set_invalid_field(ctsio, 4901 /*sks_valid*/ 1, 4902 /*command*/ 1, 4903 /*field*/ 1, 4904 /*bit_valid*/ 1, 4905 /*bit*/ 0); 4906 ctl_done((union ctl_io *)ctsio); 4907 return (CTL_RETVAL_COMPLETE); 4908 } 4909 4910 if ((lun->flags & CTL_LUN_PR_RESERVED) 4911 && ((cdb->how & SSS_START)==0)) { 4912 uint32_t residx; 4913 4914 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4915 if (ctl_get_prkey(lun, residx) == 0 4916 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 4917 4918 ctl_set_reservation_conflict(ctsio); 4919 ctl_done((union ctl_io *)ctsio); 4920 return (CTL_RETVAL_COMPLETE); 4921 } 4922 } 4923 4924 /* 4925 * If there is no backend on this device, we can't start or stop 4926 * it. In theory we shouldn't get any start/stop commands in the 4927 * first place at this level if the LUN doesn't have a backend. 4928 * That should get stopped by the command decode code. 
4929 */ 4930 if (lun->backend == NULL) { 4931 ctl_set_invalid_opcode(ctsio); 4932 ctl_done((union ctl_io *)ctsio); 4933 return (CTL_RETVAL_COMPLETE); 4934 } 4935 4936 /* 4937 * XXX KDM Copan-specific offline behavior. 4938 * Figure out a reasonable way to port this? 4939 */ 4940 #ifdef NEEDTOPORT 4941 mtx_lock(&lun->lun_lock); 4942 4943 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 4944 && (lun->flags & CTL_LUN_OFFLINE)) { 4945 /* 4946 * If the LUN is offline, and the on/offline bit isn't set, 4947 * reject the start or stop. Otherwise, let it through. 4948 */ 4949 mtx_unlock(&lun->lun_lock); 4950 ctl_set_lun_not_ready(ctsio); 4951 ctl_done((union ctl_io *)ctsio); 4952 } else { 4953 mtx_unlock(&lun->lun_lock); 4954 #endif /* NEEDTOPORT */ 4955 /* 4956 * This could be a start or a stop when we're online, 4957 * or a stop/offline or start/online. A start or stop when 4958 * we're offline is covered in the case above. 4959 */ 4960 /* 4961 * In the non-immediate case, we send the request to 4962 * the backend and return status to the user when 4963 * it is done. 4964 * 4965 * In the immediate case, we allocate a new ctl_io 4966 * to hold a copy of the request, and send that to 4967 * the backend. We then set good status on the 4968 * user's request and return it immediately. 4969 */ 4970 if (cdb->byte2 & SSS_IMMED) { 4971 union ctl_io *new_io; 4972 4973 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 4974 ctl_copy_io((union ctl_io *)ctsio, new_io); 4975 retval = lun->backend->config_write(new_io); 4976 ctl_set_success(ctsio); 4977 ctl_done((union ctl_io *)ctsio); 4978 } else { 4979 retval = lun->backend->config_write( 4980 (union ctl_io *)ctsio); 4981 } 4982 #ifdef NEEDTOPORT 4983 } 4984 #endif 4985 return (retval); 4986 } 4987 4988 /* 4989 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 4990 * we don't really do anything with the LBA and length fields if the user 4991 * passes them in. Instead we'll just flush out the cache for the entire 4992 * LUN. 4993 */ 4994 int 4995 ctl_sync_cache(struct ctl_scsiio *ctsio) 4996 { 4997 struct ctl_lun *lun; 4998 struct ctl_softc *softc; 4999 struct ctl_lba_len_flags *lbalen; 5000 uint64_t starting_lba; 5001 uint32_t block_count; 5002 int retval; 5003 uint8_t byte2; 5004 5005 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5006 5007 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5008 softc = lun->ctl_softc; 5009 retval = 0; 5010 5011 switch (ctsio->cdb[0]) { 5012 case SYNCHRONIZE_CACHE: { 5013 struct scsi_sync_cache *cdb; 5014 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5015 5016 starting_lba = scsi_4btoul(cdb->begin_lba); 5017 block_count = scsi_2btoul(cdb->lb_count); 5018 byte2 = cdb->byte2; 5019 break; 5020 } 5021 case SYNCHRONIZE_CACHE_16: { 5022 struct scsi_sync_cache_16 *cdb; 5023 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5024 5025 starting_lba = scsi_8btou64(cdb->begin_lba); 5026 block_count = scsi_4btoul(cdb->lb_count); 5027 byte2 = cdb->byte2; 5028 break; 5029 } 5030 default: 5031 ctl_set_invalid_opcode(ctsio); 5032 ctl_done((union ctl_io *)ctsio); 5033 goto bailout; 5034 break; /* NOTREACHED */ 5035 } 5036 5037 /* 5038 * We check the LBA and length, but don't do anything with them. 5039 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5040 * get flushed. This check will just help satisfy anyone who wants 5041 * to see an error for an out of range LBA. 
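	 *
	 * For example (illustrative numbers only): with maxlba == 999, a
	 * request with starting_lba 990 and block_count 20 would name blocks
	 * 990..1009 and is rejected, while block_count 10 (blocks 990..999)
	 * passes the check.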
5042 */ 5043 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5044 ctl_set_lba_out_of_range(ctsio); 5045 ctl_done((union ctl_io *)ctsio); 5046 goto bailout; 5047 } 5048 5049 /* 5050 * If this LUN has no backend, we can't flush the cache anyway. 5051 */ 5052 if (lun->backend == NULL) { 5053 ctl_set_invalid_opcode(ctsio); 5054 ctl_done((union ctl_io *)ctsio); 5055 goto bailout; 5056 } 5057 5058 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5059 lbalen->lba = starting_lba; 5060 lbalen->len = block_count; 5061 lbalen->flags = byte2; 5062 5063 /* 5064 * Check to see whether we're configured to send the SYNCHRONIZE 5065 * CACHE command directly to the back end. 5066 */ 5067 mtx_lock(&lun->lun_lock); 5068 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5069 && (++(lun->sync_count) >= lun->sync_interval)) { 5070 lun->sync_count = 0; 5071 mtx_unlock(&lun->lun_lock); 5072 retval = lun->backend->config_write((union ctl_io *)ctsio); 5073 } else { 5074 mtx_unlock(&lun->lun_lock); 5075 ctl_set_success(ctsio); 5076 ctl_done((union ctl_io *)ctsio); 5077 } 5078 5079 bailout: 5080 5081 return (retval); 5082 } 5083 5084 int 5085 ctl_format(struct ctl_scsiio *ctsio) 5086 { 5087 struct scsi_format *cdb; 5088 struct ctl_lun *lun; 5089 int length, defect_list_len; 5090 5091 CTL_DEBUG_PRINT(("ctl_format\n")); 5092 5093 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5094 5095 cdb = (struct scsi_format *)ctsio->cdb; 5096 5097 length = 0; 5098 if (cdb->byte2 & SF_FMTDATA) { 5099 if (cdb->byte2 & SF_LONGLIST) 5100 length = sizeof(struct scsi_format_header_long); 5101 else 5102 length = sizeof(struct scsi_format_header_short); 5103 } 5104 5105 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5106 && (length > 0)) { 5107 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5108 ctsio->kern_data_len = length; 5109 ctsio->kern_total_len = length; 5110 ctsio->kern_data_resid = 0; 5111 ctsio->kern_rel_offset = 0; 5112 ctsio->kern_sg_entries = 0; 5113 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5114 ctsio->be_move_done = ctl_config_move_done; 5115 ctl_datamove((union ctl_io *)ctsio); 5116 5117 return (CTL_RETVAL_COMPLETE); 5118 } 5119 5120 defect_list_len = 0; 5121 5122 if (cdb->byte2 & SF_FMTDATA) { 5123 if (cdb->byte2 & SF_LONGLIST) { 5124 struct scsi_format_header_long *header; 5125 5126 header = (struct scsi_format_header_long *) 5127 ctsio->kern_data_ptr; 5128 5129 defect_list_len = scsi_4btoul(header->defect_list_len); 5130 if (defect_list_len != 0) { 5131 ctl_set_invalid_field(ctsio, 5132 /*sks_valid*/ 1, 5133 /*command*/ 0, 5134 /*field*/ 2, 5135 /*bit_valid*/ 0, 5136 /*bit*/ 0); 5137 goto bailout; 5138 } 5139 } else { 5140 struct scsi_format_header_short *header; 5141 5142 header = (struct scsi_format_header_short *) 5143 ctsio->kern_data_ptr; 5144 5145 defect_list_len = scsi_2btoul(header->defect_list_len); 5146 if (defect_list_len != 0) { 5147 ctl_set_invalid_field(ctsio, 5148 /*sks_valid*/ 1, 5149 /*command*/ 0, 5150 /*field*/ 2, 5151 /*bit_valid*/ 0, 5152 /*bit*/ 0); 5153 goto bailout; 5154 } 5155 } 5156 } 5157 5158 /* 5159 * The format command will clear out the "Medium format corrupted" 5160 * status if set by the configuration code. That status is really 5161 * just a way to notify the host that we have lost the media, and 5162 * get them to issue a command that will basically make them think 5163 * they're blowing away the media. 
5164 */ 5165 mtx_lock(&lun->lun_lock); 5166 lun->flags &= ~CTL_LUN_INOPERABLE; 5167 mtx_unlock(&lun->lun_lock); 5168 5169 ctl_set_success(ctsio); 5170 bailout: 5171 5172 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5173 free(ctsio->kern_data_ptr, M_CTL); 5174 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5175 } 5176 5177 ctl_done((union ctl_io *)ctsio); 5178 return (CTL_RETVAL_COMPLETE); 5179 } 5180 5181 int 5182 ctl_read_buffer(struct ctl_scsiio *ctsio) 5183 { 5184 struct scsi_read_buffer *cdb; 5185 struct ctl_lun *lun; 5186 int buffer_offset, len; 5187 static uint8_t descr[4]; 5188 static uint8_t echo_descr[4] = { 0 }; 5189 5190 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5191 5192 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5193 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5194 5195 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5196 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5197 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5198 ctl_set_invalid_field(ctsio, 5199 /*sks_valid*/ 1, 5200 /*command*/ 1, 5201 /*field*/ 1, 5202 /*bit_valid*/ 1, 5203 /*bit*/ 4); 5204 ctl_done((union ctl_io *)ctsio); 5205 return (CTL_RETVAL_COMPLETE); 5206 } 5207 5208 len = scsi_3btoul(cdb->length); 5209 buffer_offset = scsi_3btoul(cdb->offset); 5210 5211 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5212 ctl_set_invalid_field(ctsio, 5213 /*sks_valid*/ 1, 5214 /*command*/ 1, 5215 /*field*/ 6, 5216 /*bit_valid*/ 0, 5217 /*bit*/ 0); 5218 ctl_done((union ctl_io *)ctsio); 5219 return (CTL_RETVAL_COMPLETE); 5220 } 5221 5222 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5223 descr[0] = 0; 5224 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5225 ctsio->kern_data_ptr = descr; 5226 len = min(len, sizeof(descr)); 5227 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5228 ctsio->kern_data_ptr = echo_descr; 5229 len = min(len, sizeof(echo_descr)); 5230 } else { 5231 if (lun->write_buffer == NULL) { 5232 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5233 M_CTL, M_WAITOK); 5234 } 5235 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5236 } 5237 ctsio->kern_data_len = len; 5238 ctsio->kern_total_len = len; 5239 ctsio->kern_data_resid = 0; 5240 ctsio->kern_rel_offset = 0; 5241 ctsio->kern_sg_entries = 0; 5242 ctl_set_success(ctsio); 5243 ctsio->be_move_done = ctl_config_move_done; 5244 ctl_datamove((union ctl_io *)ctsio); 5245 return (CTL_RETVAL_COMPLETE); 5246 } 5247 5248 int 5249 ctl_write_buffer(struct ctl_scsiio *ctsio) 5250 { 5251 struct scsi_write_buffer *cdb; 5252 struct ctl_lun *lun; 5253 int buffer_offset, len; 5254 5255 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5256 5257 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5258 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5259 5260 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5261 ctl_set_invalid_field(ctsio, 5262 /*sks_valid*/ 1, 5263 /*command*/ 1, 5264 /*field*/ 1, 5265 /*bit_valid*/ 1, 5266 /*bit*/ 4); 5267 ctl_done((union ctl_io *)ctsio); 5268 return (CTL_RETVAL_COMPLETE); 5269 } 5270 5271 len = scsi_3btoul(cdb->length); 5272 buffer_offset = scsi_3btoul(cdb->offset); 5273 5274 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5275 ctl_set_invalid_field(ctsio, 5276 /*sks_valid*/ 1, 5277 /*command*/ 1, 5278 /*field*/ 6, 5279 /*bit_valid*/ 0, 5280 /*bit*/ 0); 5281 ctl_done((union ctl_io *)ctsio); 5282 return (CTL_RETVAL_COMPLETE); 5283 } 5284 5285 /* 5286 * If we've got a kernel request that hasn't been malloced yet, 5287 * malloc it and tell the caller the data buffer is here. 
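	 *
	 * (This is the same two-pass pattern used throughout this file for
	 * commands that carry a parameter list: on the first pass we allocate
	 * the buffer, set CTL_FLAG_ALLOCATED and be_move_done, and start
	 * ctl_datamove(); once the frontend has moved the data in,
	 * ctl_config_move_done() re-enters ctl_scsiio(), and we come back
	 * here with the flag set and the data in place.)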
5288 */ 5289 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5290 if (lun->write_buffer == NULL) { 5291 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5292 M_CTL, M_WAITOK); 5293 } 5294 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5295 ctsio->kern_data_len = len; 5296 ctsio->kern_total_len = len; 5297 ctsio->kern_data_resid = 0; 5298 ctsio->kern_rel_offset = 0; 5299 ctsio->kern_sg_entries = 0; 5300 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5301 ctsio->be_move_done = ctl_config_move_done; 5302 ctl_datamove((union ctl_io *)ctsio); 5303 5304 return (CTL_RETVAL_COMPLETE); 5305 } 5306 5307 ctl_set_success(ctsio); 5308 ctl_done((union ctl_io *)ctsio); 5309 return (CTL_RETVAL_COMPLETE); 5310 } 5311 5312 int 5313 ctl_write_same(struct ctl_scsiio *ctsio) 5314 { 5315 struct ctl_lun *lun; 5316 struct ctl_lba_len_flags *lbalen; 5317 uint64_t lba; 5318 uint32_t num_blocks; 5319 int len, retval; 5320 uint8_t byte2; 5321 5322 retval = CTL_RETVAL_COMPLETE; 5323 5324 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5325 5326 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5327 5328 switch (ctsio->cdb[0]) { 5329 case WRITE_SAME_10: { 5330 struct scsi_write_same_10 *cdb; 5331 5332 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5333 5334 lba = scsi_4btoul(cdb->addr); 5335 num_blocks = scsi_2btoul(cdb->length); 5336 byte2 = cdb->byte2; 5337 break; 5338 } 5339 case WRITE_SAME_16: { 5340 struct scsi_write_same_16 *cdb; 5341 5342 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5343 5344 lba = scsi_8btou64(cdb->addr); 5345 num_blocks = scsi_4btoul(cdb->length); 5346 byte2 = cdb->byte2; 5347 break; 5348 } 5349 default: 5350 /* 5351 * We got a command we don't support. This shouldn't 5352 * happen, commands should be filtered out above us. 5353 */ 5354 ctl_set_invalid_opcode(ctsio); 5355 ctl_done((union ctl_io *)ctsio); 5356 5357 return (CTL_RETVAL_COMPLETE); 5358 break; /* NOTREACHED */ 5359 } 5360 5361 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5362 if ((byte2 & SWS_UNMAP) == 0 && 5363 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5364 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5365 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5366 ctl_done((union ctl_io *)ctsio); 5367 return (CTL_RETVAL_COMPLETE); 5368 } 5369 5370 /* 5371 * The first check is to make sure we're in bounds, the second 5372 * check is to catch wrap-around problems. If the lba + num blocks 5373 * is less than the lba, then we've wrapped around and the block 5374 * range is invalid anyway. 5375 */ 5376 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5377 || ((lba + num_blocks) < lba)) { 5378 ctl_set_lba_out_of_range(ctsio); 5379 ctl_done((union ctl_io *)ctsio); 5380 return (CTL_RETVAL_COMPLETE); 5381 } 5382 5383 /* Zero number of blocks means "to the last logical block" */ 5384 if (num_blocks == 0) { 5385 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5386 ctl_set_invalid_field(ctsio, 5387 /*sks_valid*/ 0, 5388 /*command*/ 1, 5389 /*field*/ 0, 5390 /*bit_valid*/ 0, 5391 /*bit*/ 0); 5392 ctl_done((union ctl_io *)ctsio); 5393 return (CTL_RETVAL_COMPLETE); 5394 } 5395 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5396 } 5397 5398 len = lun->be_lun->blocksize; 5399 5400 /* 5401 * If we've got a kernel request that hasn't been malloced yet, 5402 * malloc it and tell the caller the data buffer is here. 
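	 *
	 * (Unless the NDOB bit is set, in which case there is no Data-Out
	 * buffer to fetch and the request goes straight to the backend
	 * below.)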
	 */
	if ((byte2 & SWS_NDOB) == 0 &&
	    (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = byte2;
	retval = lun->backend->config_write((union ctl_io *)ctsio);

	return (retval);
}

int
ctl_unmap(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_unmap *cdb;
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_header *hdr;
	struct scsi_unmap_desc *buf, *end, *endnz, *range;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_unmap\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_unmap *)ctsio->cdb;

	len = scsi_2btoul(cdb->length);
	byte2 = cdb->byte2;

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	len = ctsio->kern_total_len - ctsio->kern_data_resid;
	hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
	if (len < sizeof (*hdr) ||
	    len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
	    len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
	    scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 0,
				      /*command*/ 0,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		goto done;
	}
	len = scsi_2btoul(hdr->desc_length);
	buf = (struct scsi_unmap_desc *)(hdr + 1);
	end = buf + len / sizeof(*buf);

	endnz = buf;
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		num_blocks = scsi_4btoul(range->length);
		if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
		 || ((lba + num_blocks) < lba)) {
			ctl_set_lba_out_of_range(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		if (num_blocks != 0)
			endnz = range + 1;
	}

	/*
	 * The block backend cannot handle a zero-length last range.
	 * Filter it out and return if there is nothing left.
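	 *
	 * E.g. (illustrative): a descriptor list of { lba 0 / len 16,
	 * lba 100 / len 0 } is trimmed to just the first descriptor before
	 * being handed to the backend.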
5504 */ 5505 len = (uint8_t *)endnz - (uint8_t *)buf; 5506 if (len == 0) { 5507 ctl_set_success(ctsio); 5508 goto done; 5509 } 5510 5511 mtx_lock(&lun->lun_lock); 5512 ptrlen = (struct ctl_ptr_len_flags *) 5513 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5514 ptrlen->ptr = (void *)buf; 5515 ptrlen->len = len; 5516 ptrlen->flags = byte2; 5517 ctl_check_blocked(lun); 5518 mtx_unlock(&lun->lun_lock); 5519 5520 retval = lun->backend->config_write((union ctl_io *)ctsio); 5521 return (retval); 5522 5523 done: 5524 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5525 free(ctsio->kern_data_ptr, M_CTL); 5526 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5527 } 5528 ctl_done((union ctl_io *)ctsio); 5529 return (CTL_RETVAL_COMPLETE); 5530 } 5531 5532 /* 5533 * Note that this function currently doesn't actually do anything inside 5534 * CTL to enforce things if the DQue bit is turned on. 5535 * 5536 * Also note that this function can't be used in the default case, because 5537 * the DQue bit isn't set in the changeable mask for the control mode page 5538 * anyway. This is just here as an example for how to implement a page 5539 * handler, and a placeholder in case we want to allow the user to turn 5540 * tagged queueing on and off. 5541 * 5542 * The D_SENSE bit handling is functional, however, and will turn 5543 * descriptor sense on and off for a given LUN. 5544 */ 5545 int 5546 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5547 struct ctl_page_index *page_index, uint8_t *page_ptr) 5548 { 5549 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5550 struct ctl_lun *lun; 5551 int set_ua; 5552 uint32_t initidx; 5553 5554 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5555 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5556 set_ua = 0; 5557 5558 user_cp = (struct scsi_control_page *)page_ptr; 5559 current_cp = (struct scsi_control_page *) 5560 (page_index->page_data + (page_index->page_len * 5561 CTL_PAGE_CURRENT)); 5562 saved_cp = (struct scsi_control_page *) 5563 (page_index->page_data + (page_index->page_len * 5564 CTL_PAGE_SAVED)); 5565 5566 mtx_lock(&lun->lun_lock); 5567 if (((current_cp->rlec & SCP_DSENSE) == 0) 5568 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5569 /* 5570 * Descriptor sense is currently turned off and the user 5571 * wants to turn it on. 5572 */ 5573 current_cp->rlec |= SCP_DSENSE; 5574 saved_cp->rlec |= SCP_DSENSE; 5575 lun->flags |= CTL_LUN_SENSE_DESC; 5576 set_ua = 1; 5577 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5578 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5579 /* 5580 * Descriptor sense is currently turned on, and the user 5581 * wants to turn it off. 
5582 */ 5583 current_cp->rlec &= ~SCP_DSENSE; 5584 saved_cp->rlec &= ~SCP_DSENSE; 5585 lun->flags &= ~CTL_LUN_SENSE_DESC; 5586 set_ua = 1; 5587 } 5588 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 5589 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 5590 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5591 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5592 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5593 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5594 set_ua = 1; 5595 } 5596 if ((current_cp->eca_and_aen & SCP_SWP) != 5597 (user_cp->eca_and_aen & SCP_SWP)) { 5598 current_cp->eca_and_aen &= ~SCP_SWP; 5599 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5600 saved_cp->eca_and_aen &= ~SCP_SWP; 5601 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5602 set_ua = 1; 5603 } 5604 if (set_ua != 0) 5605 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5606 mtx_unlock(&lun->lun_lock); 5607 5608 return (0); 5609 } 5610 5611 int 5612 ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 5613 struct ctl_page_index *page_index, uint8_t *page_ptr) 5614 { 5615 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 5616 struct ctl_lun *lun; 5617 int set_ua; 5618 uint32_t initidx; 5619 5620 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5621 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5622 set_ua = 0; 5623 5624 user_cp = (struct scsi_caching_page *)page_ptr; 5625 current_cp = (struct scsi_caching_page *) 5626 (page_index->page_data + (page_index->page_len * 5627 CTL_PAGE_CURRENT)); 5628 saved_cp = (struct scsi_caching_page *) 5629 (page_index->page_data + (page_index->page_len * 5630 CTL_PAGE_SAVED)); 5631 5632 mtx_lock(&lun->lun_lock); 5633 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 5634 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 5635 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5636 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5637 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5638 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5639 set_ua = 1; 5640 } 5641 if (set_ua != 0) 5642 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5643 mtx_unlock(&lun->lun_lock); 5644 5645 return (0); 5646 } 5647 5648 int 5649 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 5650 struct ctl_page_index *page_index, 5651 uint8_t *page_ptr) 5652 { 5653 uint8_t *c; 5654 int i; 5655 5656 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 5657 ctl_time_io_secs = 5658 (c[0] << 8) | 5659 (c[1] << 0) | 5660 0; 5661 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 5662 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 5663 printf("page data:"); 5664 for (i=0; i<8; i++) 5665 printf(" %.2x",page_ptr[i]); 5666 printf("\n"); 5667 return (0); 5668 } 5669 5670 int 5671 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 5672 struct ctl_page_index *page_index, 5673 int pc) 5674 { 5675 struct copan_debugconf_subpage *page; 5676 5677 page = (struct copan_debugconf_subpage *)page_index->page_data + 5678 (page_index->page_len * pc); 5679 5680 switch (pc) { 5681 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5682 case SMS_PAGE_CTRL_DEFAULT >> 6: 5683 case SMS_PAGE_CTRL_SAVED >> 6: 5684 /* 5685 * We don't update the changable or default bits for this page. 
5686 */ 5687 break; 5688 case SMS_PAGE_CTRL_CURRENT >> 6: 5689 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5690 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5691 break; 5692 default: 5693 #ifdef NEEDTOPORT 5694 EPRINT(0, "Invalid PC %d!!", pc); 5695 #endif /* NEEDTOPORT */ 5696 break; 5697 } 5698 return (0); 5699 } 5700 5701 5702 static int 5703 ctl_do_mode_select(union ctl_io *io) 5704 { 5705 struct scsi_mode_page_header *page_header; 5706 struct ctl_page_index *page_index; 5707 struct ctl_scsiio *ctsio; 5708 int control_dev, page_len; 5709 int page_len_offset, page_len_size; 5710 union ctl_modepage_info *modepage_info; 5711 struct ctl_lun *lun; 5712 int *len_left, *len_used; 5713 int retval, i; 5714 5715 ctsio = &io->scsiio; 5716 page_index = NULL; 5717 page_len = 0; 5718 retval = CTL_RETVAL_COMPLETE; 5719 5720 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5721 5722 if (lun->be_lun->lun_type != T_DIRECT) 5723 control_dev = 1; 5724 else 5725 control_dev = 0; 5726 5727 modepage_info = (union ctl_modepage_info *) 5728 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5729 len_left = &modepage_info->header.len_left; 5730 len_used = &modepage_info->header.len_used; 5731 5732 do_next_page: 5733 5734 page_header = (struct scsi_mode_page_header *) 5735 (ctsio->kern_data_ptr + *len_used); 5736 5737 if (*len_left == 0) { 5738 free(ctsio->kern_data_ptr, M_CTL); 5739 ctl_set_success(ctsio); 5740 ctl_done((union ctl_io *)ctsio); 5741 return (CTL_RETVAL_COMPLETE); 5742 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5743 5744 free(ctsio->kern_data_ptr, M_CTL); 5745 ctl_set_param_len_error(ctsio); 5746 ctl_done((union ctl_io *)ctsio); 5747 return (CTL_RETVAL_COMPLETE); 5748 5749 } else if ((page_header->page_code & SMPH_SPF) 5750 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5751 5752 free(ctsio->kern_data_ptr, M_CTL); 5753 ctl_set_param_len_error(ctsio); 5754 ctl_done((union ctl_io *)ctsio); 5755 return (CTL_RETVAL_COMPLETE); 5756 } 5757 5758 5759 /* 5760 * XXX KDM should we do something with the block descriptor? 5761 */ 5762 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5763 5764 if ((control_dev != 0) 5765 && (lun->mode_pages.index[i].page_flags & 5766 CTL_PAGE_FLAG_DISK_ONLY)) 5767 continue; 5768 5769 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 5770 (page_header->page_code & SMPH_PC_MASK)) 5771 continue; 5772 5773 /* 5774 * If neither page has a subpage code, then we've got a 5775 * match. 5776 */ 5777 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 5778 && ((page_header->page_code & SMPH_SPF) == 0)) { 5779 page_index = &lun->mode_pages.index[i]; 5780 page_len = page_header->page_length; 5781 break; 5782 } 5783 5784 /* 5785 * If both pages have subpages, then the subpage numbers 5786 * have to match. 5787 */ 5788 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 5789 && (page_header->page_code & SMPH_SPF)) { 5790 struct scsi_mode_page_header_sp *sph; 5791 5792 sph = (struct scsi_mode_page_header_sp *)page_header; 5793 5794 if (lun->mode_pages.index[i].subpage == 5795 sph->subpage) { 5796 page_index = &lun->mode_pages.index[i]; 5797 page_len = scsi_2btoul(sph->page_length); 5798 break; 5799 } 5800 } 5801 } 5802 5803 /* 5804 * If we couldn't find the page, or if we don't have a mode select 5805 * handler for it, send back an error to the user. 
	 */
	if ((page_index == NULL)
	 || (page_index->select_handler == NULL)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (page_index->page_code & SMPH_SPF) {
		page_len_offset = 2;
		page_len_size = 2;
	} else {
		page_len_size = 1;
		page_len_offset = 1;
	}

	/*
	 * If the length the initiator gives us isn't the one we specify in
	 * the mode page header, or if they didn't specify enough data in
	 * the CDB to avoid truncating this page, kick out the request.
	 */
	if ((page_len != (page_index->page_len - page_len_offset -
			  page_len_size))
	 || (*len_left < page_index->page_len)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + page_len_offset,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Run through the mode page, checking to make sure that the bits
	 * the user changed are actually legal for him to change.
	 */
	for (i = 0; i < page_index->page_len; i++) {
		uint8_t *user_byte, *change_mask, *current_byte;
		int bad_bit;
		int j;

		user_byte = (uint8_t *)page_header + i;
		change_mask = page_index->page_data +
			      (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
		current_byte = page_index->page_data +
			       (page_index->page_len * CTL_PAGE_CURRENT) + i;

		/*
		 * Check to see whether the user set any bits in this byte
		 * that he is not allowed to set.
		 */
		if ((*user_byte & ~(*change_mask)) ==
		    (*current_byte & ~(*change_mask)))
			continue;

		/*
		 * Go through bit by bit to determine which one is illegal.
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
5918 */ 5919 if (*len_left > 0) 5920 goto do_next_page; 5921 5922 ctl_set_success(ctsio); 5923 free(ctsio->kern_data_ptr, M_CTL); 5924 ctl_done((union ctl_io *)ctsio); 5925 5926 bailout_no_done: 5927 5928 return (CTL_RETVAL_COMPLETE); 5929 5930 } 5931 5932 int 5933 ctl_mode_select(struct ctl_scsiio *ctsio) 5934 { 5935 int param_len, pf, sp; 5936 int header_size, bd_len; 5937 int len_left, len_used; 5938 struct ctl_page_index *page_index; 5939 struct ctl_lun *lun; 5940 int control_dev, page_len; 5941 union ctl_modepage_info *modepage_info; 5942 int retval; 5943 5944 pf = 0; 5945 sp = 0; 5946 page_len = 0; 5947 len_used = 0; 5948 len_left = 0; 5949 retval = 0; 5950 bd_len = 0; 5951 page_index = NULL; 5952 5953 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5954 5955 if (lun->be_lun->lun_type != T_DIRECT) 5956 control_dev = 1; 5957 else 5958 control_dev = 0; 5959 5960 switch (ctsio->cdb[0]) { 5961 case MODE_SELECT_6: { 5962 struct scsi_mode_select_6 *cdb; 5963 5964 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 5965 5966 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 5967 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 5968 5969 param_len = cdb->length; 5970 header_size = sizeof(struct scsi_mode_header_6); 5971 break; 5972 } 5973 case MODE_SELECT_10: { 5974 struct scsi_mode_select_10 *cdb; 5975 5976 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 5977 5978 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 5979 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 5980 5981 param_len = scsi_2btoul(cdb->length); 5982 header_size = sizeof(struct scsi_mode_header_10); 5983 break; 5984 } 5985 default: 5986 ctl_set_invalid_opcode(ctsio); 5987 ctl_done((union ctl_io *)ctsio); 5988 return (CTL_RETVAL_COMPLETE); 5989 break; /* NOTREACHED */ 5990 } 5991 5992 /* 5993 * From SPC-3: 5994 * "A parameter list length of zero indicates that the Data-Out Buffer 5995 * shall be empty. This condition shall not be considered as an error." 5996 */ 5997 if (param_len == 0) { 5998 ctl_set_success(ctsio); 5999 ctl_done((union ctl_io *)ctsio); 6000 return (CTL_RETVAL_COMPLETE); 6001 } 6002 6003 /* 6004 * Since we'll hit this the first time through, prior to 6005 * allocation, we don't need to free a data buffer here. 6006 */ 6007 if (param_len < header_size) { 6008 ctl_set_param_len_error(ctsio); 6009 ctl_done((union ctl_io *)ctsio); 6010 return (CTL_RETVAL_COMPLETE); 6011 } 6012 6013 /* 6014 * Allocate the data buffer and grab the user's data. In theory, 6015 * we shouldn't have to sanity check the parameter list length here 6016 * because the maximum size is 64K. We should be able to malloc 6017 * that much without too many problems. 
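	 *
	 * For reference, the Data-Out buffer fetched here is laid out as a
	 * mode parameter header (header_size bytes), an optional block
	 * descriptor (bd_len bytes), and then one or more mode pages;
	 * ctl_do_mode_select() walks the pages starting at offset
	 * header_size + bd_len.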
6018 */ 6019 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6020 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6021 ctsio->kern_data_len = param_len; 6022 ctsio->kern_total_len = param_len; 6023 ctsio->kern_data_resid = 0; 6024 ctsio->kern_rel_offset = 0; 6025 ctsio->kern_sg_entries = 0; 6026 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6027 ctsio->be_move_done = ctl_config_move_done; 6028 ctl_datamove((union ctl_io *)ctsio); 6029 6030 return (CTL_RETVAL_COMPLETE); 6031 } 6032 6033 switch (ctsio->cdb[0]) { 6034 case MODE_SELECT_6: { 6035 struct scsi_mode_header_6 *mh6; 6036 6037 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6038 bd_len = mh6->blk_desc_len; 6039 break; 6040 } 6041 case MODE_SELECT_10: { 6042 struct scsi_mode_header_10 *mh10; 6043 6044 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6045 bd_len = scsi_2btoul(mh10->blk_desc_len); 6046 break; 6047 } 6048 default: 6049 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6050 break; 6051 } 6052 6053 if (param_len < (header_size + bd_len)) { 6054 free(ctsio->kern_data_ptr, M_CTL); 6055 ctl_set_param_len_error(ctsio); 6056 ctl_done((union ctl_io *)ctsio); 6057 return (CTL_RETVAL_COMPLETE); 6058 } 6059 6060 /* 6061 * Set the IO_CONT flag, so that if this I/O gets passed to 6062 * ctl_config_write_done(), it'll get passed back to 6063 * ctl_do_mode_select() for further processing, or completion if 6064 * we're all done. 6065 */ 6066 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6067 ctsio->io_cont = ctl_do_mode_select; 6068 6069 modepage_info = (union ctl_modepage_info *) 6070 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6071 6072 memset(modepage_info, 0, sizeof(*modepage_info)); 6073 6074 len_left = param_len - header_size - bd_len; 6075 len_used = header_size + bd_len; 6076 6077 modepage_info->header.len_left = len_left; 6078 modepage_info->header.len_used = len_used; 6079 6080 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6081 } 6082 6083 int 6084 ctl_mode_sense(struct ctl_scsiio *ctsio) 6085 { 6086 struct ctl_lun *lun; 6087 int pc, page_code, dbd, llba, subpage; 6088 int alloc_len, page_len, header_len, total_len; 6089 struct scsi_mode_block_descr *block_desc; 6090 struct ctl_page_index *page_index; 6091 int control_dev; 6092 6093 dbd = 0; 6094 llba = 0; 6095 block_desc = NULL; 6096 page_index = NULL; 6097 6098 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6099 6100 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6101 6102 if (lun->be_lun->lun_type != T_DIRECT) 6103 control_dev = 1; 6104 else 6105 control_dev = 0; 6106 6107 switch (ctsio->cdb[0]) { 6108 case MODE_SENSE_6: { 6109 struct scsi_mode_sense_6 *cdb; 6110 6111 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6112 6113 header_len = sizeof(struct scsi_mode_hdr_6); 6114 if (cdb->byte2 & SMS_DBD) 6115 dbd = 1; 6116 else 6117 header_len += sizeof(struct scsi_mode_block_descr); 6118 6119 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6120 page_code = cdb->page & SMS_PAGE_CODE; 6121 subpage = cdb->subpage; 6122 alloc_len = cdb->length; 6123 break; 6124 } 6125 case MODE_SENSE_10: { 6126 struct scsi_mode_sense_10 *cdb; 6127 6128 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6129 6130 header_len = sizeof(struct scsi_mode_hdr_10); 6131 6132 if (cdb->byte2 & SMS_DBD) 6133 dbd = 1; 6134 else 6135 header_len += sizeof(struct scsi_mode_block_descr); 6136 if (cdb->byte2 & SMS10_LLBAA) 6137 llba = 1; 6138 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6139 page_code = cdb->page & SMS_PAGE_CODE; 6140 subpage = cdb->subpage; 6141 alloc_len 
= scsi_2btoul(cdb->length); 6142 break; 6143 } 6144 default: 6145 ctl_set_invalid_opcode(ctsio); 6146 ctl_done((union ctl_io *)ctsio); 6147 return (CTL_RETVAL_COMPLETE); 6148 break; /* NOTREACHED */ 6149 } 6150 6151 /* 6152 * We have to make a first pass through to calculate the size of 6153 * the pages that match the user's query. Then we allocate enough 6154 * memory to hold it, and actually copy the data into the buffer. 6155 */ 6156 switch (page_code) { 6157 case SMS_ALL_PAGES_PAGE: { 6158 int i; 6159 6160 page_len = 0; 6161 6162 /* 6163 * At the moment, values other than 0 and 0xff here are 6164 * reserved according to SPC-3. 6165 */ 6166 if ((subpage != SMS_SUBPAGE_PAGE_0) 6167 && (subpage != SMS_SUBPAGE_ALL)) { 6168 ctl_set_invalid_field(ctsio, 6169 /*sks_valid*/ 1, 6170 /*command*/ 1, 6171 /*field*/ 3, 6172 /*bit_valid*/ 0, 6173 /*bit*/ 0); 6174 ctl_done((union ctl_io *)ctsio); 6175 return (CTL_RETVAL_COMPLETE); 6176 } 6177 6178 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6179 if ((control_dev != 0) 6180 && (lun->mode_pages.index[i].page_flags & 6181 CTL_PAGE_FLAG_DISK_ONLY)) 6182 continue; 6183 6184 /* 6185 * We don't use this subpage if the user didn't 6186 * request all subpages. 6187 */ 6188 if ((lun->mode_pages.index[i].subpage != 0) 6189 && (subpage == SMS_SUBPAGE_PAGE_0)) 6190 continue; 6191 6192 #if 0 6193 printf("found page %#x len %d\n", 6194 lun->mode_pages.index[i].page_code & 6195 SMPH_PC_MASK, 6196 lun->mode_pages.index[i].page_len); 6197 #endif 6198 page_len += lun->mode_pages.index[i].page_len; 6199 } 6200 break; 6201 } 6202 default: { 6203 int i; 6204 6205 page_len = 0; 6206 6207 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6208 /* Look for the right page code */ 6209 if ((lun->mode_pages.index[i].page_code & 6210 SMPH_PC_MASK) != page_code) 6211 continue; 6212 6213 /* Look for the right subpage or the subpage wildcard*/ 6214 if ((lun->mode_pages.index[i].subpage != subpage) 6215 && (subpage != SMS_SUBPAGE_ALL)) 6216 continue; 6217 6218 /* Make sure the page is supported for this dev type */ 6219 if ((control_dev != 0) 6220 && (lun->mode_pages.index[i].page_flags & 6221 CTL_PAGE_FLAG_DISK_ONLY)) 6222 continue; 6223 6224 #if 0 6225 printf("found page %#x len %d\n", 6226 lun->mode_pages.index[i].page_code & 6227 SMPH_PC_MASK, 6228 lun->mode_pages.index[i].page_len); 6229 #endif 6230 6231 page_len += lun->mode_pages.index[i].page_len; 6232 } 6233 6234 if (page_len == 0) { 6235 ctl_set_invalid_field(ctsio, 6236 /*sks_valid*/ 1, 6237 /*command*/ 1, 6238 /*field*/ 2, 6239 /*bit_valid*/ 1, 6240 /*bit*/ 5); 6241 ctl_done((union ctl_io *)ctsio); 6242 return (CTL_RETVAL_COMPLETE); 6243 } 6244 break; 6245 } 6246 } 6247 6248 total_len = header_len + page_len; 6249 #if 0 6250 printf("header_len = %d, page_len = %d, total_len = %d\n", 6251 header_len, page_len, total_len); 6252 #endif 6253 6254 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6255 ctsio->kern_sg_entries = 0; 6256 ctsio->kern_data_resid = 0; 6257 ctsio->kern_rel_offset = 0; 6258 if (total_len < alloc_len) { 6259 ctsio->residual = alloc_len - total_len; 6260 ctsio->kern_data_len = total_len; 6261 ctsio->kern_total_len = total_len; 6262 } else { 6263 ctsio->residual = 0; 6264 ctsio->kern_data_len = alloc_len; 6265 ctsio->kern_total_len = alloc_len; 6266 } 6267 6268 switch (ctsio->cdb[0]) { 6269 case MODE_SENSE_6: { 6270 struct scsi_mode_hdr_6 *header; 6271 6272 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6273 6274 header->datalen = MIN(total_len - 1, 254); 6275 if (control_dev == 0) { 
6276 header->dev_specific = 0x10; /* DPOFUA */ 6277 if ((lun->flags & CTL_LUN_READONLY) || 6278 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6279 .eca_and_aen & SCP_SWP) != 0) 6280 header->dev_specific |= 0x80; /* WP */ 6281 } 6282 if (dbd) 6283 header->block_descr_len = 0; 6284 else 6285 header->block_descr_len = 6286 sizeof(struct scsi_mode_block_descr); 6287 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6288 break; 6289 } 6290 case MODE_SENSE_10: { 6291 struct scsi_mode_hdr_10 *header; 6292 int datalen; 6293 6294 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6295 6296 datalen = MIN(total_len - 2, 65533); 6297 scsi_ulto2b(datalen, header->datalen); 6298 if (control_dev == 0) { 6299 header->dev_specific = 0x10; /* DPOFUA */ 6300 if ((lun->flags & CTL_LUN_READONLY) || 6301 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6302 .eca_and_aen & SCP_SWP) != 0) 6303 header->dev_specific |= 0x80; /* WP */ 6304 } 6305 if (dbd) 6306 scsi_ulto2b(0, header->block_descr_len); 6307 else 6308 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6309 header->block_descr_len); 6310 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6311 break; 6312 } 6313 default: 6314 panic("invalid CDB type %#x", ctsio->cdb[0]); 6315 break; /* NOTREACHED */ 6316 } 6317 6318 /* 6319 * If we've got a disk, use its blocksize in the block 6320 * descriptor. Otherwise, just set it to 0. 6321 */ 6322 if (dbd == 0) { 6323 if (control_dev == 0) 6324 scsi_ulto3b(lun->be_lun->blocksize, 6325 block_desc->block_len); 6326 else 6327 scsi_ulto3b(0, block_desc->block_len); 6328 } 6329 6330 switch (page_code) { 6331 case SMS_ALL_PAGES_PAGE: { 6332 int i, data_used; 6333 6334 data_used = header_len; 6335 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6336 struct ctl_page_index *page_index; 6337 6338 page_index = &lun->mode_pages.index[i]; 6339 6340 if ((control_dev != 0) 6341 && (page_index->page_flags & 6342 CTL_PAGE_FLAG_DISK_ONLY)) 6343 continue; 6344 6345 /* 6346 * We don't use this subpage if the user didn't 6347 * request all subpages. We already checked (above) 6348 * to make sure the user only specified a subpage 6349 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6350 */ 6351 if ((page_index->subpage != 0) 6352 && (subpage == SMS_SUBPAGE_PAGE_0)) 6353 continue; 6354 6355 /* 6356 * Call the handler, if it exists, to update the 6357 * page to the latest values. 6358 */ 6359 if (page_index->sense_handler != NULL) 6360 page_index->sense_handler(ctsio, page_index,pc); 6361 6362 memcpy(ctsio->kern_data_ptr + data_used, 6363 page_index->page_data + 6364 (page_index->page_len * pc), 6365 page_index->page_len); 6366 data_used += page_index->page_len; 6367 } 6368 break; 6369 } 6370 default: { 6371 int i, data_used; 6372 6373 data_used = header_len; 6374 6375 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6376 struct ctl_page_index *page_index; 6377 6378 page_index = &lun->mode_pages.index[i]; 6379 6380 /* Look for the right page code */ 6381 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6382 continue; 6383 6384 /* Look for the right subpage or the subpage wildcard*/ 6385 if ((page_index->subpage != subpage) 6386 && (subpage != SMS_SUBPAGE_ALL)) 6387 continue; 6388 6389 /* Make sure the page is supported for this dev type */ 6390 if ((control_dev != 0) 6391 && (page_index->page_flags & 6392 CTL_PAGE_FLAG_DISK_ONLY)) 6393 continue; 6394 6395 /* 6396 * Call the handler, if it exists, to update the 6397 * page to the latest values. 
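 * (page_data stores the current, changeable, default and saved copies of the page back to back, which is why the memcpy below indexes it by page_len * pc.)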
6398 */ 6399 if (page_index->sense_handler != NULL) 6400 page_index->sense_handler(ctsio, page_index,pc); 6401 6402 memcpy(ctsio->kern_data_ptr + data_used, 6403 page_index->page_data + 6404 (page_index->page_len * pc), 6405 page_index->page_len); 6406 data_used += page_index->page_len; 6407 } 6408 break; 6409 } 6410 } 6411 6412 ctl_set_success(ctsio); 6413 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6414 ctsio->be_move_done = ctl_config_move_done; 6415 ctl_datamove((union ctl_io *)ctsio); 6416 return (CTL_RETVAL_COMPLETE); 6417 } 6418 6419 int 6420 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6421 struct ctl_page_index *page_index, 6422 int pc) 6423 { 6424 struct ctl_lun *lun; 6425 struct scsi_log_param_header *phdr; 6426 uint8_t *data; 6427 uint64_t val; 6428 6429 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6430 data = page_index->page_data; 6431 6432 if (lun->backend->lun_attr != NULL && 6433 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6434 != UINT64_MAX) { 6435 phdr = (struct scsi_log_param_header *)data; 6436 scsi_ulto2b(0x0001, phdr->param_code); 6437 phdr->param_control = SLP_LBIN | SLP_LP; 6438 phdr->param_len = 8; 6439 data = (uint8_t *)(phdr + 1); 6440 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6441 data[4] = 0x02; /* per-pool */ 6442 data += phdr->param_len; 6443 } 6444 6445 if (lun->backend->lun_attr != NULL && 6446 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6447 != UINT64_MAX) { 6448 phdr = (struct scsi_log_param_header *)data; 6449 scsi_ulto2b(0x0002, phdr->param_code); 6450 phdr->param_control = SLP_LBIN | SLP_LP; 6451 phdr->param_len = 8; 6452 data = (uint8_t *)(phdr + 1); 6453 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6454 data[4] = 0x01; /* per-LUN */ 6455 data += phdr->param_len; 6456 } 6457 6458 if (lun->backend->lun_attr != NULL && 6459 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6460 != UINT64_MAX) { 6461 phdr = (struct scsi_log_param_header *)data; 6462 scsi_ulto2b(0x00f1, phdr->param_code); 6463 phdr->param_control = SLP_LBIN | SLP_LP; 6464 phdr->param_len = 8; 6465 data = (uint8_t *)(phdr + 1); 6466 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6467 data[4] = 0x02; /* per-pool */ 6468 data += phdr->param_len; 6469 } 6470 6471 if (lun->backend->lun_attr != NULL && 6472 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6473 != UINT64_MAX) { 6474 phdr = (struct scsi_log_param_header *)data; 6475 scsi_ulto2b(0x00f2, phdr->param_code); 6476 phdr->param_control = SLP_LBIN | SLP_LP; 6477 phdr->param_len = 8; 6478 data = (uint8_t *)(phdr + 1); 6479 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6480 data[4] = 0x02; /* per-pool */ 6481 data += phdr->param_len; 6482 } 6483 6484 page_index->page_len = data - page_index->page_data; 6485 return (0); 6486 } 6487 6488 int 6489 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6490 struct ctl_page_index *page_index, 6491 int pc) 6492 { 6493 struct ctl_lun *lun; 6494 struct stat_page *data; 6495 uint64_t rn, wn, rb, wb; 6496 struct bintime rt, wt; 6497 int i; 6498 6499 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6500 data = (struct stat_page *)page_index->page_data; 6501 6502 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6503 data->sap.hdr.param_control = SLP_LBIN; 6504 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6505 sizeof(struct scsi_log_param_header); 6506 rn = wn = rb = wb = 0; 6507 bintime_clear(&rt); 6508 bintime_clear(&wt); 6509 for (i = 0; i < 
CTL_MAX_PORTS; i++) { 6510 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6511 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6512 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6513 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6514 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6515 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]); 6516 } 6517 scsi_u64to8b(rn, data->sap.read_num); 6518 scsi_u64to8b(wn, data->sap.write_num); 6519 if (lun->stats.blocksize > 0) { 6520 scsi_u64to8b(wb / lun->stats.blocksize, 6521 data->sap.recvieved_lba); 6522 scsi_u64to8b(rb / lun->stats.blocksize, 6523 data->sap.transmitted_lba); 6524 } 6525 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6526 data->sap.read_int); 6527 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6528 data->sap.write_int); 6529 scsi_u64to8b(0, data->sap.weighted_num); 6530 scsi_u64to8b(0, data->sap.weighted_int); 6531 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6532 data->it.hdr.param_control = SLP_LBIN; 6533 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6534 sizeof(struct scsi_log_param_header); 6535 #ifdef CTL_TIME_IO 6536 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6537 #endif 6538 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6539 data->ti.hdr.param_control = SLP_LBIN; 6540 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6541 sizeof(struct scsi_log_param_header); 6542 scsi_ulto4b(3, data->ti.exponent); 6543 scsi_ulto4b(1, data->ti.integer); 6544 6545 page_index->page_len = sizeof(*data); 6546 return (0); 6547 } 6548 6549 int 6550 ctl_log_sense(struct ctl_scsiio *ctsio) 6551 { 6552 struct ctl_lun *lun; 6553 int i, pc, page_code, subpage; 6554 int alloc_len, total_len; 6555 struct ctl_page_index *page_index; 6556 struct scsi_log_sense *cdb; 6557 struct scsi_log_header *header; 6558 6559 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6560 6561 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6562 cdb = (struct scsi_log_sense *)ctsio->cdb; 6563 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6564 page_code = cdb->page & SLS_PAGE_CODE; 6565 subpage = cdb->subpage; 6566 alloc_len = scsi_2btoul(cdb->length); 6567 6568 page_index = NULL; 6569 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6570 page_index = &lun->log_pages.index[i]; 6571 6572 /* Look for the right page code */ 6573 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6574 continue; 6575 6576 /* Look for the right subpage or the subpage wildcard*/ 6577 if (page_index->subpage != subpage) 6578 continue; 6579 6580 break; 6581 } 6582 if (i >= CTL_NUM_LOG_PAGES) { 6583 ctl_set_invalid_field(ctsio, 6584 /*sks_valid*/ 1, 6585 /*command*/ 1, 6586 /*field*/ 2, 6587 /*bit_valid*/ 0, 6588 /*bit*/ 0); 6589 ctl_done((union ctl_io *)ctsio); 6590 return (CTL_RETVAL_COMPLETE); 6591 } 6592 6593 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6594 6595 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6596 ctsio->kern_sg_entries = 0; 6597 ctsio->kern_data_resid = 0; 6598 ctsio->kern_rel_offset = 0; 6599 if (total_len < alloc_len) { 6600 ctsio->residual = alloc_len - total_len; 6601 ctsio->kern_data_len = total_len; 6602 ctsio->kern_total_len = total_len; 6603 } else { 6604 ctsio->residual = 0; 6605 ctsio->kern_data_len = alloc_len; 6606 ctsio->kern_total_len = alloc_len; 6607 } 6608 6609 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6610 header->page = page_index->page_code; 6611 if
(page_index->subpage) { 6612 header->page |= SL_SPF; 6613 header->subpage = page_index->subpage; 6614 } 6615 scsi_ulto2b(page_index->page_len, header->datalen); 6616 6617 /* 6618 * Call the handler, if it exists, to update the 6619 * page to the latest values. 6620 */ 6621 if (page_index->sense_handler != NULL) 6622 page_index->sense_handler(ctsio, page_index, pc); 6623 6624 memcpy(header + 1, page_index->page_data, page_index->page_len); 6625 6626 ctl_set_success(ctsio); 6627 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6628 ctsio->be_move_done = ctl_config_move_done; 6629 ctl_datamove((union ctl_io *)ctsio); 6630 return (CTL_RETVAL_COMPLETE); 6631 } 6632 6633 int 6634 ctl_read_capacity(struct ctl_scsiio *ctsio) 6635 { 6636 struct scsi_read_capacity *cdb; 6637 struct scsi_read_capacity_data *data; 6638 struct ctl_lun *lun; 6639 uint32_t lba; 6640 6641 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6642 6643 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6644 6645 lba = scsi_4btoul(cdb->addr); 6646 if (((cdb->pmi & SRC_PMI) == 0) 6647 && (lba != 0)) { 6648 ctl_set_invalid_field(/*ctsio*/ ctsio, 6649 /*sks_valid*/ 1, 6650 /*command*/ 1, 6651 /*field*/ 2, 6652 /*bit_valid*/ 0, 6653 /*bit*/ 0); 6654 ctl_done((union ctl_io *)ctsio); 6655 return (CTL_RETVAL_COMPLETE); 6656 } 6657 6658 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6659 6660 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6661 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6662 ctsio->residual = 0; 6663 ctsio->kern_data_len = sizeof(*data); 6664 ctsio->kern_total_len = sizeof(*data); 6665 ctsio->kern_data_resid = 0; 6666 ctsio->kern_rel_offset = 0; 6667 ctsio->kern_sg_entries = 0; 6668 6669 /* 6670 * If the maximum LBA is greater than 0xfffffffe, the user must 6671 * issue a SERVICE ACTION IN (16) command, with the read capacity 6672 * service action set. 6673 */ 6674 if (lun->be_lun->maxlba > 0xfffffffe) 6675 scsi_ulto4b(0xffffffff, data->addr); 6676 else 6677 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6678 6679 /* 6680 * XXX KDM this may not be 512 bytes...
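 * (READ CAPACITY(10) reports the logical block length in bytes, so we return whatever block size the backend configured for this LUN.)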
6681 */ 6682 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6683 6684 ctl_set_success(ctsio); 6685 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6686 ctsio->be_move_done = ctl_config_move_done; 6687 ctl_datamove((union ctl_io *)ctsio); 6688 return (CTL_RETVAL_COMPLETE); 6689 } 6690 6691 int 6692 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6693 { 6694 struct scsi_read_capacity_16 *cdb; 6695 struct scsi_read_capacity_data_long *data; 6696 struct ctl_lun *lun; 6697 uint64_t lba; 6698 uint32_t alloc_len; 6699 6700 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6701 6702 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6703 6704 alloc_len = scsi_4btoul(cdb->alloc_len); 6705 lba = scsi_8btou64(cdb->addr); 6706 6707 if ((cdb->reladr & SRC16_PMI) 6708 && (lba != 0)) { 6709 ctl_set_invalid_field(/*ctsio*/ ctsio, 6710 /*sks_valid*/ 1, 6711 /*command*/ 1, 6712 /*field*/ 2, 6713 /*bit_valid*/ 0, 6714 /*bit*/ 0); 6715 ctl_done((union ctl_io *)ctsio); 6716 return (CTL_RETVAL_COMPLETE); 6717 } 6718 6719 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6720 6721 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6722 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6723 6724 if (sizeof(*data) < alloc_len) { 6725 ctsio->residual = alloc_len - sizeof(*data); 6726 ctsio->kern_data_len = sizeof(*data); 6727 ctsio->kern_total_len = sizeof(*data); 6728 } else { 6729 ctsio->residual = 0; 6730 ctsio->kern_data_len = alloc_len; 6731 ctsio->kern_total_len = alloc_len; 6732 } 6733 ctsio->kern_data_resid = 0; 6734 ctsio->kern_rel_offset = 0; 6735 ctsio->kern_sg_entries = 0; 6736 6737 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6738 /* XXX KDM this may not be 512 bytes... */ 6739 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6740 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6741 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6742 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6743 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6744 6745 ctl_set_success(ctsio); 6746 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6747 ctsio->be_move_done = ctl_config_move_done; 6748 ctl_datamove((union ctl_io *)ctsio); 6749 return (CTL_RETVAL_COMPLETE); 6750 } 6751 6752 int 6753 ctl_get_lba_status(struct ctl_scsiio *ctsio) 6754 { 6755 struct scsi_get_lba_status *cdb; 6756 struct scsi_get_lba_status_data *data; 6757 struct ctl_lun *lun; 6758 struct ctl_lba_len_flags *lbalen; 6759 uint64_t lba; 6760 uint32_t alloc_len, total_len; 6761 int retval; 6762 6763 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 6764 6765 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6766 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 6767 lba = scsi_8btou64(cdb->addr); 6768 alloc_len = scsi_4btoul(cdb->alloc_len); 6769 6770 if (lba > lun->be_lun->maxlba) { 6771 ctl_set_lba_out_of_range(ctsio); 6772 ctl_done((union ctl_io *)ctsio); 6773 return (CTL_RETVAL_COMPLETE); 6774 } 6775 6776 total_len = sizeof(*data) + sizeof(data->descr[0]); 6777 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6778 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 6779 6780 if (total_len < alloc_len) { 6781 ctsio->residual = alloc_len - total_len; 6782 ctsio->kern_data_len = total_len; 6783 ctsio->kern_total_len = total_len; 6784 } else { 6785 ctsio->residual = 0; 6786 ctsio->kern_data_len = alloc_len; 6787 ctsio->kern_total_len = alloc_len; 6788 } 6789 ctsio->kern_data_resid = 0; 6790 ctsio->kern_rel_offset = 0; 6791 
ctsio->kern_sg_entries = 0; 6792 6793 /* Fill dummy data in case backend can't tell anything. */ 6794 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 6795 scsi_u64to8b(lba, data->descr[0].addr); 6796 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 6797 data->descr[0].length); 6798 data->descr[0].status = 0; /* Mapped or unknown. */ 6799 6800 ctl_set_success(ctsio); 6801 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6802 ctsio->be_move_done = ctl_config_move_done; 6803 6804 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6805 lbalen->lba = lba; 6806 lbalen->len = total_len; 6807 lbalen->flags = 0; 6808 retval = lun->backend->config_read((union ctl_io *)ctsio); 6809 return (CTL_RETVAL_COMPLETE); 6810 } 6811 6812 int 6813 ctl_read_defect(struct ctl_scsiio *ctsio) 6814 { 6815 struct scsi_read_defect_data_10 *ccb10; 6816 struct scsi_read_defect_data_12 *ccb12; 6817 struct scsi_read_defect_data_hdr_10 *data10; 6818 struct scsi_read_defect_data_hdr_12 *data12; 6819 uint32_t alloc_len, data_len; 6820 uint8_t format; 6821 6822 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 6823 6824 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6825 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 6826 format = ccb10->format; 6827 alloc_len = scsi_2btoul(ccb10->alloc_length); 6828 data_len = sizeof(*data10); 6829 } else { 6830 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 6831 format = ccb12->format; 6832 alloc_len = scsi_4btoul(ccb12->alloc_length); 6833 data_len = sizeof(*data12); 6834 } 6835 if (alloc_len == 0) { 6836 ctl_set_success(ctsio); 6837 ctl_done((union ctl_io *)ctsio); 6838 return (CTL_RETVAL_COMPLETE); 6839 } 6840 6841 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 6842 if (data_len < alloc_len) { 6843 ctsio->residual = alloc_len - data_len; 6844 ctsio->kern_data_len = data_len; 6845 ctsio->kern_total_len = data_len; 6846 } else { 6847 ctsio->residual = 0; 6848 ctsio->kern_data_len = alloc_len; 6849 ctsio->kern_total_len = alloc_len; 6850 } 6851 ctsio->kern_data_resid = 0; 6852 ctsio->kern_rel_offset = 0; 6853 ctsio->kern_sg_entries = 0; 6854 6855 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6856 data10 = (struct scsi_read_defect_data_hdr_10 *) 6857 ctsio->kern_data_ptr; 6858 data10->format = format; 6859 scsi_ulto2b(0, data10->length); 6860 } else { 6861 data12 = (struct scsi_read_defect_data_hdr_12 *) 6862 ctsio->kern_data_ptr; 6863 data12->format = format; 6864 scsi_ulto2b(0, data12->generation); 6865 scsi_ulto4b(0, data12->length); 6866 } 6867 6868 ctl_set_success(ctsio); 6869 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6870 ctsio->be_move_done = ctl_config_move_done; 6871 ctl_datamove((union ctl_io *)ctsio); 6872 return (CTL_RETVAL_COMPLETE); 6873 } 6874 6875 int 6876 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 6877 { 6878 struct scsi_maintenance_in *cdb; 6879 int retval; 6880 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; 6881 int num_target_port_groups, num_target_ports; 6882 struct ctl_lun *lun; 6883 struct ctl_softc *softc; 6884 struct ctl_port *port; 6885 struct scsi_target_group_data *rtg_ptr; 6886 struct scsi_target_group_data_extended *rtg_ext_ptr; 6887 struct scsi_target_port_group_descriptor *tpg_desc; 6888 6889 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 6890 6891 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 6892 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6893 softc = lun->ctl_softc; 6894 6895 retval = CTL_RETVAL_COMPLETE; 6896 6897 switch 
(cdb->byte2 & STG_PDF_MASK) { 6898 case STG_PDF_LENGTH: 6899 ext = 0; 6900 break; 6901 case STG_PDF_EXTENDED: 6902 ext = 1; 6903 break; 6904 default: 6905 ctl_set_invalid_field(/*ctsio*/ ctsio, 6906 /*sks_valid*/ 1, 6907 /*command*/ 1, 6908 /*field*/ 2, 6909 /*bit_valid*/ 1, 6910 /*bit*/ 5); 6911 ctl_done((union ctl_io *)ctsio); 6912 return(retval); 6913 } 6914 6915 if (softc->is_single) 6916 num_target_port_groups = 1; 6917 else 6918 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 6919 num_target_ports = 0; 6920 mtx_lock(&softc->ctl_lock); 6921 STAILQ_FOREACH(port, &softc->port_list, links) { 6922 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6923 continue; 6924 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 6925 continue; 6926 num_target_ports++; 6927 } 6928 mtx_unlock(&softc->ctl_lock); 6929 6930 if (ext) 6931 total_len = sizeof(struct scsi_target_group_data_extended); 6932 else 6933 total_len = sizeof(struct scsi_target_group_data); 6934 total_len += sizeof(struct scsi_target_port_group_descriptor) * 6935 num_target_port_groups + 6936 sizeof(struct scsi_target_port_descriptor) * 6937 num_target_ports * num_target_port_groups; 6938 6939 alloc_len = scsi_4btoul(cdb->length); 6940 6941 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6942 6943 ctsio->kern_sg_entries = 0; 6944 6945 if (total_len < alloc_len) { 6946 ctsio->residual = alloc_len - total_len; 6947 ctsio->kern_data_len = total_len; 6948 ctsio->kern_total_len = total_len; 6949 } else { 6950 ctsio->residual = 0; 6951 ctsio->kern_data_len = alloc_len; 6952 ctsio->kern_total_len = alloc_len; 6953 } 6954 ctsio->kern_data_resid = 0; 6955 ctsio->kern_rel_offset = 0; 6956 6957 if (ext) { 6958 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 6959 ctsio->kern_data_ptr; 6960 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 6961 rtg_ext_ptr->format_type = 0x10; 6962 rtg_ext_ptr->implicit_transition_time = 0; 6963 tpg_desc = &rtg_ext_ptr->groups[0]; 6964 } else { 6965 rtg_ptr = (struct scsi_target_group_data *) 6966 ctsio->kern_data_ptr; 6967 scsi_ulto4b(total_len - 4, rtg_ptr->length); 6968 tpg_desc = &rtg_ptr->groups[0]; 6969 } 6970 6971 mtx_lock(&softc->ctl_lock); 6972 pg = softc->port_offset / CTL_MAX_PORTS; 6973 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { 6974 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { 6975 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6976 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 6977 } else if (lun->flags & CTL_LUN_PRIMARY_SC) { 6978 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6979 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6980 } else { 6981 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6982 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6983 } 6984 } else { 6985 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 6986 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6987 } 6988 for (g = 0; g < num_target_port_groups; g++) { 6989 tpg_desc->pref_state = (g == pg) ? gs : os; 6990 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; 6991 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 6992 tpg_desc->status = TPG_IMPLICIT; 6993 pc = 0; 6994 STAILQ_FOREACH(port, &softc->port_list, links) { 6995 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6996 continue; 6997 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 6998 continue; 6999 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 7000 scsi_ulto2b(p, tpg_desc->descriptors[pc]. 
7001 relative_target_port_identifier); 7002 pc++; 7003 } 7004 tpg_desc->target_port_count = pc; 7005 tpg_desc = (struct scsi_target_port_group_descriptor *) 7006 &tpg_desc->descriptors[pc]; 7007 } 7008 mtx_unlock(&softc->ctl_lock); 7009 7010 ctl_set_success(ctsio); 7011 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7012 ctsio->be_move_done = ctl_config_move_done; 7013 ctl_datamove((union ctl_io *)ctsio); 7014 return(retval); 7015 } 7016 7017 int 7018 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7019 { 7020 struct ctl_lun *lun; 7021 struct scsi_report_supported_opcodes *cdb; 7022 const struct ctl_cmd_entry *entry, *sentry; 7023 struct scsi_report_supported_opcodes_all *all; 7024 struct scsi_report_supported_opcodes_descr *descr; 7025 struct scsi_report_supported_opcodes_one *one; 7026 int retval; 7027 int alloc_len, total_len; 7028 int opcode, service_action, i, j, num; 7029 7030 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7031 7032 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7033 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7034 7035 retval = CTL_RETVAL_COMPLETE; 7036 7037 opcode = cdb->requested_opcode; 7038 service_action = scsi_2btoul(cdb->requested_service_action); 7039 switch (cdb->options & RSO_OPTIONS_MASK) { 7040 case RSO_OPTIONS_ALL: 7041 num = 0; 7042 for (i = 0; i < 256; i++) { 7043 entry = &ctl_cmd_table[i]; 7044 if (entry->flags & CTL_CMD_FLAG_SA5) { 7045 for (j = 0; j < 32; j++) { 7046 sentry = &((const struct ctl_cmd_entry *) 7047 entry->execute)[j]; 7048 if (ctl_cmd_applicable( 7049 lun->be_lun->lun_type, sentry)) 7050 num++; 7051 } 7052 } else { 7053 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7054 entry)) 7055 num++; 7056 } 7057 } 7058 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7059 num * sizeof(struct scsi_report_supported_opcodes_descr); 7060 break; 7061 case RSO_OPTIONS_OC: 7062 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7063 ctl_set_invalid_field(/*ctsio*/ ctsio, 7064 /*sks_valid*/ 1, 7065 /*command*/ 1, 7066 /*field*/ 2, 7067 /*bit_valid*/ 1, 7068 /*bit*/ 2); 7069 ctl_done((union ctl_io *)ctsio); 7070 return (CTL_RETVAL_COMPLETE); 7071 } 7072 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7073 break; 7074 case RSO_OPTIONS_OC_SA: 7075 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7076 service_action >= 32) { 7077 ctl_set_invalid_field(/*ctsio*/ ctsio, 7078 /*sks_valid*/ 1, 7079 /*command*/ 1, 7080 /*field*/ 2, 7081 /*bit_valid*/ 1, 7082 /*bit*/ 2); 7083 ctl_done((union ctl_io *)ctsio); 7084 return (CTL_RETVAL_COMPLETE); 7085 } 7086 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7087 break; 7088 default: 7089 ctl_set_invalid_field(/*ctsio*/ ctsio, 7090 /*sks_valid*/ 1, 7091 /*command*/ 1, 7092 /*field*/ 2, 7093 /*bit_valid*/ 1, 7094 /*bit*/ 2); 7095 ctl_done((union ctl_io *)ctsio); 7096 return (CTL_RETVAL_COMPLETE); 7097 } 7098 7099 alloc_len = scsi_4btoul(cdb->length); 7100 7101 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7102 7103 ctsio->kern_sg_entries = 0; 7104 7105 if (total_len < alloc_len) { 7106 ctsio->residual = alloc_len - total_len; 7107 ctsio->kern_data_len = total_len; 7108 ctsio->kern_total_len = total_len; 7109 } else { 7110 ctsio->residual = 0; 7111 ctsio->kern_data_len = alloc_len; 7112 ctsio->kern_total_len = alloc_len; 7113 } 7114 ctsio->kern_data_resid = 0; 7115 ctsio->kern_rel_offset = 0; 7116 7117 switch (cdb->options & RSO_OPTIONS_MASK) { 7118 case RSO_OPTIONS_ALL: 7119 all = (struct 
scsi_report_supported_opcodes_all *) 7120 ctsio->kern_data_ptr; 7121 num = 0; 7122 for (i = 0; i < 256; i++) { 7123 entry = &ctl_cmd_table[i]; 7124 if (entry->flags & CTL_CMD_FLAG_SA5) { 7125 for (j = 0; j < 32; j++) { 7126 sentry = &((const struct ctl_cmd_entry *) 7127 entry->execute)[j]; 7128 if (!ctl_cmd_applicable( 7129 lun->be_lun->lun_type, sentry)) 7130 continue; 7131 descr = &all->descr[num++]; 7132 descr->opcode = i; 7133 scsi_ulto2b(j, descr->service_action); 7134 descr->flags = RSO_SERVACTV; 7135 scsi_ulto2b(sentry->length, 7136 descr->cdb_length); 7137 } 7138 } else { 7139 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7140 entry)) 7141 continue; 7142 descr = &all->descr[num++]; 7143 descr->opcode = i; 7144 scsi_ulto2b(0, descr->service_action); 7145 descr->flags = 0; 7146 scsi_ulto2b(entry->length, descr->cdb_length); 7147 } 7148 } 7149 scsi_ulto4b( 7150 num * sizeof(struct scsi_report_supported_opcodes_descr), 7151 all->length); 7152 break; 7153 case RSO_OPTIONS_OC: 7154 one = (struct scsi_report_supported_opcodes_one *) 7155 ctsio->kern_data_ptr; 7156 entry = &ctl_cmd_table[opcode]; 7157 goto fill_one; 7158 case RSO_OPTIONS_OC_SA: 7159 one = (struct scsi_report_supported_opcodes_one *) 7160 ctsio->kern_data_ptr; 7161 entry = &ctl_cmd_table[opcode]; 7162 entry = &((const struct ctl_cmd_entry *) 7163 entry->execute)[service_action]; 7164 fill_one: 7165 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7166 one->support = 3; 7167 scsi_ulto2b(entry->length, one->cdb_length); 7168 one->cdb_usage[0] = opcode; 7169 memcpy(&one->cdb_usage[1], entry->usage, 7170 entry->length - 1); 7171 } else 7172 one->support = 1; 7173 break; 7174 } 7175 7176 ctl_set_success(ctsio); 7177 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7178 ctsio->be_move_done = ctl_config_move_done; 7179 ctl_datamove((union ctl_io *)ctsio); 7180 return(retval); 7181 } 7182 7183 int 7184 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7185 { 7186 struct scsi_report_supported_tmf *cdb; 7187 struct scsi_report_supported_tmf_data *data; 7188 int retval; 7189 int alloc_len, total_len; 7190 7191 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7192 7193 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7194 7195 retval = CTL_RETVAL_COMPLETE; 7196 7197 total_len = sizeof(struct scsi_report_supported_tmf_data); 7198 alloc_len = scsi_4btoul(cdb->length); 7199 7200 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7201 7202 ctsio->kern_sg_entries = 0; 7203 7204 if (total_len < alloc_len) { 7205 ctsio->residual = alloc_len - total_len; 7206 ctsio->kern_data_len = total_len; 7207 ctsio->kern_total_len = total_len; 7208 } else { 7209 ctsio->residual = 0; 7210 ctsio->kern_data_len = alloc_len; 7211 ctsio->kern_total_len = alloc_len; 7212 } 7213 ctsio->kern_data_resid = 0; 7214 ctsio->kern_rel_offset = 0; 7215 7216 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7217 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 7218 data->byte2 |= RST_ITNRS; 7219 7220 ctl_set_success(ctsio); 7221 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7222 ctsio->be_move_done = ctl_config_move_done; 7223 ctl_datamove((union ctl_io *)ctsio); 7224 return (retval); 7225 } 7226 7227 int 7228 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7229 { 7230 struct scsi_report_timestamp *cdb; 7231 struct scsi_report_timestamp_data *data; 7232 struct timeval tv; 7233 int64_t timestamp; 7234 int retval; 7235 int alloc_len, total_len; 7236 7237 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7238 7239 cdb = 
(struct scsi_report_timestamp *)ctsio->cdb; 7240 7241 retval = CTL_RETVAL_COMPLETE; 7242 7243 total_len = sizeof(struct scsi_report_timestamp_data); 7244 alloc_len = scsi_4btoul(cdb->length); 7245 7246 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7247 7248 ctsio->kern_sg_entries = 0; 7249 7250 if (total_len < alloc_len) { 7251 ctsio->residual = alloc_len - total_len; 7252 ctsio->kern_data_len = total_len; 7253 ctsio->kern_total_len = total_len; 7254 } else { 7255 ctsio->residual = 0; 7256 ctsio->kern_data_len = alloc_len; 7257 ctsio->kern_total_len = alloc_len; 7258 } 7259 ctsio->kern_data_resid = 0; 7260 ctsio->kern_rel_offset = 0; 7261 7262 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7263 scsi_ulto2b(sizeof(*data) - 2, data->length); 7264 data->origin = RTS_ORIG_OUTSIDE; 7265 getmicrotime(&tv); 7266 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7267 scsi_ulto4b(timestamp >> 16, data->timestamp); 7268 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7269 7270 ctl_set_success(ctsio); 7271 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7272 ctsio->be_move_done = ctl_config_move_done; 7273 ctl_datamove((union ctl_io *)ctsio); 7274 return (retval); 7275 } 7276 7277 int 7278 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7279 { 7280 struct scsi_per_res_in *cdb; 7281 int alloc_len, total_len = 0; 7282 /* struct scsi_per_res_in_rsrv in_data; */ 7283 struct ctl_lun *lun; 7284 struct ctl_softc *softc; 7285 uint64_t key; 7286 7287 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7288 7289 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7290 7291 alloc_len = scsi_2btoul(cdb->length); 7292 7293 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7294 softc = lun->ctl_softc; 7295 7296 retry: 7297 mtx_lock(&lun->lun_lock); 7298 switch (cdb->action) { 7299 case SPRI_RK: /* read keys */ 7300 total_len = sizeof(struct scsi_per_res_in_keys) + 7301 lun->pr_key_count * 7302 sizeof(struct scsi_per_res_key); 7303 break; 7304 case SPRI_RR: /* read reservation */ 7305 if (lun->flags & CTL_LUN_PR_RESERVED) 7306 total_len = sizeof(struct scsi_per_res_in_rsrv); 7307 else 7308 total_len = sizeof(struct scsi_per_res_in_header); 7309 break; 7310 case SPRI_RC: /* report capabilities */ 7311 total_len = sizeof(struct scsi_per_res_cap); 7312 break; 7313 case SPRI_RS: /* read full status */ 7314 total_len = sizeof(struct scsi_per_res_in_header) + 7315 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7316 lun->pr_key_count; 7317 break; 7318 default: 7319 panic("Invalid PR type %x", cdb->action); 7320 } 7321 mtx_unlock(&lun->lun_lock); 7322 7323 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7324 7325 if (total_len < alloc_len) { 7326 ctsio->residual = alloc_len - total_len; 7327 ctsio->kern_data_len = total_len; 7328 ctsio->kern_total_len = total_len; 7329 } else { 7330 ctsio->residual = 0; 7331 ctsio->kern_data_len = alloc_len; 7332 ctsio->kern_total_len = alloc_len; 7333 } 7334 7335 ctsio->kern_data_resid = 0; 7336 ctsio->kern_rel_offset = 0; 7337 ctsio->kern_sg_entries = 0; 7338 7339 mtx_lock(&lun->lun_lock); 7340 switch (cdb->action) { 7341 case SPRI_RK: { // read keys 7342 struct scsi_per_res_in_keys *res_keys; 7343 int i, key_count; 7344 7345 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7346 7347 /* 7348 * We had to drop the lock to allocate our buffer, which 7349 * leaves time for someone to come in with another 7350 * persistent reservation. 
(That is unlikely, though, 7351 * since this should be the only persistent reservation 7352 * command active right now.) 7353 */ 7354 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7355 (lun->pr_key_count * 7356 sizeof(struct scsi_per_res_key)))){ 7357 mtx_unlock(&lun->lun_lock); 7358 free(ctsio->kern_data_ptr, M_CTL); 7359 printf("%s: reservation length changed, retrying\n", 7360 __func__); 7361 goto retry; 7362 } 7363 7364 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7365 7366 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7367 lun->pr_key_count, res_keys->header.length); 7368 7369 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7370 if ((key = ctl_get_prkey(lun, i)) == 0) 7371 continue; 7372 7373 /* 7374 * We used lun->pr_key_count to calculate the 7375 * size to allocate. If it turns out the number of 7376 * initiators with the registered flag set is 7377 * larger than that (i.e. they haven't been kept in 7378 * sync), we've got a problem. 7379 */ 7380 if (key_count >= lun->pr_key_count) { 7381 #ifdef NEEDTOPORT 7382 csevent_log(CSC_CTL | CSC_SHELF_SW | 7383 CTL_PR_ERROR, 7384 csevent_LogType_Fault, 7385 csevent_AlertLevel_Yellow, 7386 csevent_FRU_ShelfController, 7387 csevent_FRU_Firmware, 7388 csevent_FRU_Unknown, 7389 "registered keys %d >= key " 7390 "count %d", key_count, 7391 lun->pr_key_count); 7392 #endif 7393 key_count++; 7394 continue; 7395 } 7396 scsi_u64to8b(key, res_keys->keys[key_count].key); 7397 key_count++; 7398 } 7399 break; 7400 } 7401 case SPRI_RR: { // read reservation 7402 struct scsi_per_res_in_rsrv *res; 7403 int tmp_len, header_only; 7404 7405 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7406 7407 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7408 7409 if (lun->flags & CTL_LUN_PR_RESERVED) 7410 { 7411 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7412 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7413 res->header.length); 7414 header_only = 0; 7415 } else { 7416 tmp_len = sizeof(struct scsi_per_res_in_header); 7417 scsi_ulto4b(0, res->header.length); 7418 header_only = 1; 7419 } 7420 7421 /* 7422 * We had to drop the lock to allocate our buffer, which 7423 * leaves time for someone to come in with another 7424 * persistent reservation. (That is unlikely, though, 7425 * since this should be the only persistent reservation 7426 * command active right now.) 7427 */ 7428 if (tmp_len != total_len) { 7429 mtx_unlock(&lun->lun_lock); 7430 free(ctsio->kern_data_ptr, M_CTL); 7431 printf("%s: reservation status changed, retrying\n", 7432 __func__); 7433 goto retry; 7434 } 7435 7436 /* 7437 * No reservation held, so we're done. 7438 */ 7439 if (header_only != 0) 7440 break; 7441 7442 /* 7443 * If the registration is an All Registrants type, the key 7444 * is 0, since it doesn't really matter. 
7445 */ 7446 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7447 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7448 res->data.reservation); 7449 } 7450 res->data.scopetype = lun->res_type; 7451 break; 7452 } 7453 case SPRI_RC: //report capabilities 7454 { 7455 struct scsi_per_res_cap *res_cap; 7456 uint16_t type_mask; 7457 7458 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7459 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7460 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7461 type_mask = SPRI_TM_WR_EX_AR | 7462 SPRI_TM_EX_AC_RO | 7463 SPRI_TM_WR_EX_RO | 7464 SPRI_TM_EX_AC | 7465 SPRI_TM_WR_EX | 7466 SPRI_TM_EX_AC_AR; 7467 scsi_ulto2b(type_mask, res_cap->type_mask); 7468 break; 7469 } 7470 case SPRI_RS: { // read full status 7471 struct scsi_per_res_in_full *res_status; 7472 struct scsi_per_res_in_full_desc *res_desc; 7473 struct ctl_port *port; 7474 int i, len; 7475 7476 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7477 7478 /* 7479 * We had to drop the lock to allocate our buffer, which 7480 * leaves time for someone to come in with another 7481 * persistent reservation. (That is unlikely, though, 7482 * since this should be the only persistent reservation 7483 * command active right now.) 7484 */ 7485 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7486 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7487 lun->pr_key_count)){ 7488 mtx_unlock(&lun->lun_lock); 7489 free(ctsio->kern_data_ptr, M_CTL); 7490 printf("%s: reservation length changed, retrying\n", 7491 __func__); 7492 goto retry; 7493 } 7494 7495 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7496 7497 res_desc = &res_status->desc[0]; 7498 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7499 if ((key = ctl_get_prkey(lun, i)) == 0) 7500 continue; 7501 7502 scsi_u64to8b(key, res_desc->res_key.key); 7503 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7504 (lun->pr_res_idx == i || 7505 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7506 res_desc->flags = SPRI_FULL_R_HOLDER; 7507 res_desc->scopetype = lun->res_type; 7508 } 7509 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7510 res_desc->rel_trgt_port_id); 7511 len = 0; 7512 port = softc->ctl_ports[ 7513 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; 7514 if (port != NULL) 7515 len = ctl_create_iid(port, 7516 i % CTL_MAX_INIT_PER_PORT, 7517 res_desc->transport_id); 7518 scsi_ulto4b(len, res_desc->additional_length); 7519 res_desc = (struct scsi_per_res_in_full_desc *) 7520 &res_desc->transport_id[len]; 7521 } 7522 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7523 res_status->header.length); 7524 break; 7525 } 7526 default: 7527 /* 7528 * This is a bug, because we just checked for this above, 7529 * and should have returned an error. 7530 */ 7531 panic("Invalid PR type %x", cdb->action); 7532 break; /* NOTREACHED */ 7533 } 7534 mtx_unlock(&lun->lun_lock); 7535 7536 ctl_set_success(ctsio); 7537 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7538 ctsio->be_move_done = ctl_config_move_done; 7539 ctl_datamove((union ctl_io *)ctsio); 7540 return (CTL_RETVAL_COMPLETE); 7541 } 7542 7543 static void 7544 ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 7545 { 7546 int off = lun->ctl_softc->persis_offset; 7547 7548 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 7549 ctl_est_ua(lun, residx - off, ua); 7550 } 7551 7552 /* 7553 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7554 * it should return. 
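 * (A non-zero return means this routine has already completed the I/O, either with sense data or a reservation conflict, so the caller must not touch ctsio again.)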
7555 */ 7556 static int 7557 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7558 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7559 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7560 struct scsi_per_res_out_parms* param) 7561 { 7562 union ctl_ha_msg persis_io; 7563 int retval, i; 7564 int isc_retval; 7565 7566 retval = 0; 7567 7568 mtx_lock(&lun->lun_lock); 7569 if (sa_res_key == 0) { 7570 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7571 /* validate scope and type */ 7572 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7573 SPR_LU_SCOPE) { 7574 mtx_unlock(&lun->lun_lock); 7575 ctl_set_invalid_field(/*ctsio*/ ctsio, 7576 /*sks_valid*/ 1, 7577 /*command*/ 1, 7578 /*field*/ 2, 7579 /*bit_valid*/ 1, 7580 /*bit*/ 4); 7581 ctl_done((union ctl_io *)ctsio); 7582 return (1); 7583 } 7584 7585 if (type>8 || type==2 || type==4 || type==0) { 7586 mtx_unlock(&lun->lun_lock); 7587 ctl_set_invalid_field(/*ctsio*/ ctsio, 7588 /*sks_valid*/ 1, 7589 /*command*/ 1, 7590 /*field*/ 2, 7591 /*bit_valid*/ 1, 7592 /*bit*/ 0); 7593 ctl_done((union ctl_io *)ctsio); 7594 return (1); 7595 } 7596 7597 /* 7598 * Unregister everybody else and build UA for 7599 * them 7600 */ 7601 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7602 if (i == residx || ctl_get_prkey(lun, i) == 0) 7603 continue; 7604 7605 ctl_clr_prkey(lun, i); 7606 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7607 } 7608 lun->pr_key_count = 1; 7609 lun->res_type = type; 7610 if (lun->res_type != SPR_TYPE_WR_EX_AR 7611 && lun->res_type != SPR_TYPE_EX_AC_AR) 7612 lun->pr_res_idx = residx; 7613 7614 /* send msg to other side */ 7615 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7616 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7617 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7618 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7619 persis_io.pr.pr_info.res_type = type; 7620 memcpy(persis_io.pr.pr_info.sa_res_key, 7621 param->serv_act_res_key, 7622 sizeof(param->serv_act_res_key)); 7623 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7624 &persis_io, sizeof(persis_io), 0)) > 7625 CTL_HA_STATUS_SUCCESS) { 7626 printf("CTL:Persis Out error returned " 7627 "from ctl_ha_msg_send %d\n", 7628 isc_retval); 7629 } 7630 } else { 7631 /* not all registrants */ 7632 mtx_unlock(&lun->lun_lock); 7633 free(ctsio->kern_data_ptr, M_CTL); 7634 ctl_set_invalid_field(ctsio, 7635 /*sks_valid*/ 1, 7636 /*command*/ 0, 7637 /*field*/ 8, 7638 /*bit_valid*/ 0, 7639 /*bit*/ 0); 7640 ctl_done((union ctl_io *)ctsio); 7641 return (1); 7642 } 7643 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7644 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7645 int found = 0; 7646 7647 if (res_key == sa_res_key) { 7648 /* special case */ 7649 /* 7650 * The spec implies this is not good but doesn't 7651 * say what to do. There are two choices either 7652 * generate a res conflict or check condition 7653 * with illegal field in parameter data. Since 7654 * that is what is done when the sa_res_key is 7655 * zero I'll take that approach since this has 7656 * to do with the sa_res_key. 
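 * (Field 8 is the byte offset of the service action reservation key within the PERSISTENT RESERVE OUT parameter list.)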
7657 */ 7658 mtx_unlock(&lun->lun_lock); 7659 free(ctsio->kern_data_ptr, M_CTL); 7660 ctl_set_invalid_field(ctsio, 7661 /*sks_valid*/ 1, 7662 /*command*/ 0, 7663 /*field*/ 8, 7664 /*bit_valid*/ 0, 7665 /*bit*/ 0); 7666 ctl_done((union ctl_io *)ctsio); 7667 return (1); 7668 } 7669 7670 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7671 if (ctl_get_prkey(lun, i) != sa_res_key) 7672 continue; 7673 7674 found = 1; 7675 ctl_clr_prkey(lun, i); 7676 lun->pr_key_count--; 7677 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7678 } 7679 if (!found) { 7680 mtx_unlock(&lun->lun_lock); 7681 free(ctsio->kern_data_ptr, M_CTL); 7682 ctl_set_reservation_conflict(ctsio); 7683 ctl_done((union ctl_io *)ctsio); 7684 return (CTL_RETVAL_COMPLETE); 7685 } 7686 /* send msg to other side */ 7687 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7688 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7689 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7690 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7691 persis_io.pr.pr_info.res_type = type; 7692 memcpy(persis_io.pr.pr_info.sa_res_key, 7693 param->serv_act_res_key, 7694 sizeof(param->serv_act_res_key)); 7695 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7696 &persis_io, sizeof(persis_io), 0)) > 7697 CTL_HA_STATUS_SUCCESS) { 7698 printf("CTL:Persis Out error returned from " 7699 "ctl_ha_msg_send %d\n", isc_retval); 7700 } 7701 } else { 7702 /* Reserved but not all registrants */ 7703 /* sa_res_key is res holder */ 7704 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7705 /* validate scope and type */ 7706 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7707 SPR_LU_SCOPE) { 7708 mtx_unlock(&lun->lun_lock); 7709 ctl_set_invalid_field(/*ctsio*/ ctsio, 7710 /*sks_valid*/ 1, 7711 /*command*/ 1, 7712 /*field*/ 2, 7713 /*bit_valid*/ 1, 7714 /*bit*/ 4); 7715 ctl_done((union ctl_io *)ctsio); 7716 return (1); 7717 } 7718 7719 if (type>8 || type==2 || type==4 || type==0) { 7720 mtx_unlock(&lun->lun_lock); 7721 ctl_set_invalid_field(/*ctsio*/ ctsio, 7722 /*sks_valid*/ 1, 7723 /*command*/ 1, 7724 /*field*/ 2, 7725 /*bit_valid*/ 1, 7726 /*bit*/ 0); 7727 ctl_done((union ctl_io *)ctsio); 7728 return (1); 7729 } 7730 7731 /* 7732 * Do the following: 7733 * if sa_res_key != res_key remove all 7734 * registrants w/sa_res_key and generate UA 7735 * for these registrants(Registrations 7736 * Preempted) if it wasn't an exclusive 7737 * reservation generate UA(Reservations 7738 * Preempted) for all other registered nexuses 7739 * if the type has changed. Establish the new 7740 * reservation and holder. If res_key and 7741 * sa_res_key are the same do the above 7742 * except don't unregister the res holder. 
7743 */ 7744 7745 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7746 if (i == residx || ctl_get_prkey(lun, i) == 0) 7747 continue; 7748 7749 if (sa_res_key == ctl_get_prkey(lun, i)) { 7750 ctl_clr_prkey(lun, i); 7751 lun->pr_key_count--; 7752 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7753 } else if (type != lun->res_type 7754 && (lun->res_type == SPR_TYPE_WR_EX_RO 7755 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7756 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 7757 } 7758 } 7759 lun->res_type = type; 7760 if (lun->res_type != SPR_TYPE_WR_EX_AR 7761 && lun->res_type != SPR_TYPE_EX_AC_AR) 7762 lun->pr_res_idx = residx; 7763 else 7764 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7765 7766 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7767 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7768 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7769 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7770 persis_io.pr.pr_info.res_type = type; 7771 memcpy(persis_io.pr.pr_info.sa_res_key, 7772 param->serv_act_res_key, 7773 sizeof(param->serv_act_res_key)); 7774 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7775 &persis_io, sizeof(persis_io), 0)) > 7776 CTL_HA_STATUS_SUCCESS) { 7777 printf("CTL:Persis Out error returned " 7778 "from ctl_ha_msg_send %d\n", 7779 isc_retval); 7780 } 7781 } else { 7782 /* 7783 * sa_res_key is not the res holder just 7784 * remove registrants 7785 */ 7786 int found=0; 7787 7788 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7789 if (sa_res_key != ctl_get_prkey(lun, i)) 7790 continue; 7791 7792 found = 1; 7793 ctl_clr_prkey(lun, i); 7794 lun->pr_key_count--; 7795 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7796 } 7797 7798 if (!found) { 7799 mtx_unlock(&lun->lun_lock); 7800 free(ctsio->kern_data_ptr, M_CTL); 7801 ctl_set_reservation_conflict(ctsio); 7802 ctl_done((union ctl_io *)ctsio); 7803 return (1); 7804 } 7805 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7806 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7807 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7808 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7809 persis_io.pr.pr_info.res_type = type; 7810 memcpy(persis_io.pr.pr_info.sa_res_key, 7811 param->serv_act_res_key, 7812 sizeof(param->serv_act_res_key)); 7813 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7814 &persis_io, sizeof(persis_io), 0)) > 7815 CTL_HA_STATUS_SUCCESS) { 7816 printf("CTL:Persis Out error returned " 7817 "from ctl_ha_msg_send %d\n", 7818 isc_retval); 7819 } 7820 } 7821 } 7822 7823 lun->PRGeneration++; 7824 mtx_unlock(&lun->lun_lock); 7825 7826 return (retval); 7827 } 7828 7829 static void 7830 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7831 { 7832 uint64_t sa_res_key; 7833 int i; 7834 7835 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7836 7837 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7838 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7839 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7840 if (sa_res_key == 0) { 7841 /* 7842 * Unregister everybody else and build UA for 7843 * them 7844 */ 7845 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7846 if (i == msg->pr.pr_info.residx || 7847 ctl_get_prkey(lun, i) == 0) 7848 continue; 7849 7850 ctl_clr_prkey(lun, i); 7851 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7852 } 7853 7854 lun->pr_key_count = 1; 7855 lun->res_type = msg->pr.pr_info.res_type; 7856 if (lun->res_type != SPR_TYPE_WR_EX_AR 7857 && lun->res_type != SPR_TYPE_EX_AC_AR) 7858 lun->pr_res_idx = msg->pr.pr_info.residx; 7859 } else { 7860 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7861 if (sa_res_key == ctl_get_prkey(lun, 
i)) 7862 continue; 7863 7864 ctl_clr_prkey(lun, i); 7865 lun->pr_key_count--; 7866 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7867 } 7868 } 7869 } else { 7870 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7871 if (i == msg->pr.pr_info.residx || 7872 ctl_get_prkey(lun, i) == 0) 7873 continue; 7874 7875 if (sa_res_key == ctl_get_prkey(lun, i)) { 7876 ctl_clr_prkey(lun, i); 7877 lun->pr_key_count--; 7878 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7879 } else if (msg->pr.pr_info.res_type != lun->res_type 7880 && (lun->res_type == SPR_TYPE_WR_EX_RO 7881 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7882 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 7883 } 7884 } 7885 lun->res_type = msg->pr.pr_info.res_type; 7886 if (lun->res_type != SPR_TYPE_WR_EX_AR 7887 && lun->res_type != SPR_TYPE_EX_AC_AR) 7888 lun->pr_res_idx = msg->pr.pr_info.residx; 7889 else 7890 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7891 } 7892 lun->PRGeneration++; 7893 7894 } 7895 7896 7897 int 7898 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7899 { 7900 int retval; 7901 int isc_retval; 7902 u_int32_t param_len; 7903 struct scsi_per_res_out *cdb; 7904 struct ctl_lun *lun; 7905 struct scsi_per_res_out_parms* param; 7906 struct ctl_softc *softc; 7907 uint32_t residx; 7908 uint64_t res_key, sa_res_key, key; 7909 uint8_t type; 7910 union ctl_ha_msg persis_io; 7911 int i; 7912 7913 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 7914 7915 retval = CTL_RETVAL_COMPLETE; 7916 7917 cdb = (struct scsi_per_res_out *)ctsio->cdb; 7918 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7919 softc = lun->ctl_softc; 7920 7921 /* 7922 * We only support whole-LUN scope. The scope & type are ignored for 7923 * register, register and ignore existing key and clear. 7924 * We sometimes ignore scope and type on preempts too!! 7925 * Verify reservation type here as well. 
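 * Per SPC-3, the valid persistent reservation type codes are 1 (Write Exclusive), 3 (Exclusive Access), 5 (Write Exclusive - Registrants Only), 6 (Exclusive Access - Registrants Only), 7 (Write Exclusive - All Registrants) and 8 (Exclusive Access - All Registrants); the checks below reject 0, 2, 4 and anything above 8.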
7926 */ 7927 type = cdb->scope_type & SPR_TYPE_MASK; 7928 if ((cdb->action == SPRO_RESERVE) 7929 || (cdb->action == SPRO_RELEASE)) { 7930 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 7931 ctl_set_invalid_field(/*ctsio*/ ctsio, 7932 /*sks_valid*/ 1, 7933 /*command*/ 1, 7934 /*field*/ 2, 7935 /*bit_valid*/ 1, 7936 /*bit*/ 4); 7937 ctl_done((union ctl_io *)ctsio); 7938 return (CTL_RETVAL_COMPLETE); 7939 } 7940 7941 if (type>8 || type==2 || type==4 || type==0) { 7942 ctl_set_invalid_field(/*ctsio*/ ctsio, 7943 /*sks_valid*/ 1, 7944 /*command*/ 1, 7945 /*field*/ 2, 7946 /*bit_valid*/ 1, 7947 /*bit*/ 0); 7948 ctl_done((union ctl_io *)ctsio); 7949 return (CTL_RETVAL_COMPLETE); 7950 } 7951 } 7952 7953 param_len = scsi_4btoul(cdb->length); 7954 7955 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 7956 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 7957 ctsio->kern_data_len = param_len; 7958 ctsio->kern_total_len = param_len; 7959 ctsio->kern_data_resid = 0; 7960 ctsio->kern_rel_offset = 0; 7961 ctsio->kern_sg_entries = 0; 7962 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7963 ctsio->be_move_done = ctl_config_move_done; 7964 ctl_datamove((union ctl_io *)ctsio); 7965 7966 return (CTL_RETVAL_COMPLETE); 7967 } 7968 7969 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 7970 7971 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 7972 res_key = scsi_8btou64(param->res_key.key); 7973 sa_res_key = scsi_8btou64(param->serv_act_res_key); 7974 7975 /* 7976 * Validate the reservation key here except for SPRO_REG_IGNO 7977 * This must be done for all other service actions 7978 */ 7979 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 7980 mtx_lock(&lun->lun_lock); 7981 if ((key = ctl_get_prkey(lun, residx)) != 0) { 7982 if (res_key != key) { 7983 /* 7984 * The current key passed in doesn't match 7985 * the one the initiator previously 7986 * registered. 7987 */ 7988 mtx_unlock(&lun->lun_lock); 7989 free(ctsio->kern_data_ptr, M_CTL); 7990 ctl_set_reservation_conflict(ctsio); 7991 ctl_done((union ctl_io *)ctsio); 7992 return (CTL_RETVAL_COMPLETE); 7993 } 7994 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 7995 /* 7996 * We are not registered 7997 */ 7998 mtx_unlock(&lun->lun_lock); 7999 free(ctsio->kern_data_ptr, M_CTL); 8000 ctl_set_reservation_conflict(ctsio); 8001 ctl_done((union ctl_io *)ctsio); 8002 return (CTL_RETVAL_COMPLETE); 8003 } else if (res_key != 0) { 8004 /* 8005 * We are not registered and trying to register but 8006 * the register key isn't zero. 8007 */ 8008 mtx_unlock(&lun->lun_lock); 8009 free(ctsio->kern_data_ptr, M_CTL); 8010 ctl_set_reservation_conflict(ctsio); 8011 ctl_done((union ctl_io *)ctsio); 8012 return (CTL_RETVAL_COMPLETE); 8013 } 8014 mtx_unlock(&lun->lun_lock); 8015 } 8016 8017 switch (cdb->action & SPRO_ACTION_MASK) { 8018 case SPRO_REGISTER: 8019 case SPRO_REG_IGNO: { 8020 8021 #if 0 8022 printf("Registration received\n"); 8023 #endif 8024 8025 /* 8026 * We don't support any of these options, as we report in 8027 * the read capabilities request (see 8028 * ctl_persistent_reserve_in(), above). 
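 *
 * (The invalid field report below points at byte 20 of the
 * PERSISTENT RESERVE OUT parameter list; per SPC, bit 0 of that
 * byte is APTPL, bit 2 is ALL_TG_PT and bit 3 is SPEC_I_PT, which
 * is where the bit_ptr values of 0, 2 and 3 come from.)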
8029 */ 8030 if ((param->flags & SPR_SPEC_I_PT) 8031 || (param->flags & SPR_ALL_TG_PT) 8032 || (param->flags & SPR_APTPL)) { 8033 int bit_ptr; 8034 8035 if (param->flags & SPR_APTPL) 8036 bit_ptr = 0; 8037 else if (param->flags & SPR_ALL_TG_PT) 8038 bit_ptr = 2; 8039 else /* SPR_SPEC_I_PT */ 8040 bit_ptr = 3; 8041 8042 free(ctsio->kern_data_ptr, M_CTL); 8043 ctl_set_invalid_field(ctsio, 8044 /*sks_valid*/ 1, 8045 /*command*/ 0, 8046 /*field*/ 20, 8047 /*bit_valid*/ 1, 8048 /*bit*/ bit_ptr); 8049 ctl_done((union ctl_io *)ctsio); 8050 return (CTL_RETVAL_COMPLETE); 8051 } 8052 8053 mtx_lock(&lun->lun_lock); 8054 8055 /* 8056 * The initiator wants to clear the 8057 * key/unregister. 8058 */ 8059 if (sa_res_key == 0) { 8060 if ((res_key == 0 8061 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8062 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8063 && ctl_get_prkey(lun, residx) == 0)) { 8064 mtx_unlock(&lun->lun_lock); 8065 goto done; 8066 } 8067 8068 ctl_clr_prkey(lun, residx); 8069 lun->pr_key_count--; 8070 8071 if (residx == lun->pr_res_idx) { 8072 lun->flags &= ~CTL_LUN_PR_RESERVED; 8073 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8074 8075 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8076 || lun->res_type == SPR_TYPE_EX_AC_RO) 8077 && lun->pr_key_count) { 8078 /* 8079 * If the reservation is a registrants 8080 * only type we need to generate a UA 8081 * for other registered inits. The 8082 * sense code should be RESERVATIONS 8083 * RELEASED 8084 */ 8085 8086 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8087 if (ctl_get_prkey(lun, i + 8088 softc->persis_offset) == 0) 8089 continue; 8090 ctl_est_ua(lun, i, 8091 CTL_UA_RES_RELEASE); 8092 } 8093 } 8094 lun->res_type = 0; 8095 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8096 if (lun->pr_key_count==0) { 8097 lun->flags &= ~CTL_LUN_PR_RESERVED; 8098 lun->res_type = 0; 8099 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8100 } 8101 } 8102 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8103 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8104 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8105 persis_io.pr.pr_info.residx = residx; 8106 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8107 &persis_io, sizeof(persis_io), 0 )) > 8108 CTL_HA_STATUS_SUCCESS) { 8109 printf("CTL:Persis Out error returned from " 8110 "ctl_ha_msg_send %d\n", isc_retval); 8111 } 8112 } else /* sa_res_key != 0 */ { 8113 8114 /* 8115 * If we aren't registered currently then increment 8116 * the key count and set the registered flag. 
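 *
 * (ctl_alloc_prkey() only ensures that backing storage for this
 * initiator's key slot exists; the slot still reads back as 0 until
 * ctl_set_prkey() stores the new key, which is why the key count is
 * bumped based on the value read before the store.)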
8117 */ 8118 ctl_alloc_prkey(lun, residx); 8119 if (ctl_get_prkey(lun, residx) == 0) 8120 lun->pr_key_count++; 8121 ctl_set_prkey(lun, residx, sa_res_key); 8122 8123 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8124 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8125 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8126 persis_io.pr.pr_info.residx = residx; 8127 memcpy(persis_io.pr.pr_info.sa_res_key, 8128 param->serv_act_res_key, 8129 sizeof(param->serv_act_res_key)); 8130 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8131 &persis_io, sizeof(persis_io), 0)) > 8132 CTL_HA_STATUS_SUCCESS) { 8133 printf("CTL:Persis Out error returned from " 8134 "ctl_ha_msg_send %d\n", isc_retval); 8135 } 8136 } 8137 lun->PRGeneration++; 8138 mtx_unlock(&lun->lun_lock); 8139 8140 break; 8141 } 8142 case SPRO_RESERVE: 8143 #if 0 8144 printf("Reserve executed type %d\n", type); 8145 #endif 8146 mtx_lock(&lun->lun_lock); 8147 if (lun->flags & CTL_LUN_PR_RESERVED) { 8148 /* 8149 * if this isn't the reservation holder and it's 8150 * not a "all registrants" type or if the type is 8151 * different then we have a conflict 8152 */ 8153 if ((lun->pr_res_idx != residx 8154 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8155 || lun->res_type != type) { 8156 mtx_unlock(&lun->lun_lock); 8157 free(ctsio->kern_data_ptr, M_CTL); 8158 ctl_set_reservation_conflict(ctsio); 8159 ctl_done((union ctl_io *)ctsio); 8160 return (CTL_RETVAL_COMPLETE); 8161 } 8162 mtx_unlock(&lun->lun_lock); 8163 } else /* create a reservation */ { 8164 /* 8165 * If it's not an "all registrants" type record 8166 * reservation holder 8167 */ 8168 if (type != SPR_TYPE_WR_EX_AR 8169 && type != SPR_TYPE_EX_AC_AR) 8170 lun->pr_res_idx = residx; /* Res holder */ 8171 else 8172 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8173 8174 lun->flags |= CTL_LUN_PR_RESERVED; 8175 lun->res_type = type; 8176 8177 mtx_unlock(&lun->lun_lock); 8178 8179 /* send msg to other side */ 8180 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8181 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8182 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8183 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8184 persis_io.pr.pr_info.res_type = type; 8185 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8186 &persis_io, sizeof(persis_io), 0)) > 8187 CTL_HA_STATUS_SUCCESS) { 8188 printf("CTL:Persis Out error returned from " 8189 "ctl_ha_msg_send %d\n", isc_retval); 8190 } 8191 } 8192 break; 8193 8194 case SPRO_RELEASE: 8195 mtx_lock(&lun->lun_lock); 8196 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8197 /* No reservation exists return good status */ 8198 mtx_unlock(&lun->lun_lock); 8199 goto done; 8200 } 8201 /* 8202 * Is this nexus a reservation holder? 8203 */ 8204 if (lun->pr_res_idx != residx 8205 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8206 /* 8207 * not a res holder return good status but 8208 * do nothing 8209 */ 8210 mtx_unlock(&lun->lun_lock); 8211 goto done; 8212 } 8213 8214 if (lun->res_type != type) { 8215 mtx_unlock(&lun->lun_lock); 8216 free(ctsio->kern_data_ptr, M_CTL); 8217 ctl_set_illegal_pr_release(ctsio); 8218 ctl_done((union ctl_io *)ctsio); 8219 return (CTL_RETVAL_COMPLETE); 8220 } 8221 8222 /* okay to release */ 8223 lun->flags &= ~CTL_LUN_PR_RESERVED; 8224 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8225 lun->res_type = 0; 8226 8227 /* 8228 * if this isn't an exclusive access 8229 * res generate UA for all other 8230 * registrants. 
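 *
 * (Per SPC, only the registrants-only and all-registrants types,
 * i.e. WR_EX_RO, EX_AC_RO, WR_EX_AR and EX_AC_AR, require a
 * RESERVATIONS RELEASED unit attention when released; a plain
 * WR_EX or EX_AC reservation is released without notifying the
 * other registrants, which is what the type test below implements.)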
8231 */ 8232 if (type != SPR_TYPE_EX_AC 8233 && type != SPR_TYPE_WR_EX) { 8234 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8235 if (i == residx || 8236 ctl_get_prkey(lun, 8237 i + softc->persis_offset) == 0) 8238 continue; 8239 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8240 } 8241 } 8242 mtx_unlock(&lun->lun_lock); 8243 /* Send msg to other side */ 8244 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8245 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8246 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8247 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8248 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8249 printf("CTL:Persis Out error returned from " 8250 "ctl_ha_msg_send %d\n", isc_retval); 8251 } 8252 break; 8253 8254 case SPRO_CLEAR: 8255 /* send msg to other side */ 8256 8257 mtx_lock(&lun->lun_lock); 8258 lun->flags &= ~CTL_LUN_PR_RESERVED; 8259 lun->res_type = 0; 8260 lun->pr_key_count = 0; 8261 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8262 8263 ctl_clr_prkey(lun, residx); 8264 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8265 if (ctl_get_prkey(lun, i) != 0) { 8266 ctl_clr_prkey(lun, i); 8267 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8268 } 8269 lun->PRGeneration++; 8270 mtx_unlock(&lun->lun_lock); 8271 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8272 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8273 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8274 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8275 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8276 printf("CTL:Persis Out error returned from " 8277 "ctl_ha_msg_send %d\n", isc_retval); 8278 } 8279 break; 8280 8281 case SPRO_PREEMPT: 8282 case SPRO_PRE_ABO: { 8283 int nretval; 8284 8285 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8286 residx, ctsio, cdb, param); 8287 if (nretval != 0) 8288 return (CTL_RETVAL_COMPLETE); 8289 break; 8290 } 8291 default: 8292 panic("Invalid PR type %x", cdb->action); 8293 } 8294 8295 done: 8296 free(ctsio->kern_data_ptr, M_CTL); 8297 ctl_set_success(ctsio); 8298 ctl_done((union ctl_io *)ctsio); 8299 8300 return (retval); 8301 } 8302 8303 /* 8304 * This routine is for handling a message from the other SC pertaining to 8305 * persistent reserve out. All the error checking will have been done 8306 * so only perorming the action need be done here to keep the two 8307 * in sync. 8308 */ 8309 static void 8310 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8311 { 8312 struct ctl_lun *lun; 8313 struct ctl_softc *softc; 8314 int i; 8315 uint32_t targ_lun; 8316 8317 softc = control_softc; 8318 8319 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8320 lun = softc->ctl_luns[targ_lun]; 8321 mtx_lock(&lun->lun_lock); 8322 switch(msg->pr.pr_info.action) { 8323 case CTL_PR_REG_KEY: 8324 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8325 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8326 lun->pr_key_count++; 8327 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8328 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8329 lun->PRGeneration++; 8330 break; 8331 8332 case CTL_PR_UNREG_KEY: 8333 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8334 lun->pr_key_count--; 8335 8336 /* XXX Need to see if the reservation has been released */ 8337 /* if so do we need to generate UA? 
*/ 8338 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8339 lun->flags &= ~CTL_LUN_PR_RESERVED; 8340 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8341 8342 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8343 || lun->res_type == SPR_TYPE_EX_AC_RO) 8344 && lun->pr_key_count) { 8345 /* 8346 * If the reservation is a registrants 8347 * only type we need to generate a UA 8348 * for other registered inits. The 8349 * sense code should be RESERVATIONS 8350 * RELEASED 8351 */ 8352 8353 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8354 if (ctl_get_prkey(lun, i + 8355 softc->persis_offset) == 0) 8356 continue; 8357 8358 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8359 } 8360 } 8361 lun->res_type = 0; 8362 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8363 if (lun->pr_key_count==0) { 8364 lun->flags &= ~CTL_LUN_PR_RESERVED; 8365 lun->res_type = 0; 8366 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8367 } 8368 } 8369 lun->PRGeneration++; 8370 break; 8371 8372 case CTL_PR_RESERVE: 8373 lun->flags |= CTL_LUN_PR_RESERVED; 8374 lun->res_type = msg->pr.pr_info.res_type; 8375 lun->pr_res_idx = msg->pr.pr_info.residx; 8376 8377 break; 8378 8379 case CTL_PR_RELEASE: 8380 /* 8381 * if this isn't an exclusive access res generate UA for all 8382 * other registrants. 8383 */ 8384 if (lun->res_type != SPR_TYPE_EX_AC 8385 && lun->res_type != SPR_TYPE_WR_EX) { 8386 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8387 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) 8388 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8389 } 8390 8391 lun->flags &= ~CTL_LUN_PR_RESERVED; 8392 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8393 lun->res_type = 0; 8394 break; 8395 8396 case CTL_PR_PREEMPT: 8397 ctl_pro_preempt_other(lun, msg); 8398 break; 8399 case CTL_PR_CLEAR: 8400 lun->flags &= ~CTL_LUN_PR_RESERVED; 8401 lun->res_type = 0; 8402 lun->pr_key_count = 0; 8403 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8404 8405 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8406 if (ctl_get_prkey(lun, i) == 0) 8407 continue; 8408 ctl_clr_prkey(lun, i); 8409 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8410 } 8411 lun->PRGeneration++; 8412 break; 8413 } 8414 8415 mtx_unlock(&lun->lun_lock); 8416 } 8417 8418 int 8419 ctl_read_write(struct ctl_scsiio *ctsio) 8420 { 8421 struct ctl_lun *lun; 8422 struct ctl_lba_len_flags *lbalen; 8423 uint64_t lba; 8424 uint32_t num_blocks; 8425 int flags, retval; 8426 int isread; 8427 8428 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8429 8430 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8431 8432 flags = 0; 8433 retval = CTL_RETVAL_COMPLETE; 8434 8435 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8436 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8437 switch (ctsio->cdb[0]) { 8438 case READ_6: 8439 case WRITE_6: { 8440 struct scsi_rw_6 *cdb; 8441 8442 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8443 8444 lba = scsi_3btoul(cdb->addr); 8445 /* only 5 bits are valid in the most significant address byte */ 8446 lba &= 0x1fffff; 8447 num_blocks = cdb->length; 8448 /* 8449 * This is correct according to SBC-2. 
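 *
 * Worked example (illustrative): a READ(6) CDB of
 *
 *	08 f2 34 56 00 00
 *
 * decodes as lba = 0xf23456 & 0x1fffff = 0x123456 (the top three
 * bits of the address are discarded by the mask above) and, because
 * the length byte is zero, num_blocks = 256.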
8450 */ 8451 if (num_blocks == 0) 8452 num_blocks = 256; 8453 break; 8454 } 8455 case READ_10: 8456 case WRITE_10: { 8457 struct scsi_rw_10 *cdb; 8458 8459 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8460 if (cdb->byte2 & SRW10_FUA) 8461 flags |= CTL_LLF_FUA; 8462 if (cdb->byte2 & SRW10_DPO) 8463 flags |= CTL_LLF_DPO; 8464 lba = scsi_4btoul(cdb->addr); 8465 num_blocks = scsi_2btoul(cdb->length); 8466 break; 8467 } 8468 case WRITE_VERIFY_10: { 8469 struct scsi_write_verify_10 *cdb; 8470 8471 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8472 flags |= CTL_LLF_FUA; 8473 if (cdb->byte2 & SWV_DPO) 8474 flags |= CTL_LLF_DPO; 8475 lba = scsi_4btoul(cdb->addr); 8476 num_blocks = scsi_2btoul(cdb->length); 8477 break; 8478 } 8479 case READ_12: 8480 case WRITE_12: { 8481 struct scsi_rw_12 *cdb; 8482 8483 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8484 if (cdb->byte2 & SRW12_FUA) 8485 flags |= CTL_LLF_FUA; 8486 if (cdb->byte2 & SRW12_DPO) 8487 flags |= CTL_LLF_DPO; 8488 lba = scsi_4btoul(cdb->addr); 8489 num_blocks = scsi_4btoul(cdb->length); 8490 break; 8491 } 8492 case WRITE_VERIFY_12: { 8493 struct scsi_write_verify_12 *cdb; 8494 8495 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8496 flags |= CTL_LLF_FUA; 8497 if (cdb->byte2 & SWV_DPO) 8498 flags |= CTL_LLF_DPO; 8499 lba = scsi_4btoul(cdb->addr); 8500 num_blocks = scsi_4btoul(cdb->length); 8501 break; 8502 } 8503 case READ_16: 8504 case WRITE_16: { 8505 struct scsi_rw_16 *cdb; 8506 8507 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8508 if (cdb->byte2 & SRW12_FUA) 8509 flags |= CTL_LLF_FUA; 8510 if (cdb->byte2 & SRW12_DPO) 8511 flags |= CTL_LLF_DPO; 8512 lba = scsi_8btou64(cdb->addr); 8513 num_blocks = scsi_4btoul(cdb->length); 8514 break; 8515 } 8516 case WRITE_ATOMIC_16: { 8517 struct scsi_rw_16 *cdb; 8518 8519 if (lun->be_lun->atomicblock == 0) { 8520 ctl_set_invalid_opcode(ctsio); 8521 ctl_done((union ctl_io *)ctsio); 8522 return (CTL_RETVAL_COMPLETE); 8523 } 8524 8525 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8526 if (cdb->byte2 & SRW12_FUA) 8527 flags |= CTL_LLF_FUA; 8528 if (cdb->byte2 & SRW12_DPO) 8529 flags |= CTL_LLF_DPO; 8530 lba = scsi_8btou64(cdb->addr); 8531 num_blocks = scsi_4btoul(cdb->length); 8532 if (num_blocks > lun->be_lun->atomicblock) { 8533 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8534 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8535 /*bit*/ 0); 8536 ctl_done((union ctl_io *)ctsio); 8537 return (CTL_RETVAL_COMPLETE); 8538 } 8539 break; 8540 } 8541 case WRITE_VERIFY_16: { 8542 struct scsi_write_verify_16 *cdb; 8543 8544 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8545 flags |= CTL_LLF_FUA; 8546 if (cdb->byte2 & SWV_DPO) 8547 flags |= CTL_LLF_DPO; 8548 lba = scsi_8btou64(cdb->addr); 8549 num_blocks = scsi_4btoul(cdb->length); 8550 break; 8551 } 8552 default: 8553 /* 8554 * We got a command we don't support. This shouldn't 8555 * happen, commands should be filtered out above us. 8556 */ 8557 ctl_set_invalid_opcode(ctsio); 8558 ctl_done((union ctl_io *)ctsio); 8559 8560 return (CTL_RETVAL_COMPLETE); 8561 break; /* NOTREACHED */ 8562 } 8563 8564 /* 8565 * The first check is to make sure we're in bounds, the second 8566 * check is to catch wrap-around problems. If the lba + num blocks 8567 * is less than the lba, then we've wrapped around and the block 8568 * range is invalid anyway. 
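 *
 * For example (illustrative values): lba = 0xfffffffffffffff0 with
 * num_blocks = 0x20 makes lba + num_blocks wrap around to 0x10.
 * That sum is smaller than any realistic maxlba + 1, so the first
 * test would let it through, but it is also smaller than lba
 * itself, so the second test catches it.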
8569 */ 8570 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8571 || ((lba + num_blocks) < lba)) { 8572 ctl_set_lba_out_of_range(ctsio); 8573 ctl_done((union ctl_io *)ctsio); 8574 return (CTL_RETVAL_COMPLETE); 8575 } 8576 8577 /* 8578 * According to SBC-3, a transfer length of 0 is not an error. 8579 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8580 * translates to 256 blocks for those commands. 8581 */ 8582 if (num_blocks == 0) { 8583 ctl_set_success(ctsio); 8584 ctl_done((union ctl_io *)ctsio); 8585 return (CTL_RETVAL_COMPLETE); 8586 } 8587 8588 /* Set FUA and/or DPO if caches are disabled. */ 8589 if (isread) { 8590 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8591 SCP_RCD) != 0) 8592 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8593 } else { 8594 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8595 SCP_WCE) == 0) 8596 flags |= CTL_LLF_FUA; 8597 } 8598 8599 lbalen = (struct ctl_lba_len_flags *) 8600 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8601 lbalen->lba = lba; 8602 lbalen->len = num_blocks; 8603 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8604 8605 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8606 ctsio->kern_rel_offset = 0; 8607 8608 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8609 8610 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8611 8612 return (retval); 8613 } 8614 8615 static int 8616 ctl_cnw_cont(union ctl_io *io) 8617 { 8618 struct ctl_scsiio *ctsio; 8619 struct ctl_lun *lun; 8620 struct ctl_lba_len_flags *lbalen; 8621 int retval; 8622 8623 ctsio = &io->scsiio; 8624 ctsio->io_hdr.status = CTL_STATUS_NONE; 8625 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8626 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8627 lbalen = (struct ctl_lba_len_flags *) 8628 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8629 lbalen->flags &= ~CTL_LLF_COMPARE; 8630 lbalen->flags |= CTL_LLF_WRITE; 8631 8632 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8633 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8634 return (retval); 8635 } 8636 8637 int 8638 ctl_cnw(struct ctl_scsiio *ctsio) 8639 { 8640 struct ctl_lun *lun; 8641 struct ctl_lba_len_flags *lbalen; 8642 uint64_t lba; 8643 uint32_t num_blocks; 8644 int flags, retval; 8645 8646 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8647 8648 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8649 8650 flags = 0; 8651 retval = CTL_RETVAL_COMPLETE; 8652 8653 switch (ctsio->cdb[0]) { 8654 case COMPARE_AND_WRITE: { 8655 struct scsi_compare_and_write *cdb; 8656 8657 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8658 if (cdb->byte2 & SRW10_FUA) 8659 flags |= CTL_LLF_FUA; 8660 if (cdb->byte2 & SRW10_DPO) 8661 flags |= CTL_LLF_DPO; 8662 lba = scsi_8btou64(cdb->addr); 8663 num_blocks = cdb->length; 8664 break; 8665 } 8666 default: 8667 /* 8668 * We got a command we don't support. This shouldn't 8669 * happen, commands should be filtered out above us. 8670 */ 8671 ctl_set_invalid_opcode(ctsio); 8672 ctl_done((union ctl_io *)ctsio); 8673 8674 return (CTL_RETVAL_COMPLETE); 8675 break; /* NOTREACHED */ 8676 } 8677 8678 /* 8679 * The first check is to make sure we're in bounds, the second 8680 * check is to catch wrap-around problems. If the lba + num blocks 8681 * is less than the lba, then we've wrapped around and the block 8682 * range is invalid anyway. 
8683 */ 8684 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8685 || ((lba + num_blocks) < lba)) { 8686 ctl_set_lba_out_of_range(ctsio); 8687 ctl_done((union ctl_io *)ctsio); 8688 return (CTL_RETVAL_COMPLETE); 8689 } 8690 8691 /* 8692 * According to SBC-3, a transfer length of 0 is not an error. 8693 */ 8694 if (num_blocks == 0) { 8695 ctl_set_success(ctsio); 8696 ctl_done((union ctl_io *)ctsio); 8697 return (CTL_RETVAL_COMPLETE); 8698 } 8699 8700 /* Set FUA if write cache is disabled. */ 8701 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8702 SCP_WCE) == 0) 8703 flags |= CTL_LLF_FUA; 8704 8705 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8706 ctsio->kern_rel_offset = 0; 8707 8708 /* 8709 * Set the IO_CONT flag, so that if this I/O gets passed to 8710 * ctl_data_submit_done(), it'll get passed back to 8711 * ctl_ctl_cnw_cont() for further processing. 8712 */ 8713 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8714 ctsio->io_cont = ctl_cnw_cont; 8715 8716 lbalen = (struct ctl_lba_len_flags *) 8717 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8718 lbalen->lba = lba; 8719 lbalen->len = num_blocks; 8720 lbalen->flags = CTL_LLF_COMPARE | flags; 8721 8722 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8723 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8724 return (retval); 8725 } 8726 8727 int 8728 ctl_verify(struct ctl_scsiio *ctsio) 8729 { 8730 struct ctl_lun *lun; 8731 struct ctl_lba_len_flags *lbalen; 8732 uint64_t lba; 8733 uint32_t num_blocks; 8734 int bytchk, flags; 8735 int retval; 8736 8737 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8738 8739 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8740 8741 bytchk = 0; 8742 flags = CTL_LLF_FUA; 8743 retval = CTL_RETVAL_COMPLETE; 8744 8745 switch (ctsio->cdb[0]) { 8746 case VERIFY_10: { 8747 struct scsi_verify_10 *cdb; 8748 8749 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8750 if (cdb->byte2 & SVFY_BYTCHK) 8751 bytchk = 1; 8752 if (cdb->byte2 & SVFY_DPO) 8753 flags |= CTL_LLF_DPO; 8754 lba = scsi_4btoul(cdb->addr); 8755 num_blocks = scsi_2btoul(cdb->length); 8756 break; 8757 } 8758 case VERIFY_12: { 8759 struct scsi_verify_12 *cdb; 8760 8761 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8762 if (cdb->byte2 & SVFY_BYTCHK) 8763 bytchk = 1; 8764 if (cdb->byte2 & SVFY_DPO) 8765 flags |= CTL_LLF_DPO; 8766 lba = scsi_4btoul(cdb->addr); 8767 num_blocks = scsi_4btoul(cdb->length); 8768 break; 8769 } 8770 case VERIFY_16: { 8771 struct scsi_rw_16 *cdb; 8772 8773 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8774 if (cdb->byte2 & SVFY_BYTCHK) 8775 bytchk = 1; 8776 if (cdb->byte2 & SVFY_DPO) 8777 flags |= CTL_LLF_DPO; 8778 lba = scsi_8btou64(cdb->addr); 8779 num_blocks = scsi_4btoul(cdb->length); 8780 break; 8781 } 8782 default: 8783 /* 8784 * We got a command we don't support. This shouldn't 8785 * happen, commands should be filtered out above us. 8786 */ 8787 ctl_set_invalid_opcode(ctsio); 8788 ctl_done((union ctl_io *)ctsio); 8789 return (CTL_RETVAL_COMPLETE); 8790 } 8791 8792 /* 8793 * The first check is to make sure we're in bounds, the second 8794 * check is to catch wrap-around problems. If the lba + num blocks 8795 * is less than the lba, then we've wrapped around and the block 8796 * range is invalid anyway. 
8797 */ 8798 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8799 || ((lba + num_blocks) < lba)) { 8800 ctl_set_lba_out_of_range(ctsio); 8801 ctl_done((union ctl_io *)ctsio); 8802 return (CTL_RETVAL_COMPLETE); 8803 } 8804 8805 /* 8806 * According to SBC-3, a transfer length of 0 is not an error. 8807 */ 8808 if (num_blocks == 0) { 8809 ctl_set_success(ctsio); 8810 ctl_done((union ctl_io *)ctsio); 8811 return (CTL_RETVAL_COMPLETE); 8812 } 8813 8814 lbalen = (struct ctl_lba_len_flags *) 8815 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8816 lbalen->lba = lba; 8817 lbalen->len = num_blocks; 8818 if (bytchk) { 8819 lbalen->flags = CTL_LLF_COMPARE | flags; 8820 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8821 } else { 8822 lbalen->flags = CTL_LLF_VERIFY | flags; 8823 ctsio->kern_total_len = 0; 8824 } 8825 ctsio->kern_rel_offset = 0; 8826 8827 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8828 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8829 return (retval); 8830 } 8831 8832 int 8833 ctl_report_luns(struct ctl_scsiio *ctsio) 8834 { 8835 struct ctl_softc *softc = control_softc; 8836 struct scsi_report_luns *cdb; 8837 struct scsi_report_luns_data *lun_data; 8838 struct ctl_lun *lun, *request_lun; 8839 struct ctl_port *port; 8840 int num_luns, retval; 8841 uint32_t alloc_len, lun_datalen; 8842 int num_filled, well_known; 8843 uint32_t initidx, targ_lun_id, lun_id; 8844 8845 retval = CTL_RETVAL_COMPLETE; 8846 well_known = 0; 8847 8848 cdb = (struct scsi_report_luns *)ctsio->cdb; 8849 port = ctl_io_port(&ctsio->io_hdr); 8850 8851 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8852 8853 mtx_lock(&softc->ctl_lock); 8854 num_luns = 0; 8855 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 8856 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 8857 num_luns++; 8858 } 8859 mtx_unlock(&softc->ctl_lock); 8860 8861 switch (cdb->select_report) { 8862 case RPL_REPORT_DEFAULT: 8863 case RPL_REPORT_ALL: 8864 break; 8865 case RPL_REPORT_WELLKNOWN: 8866 well_known = 1; 8867 num_luns = 0; 8868 break; 8869 default: 8870 ctl_set_invalid_field(ctsio, 8871 /*sks_valid*/ 1, 8872 /*command*/ 1, 8873 /*field*/ 2, 8874 /*bit_valid*/ 0, 8875 /*bit*/ 0); 8876 ctl_done((union ctl_io *)ctsio); 8877 return (retval); 8878 break; /* NOTREACHED */ 8879 } 8880 8881 alloc_len = scsi_4btoul(cdb->length); 8882 /* 8883 * The initiator has to allocate at least 16 bytes for this request, 8884 * so he can at least get the header and the first LUN. Otherwise 8885 * we reject the request (per SPC-3 rev 14, section 6.21). 
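 *
 * (The 16 byte minimum works out as: an 8 byte header, i.e. a
 * 4 byte LUN LIST LENGTH field plus 4 reserved bytes, followed by
 * at least one 8 byte LUN entry.)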
8886 */ 8887 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 8888 sizeof(struct scsi_report_luns_lundata))) { 8889 ctl_set_invalid_field(ctsio, 8890 /*sks_valid*/ 1, 8891 /*command*/ 1, 8892 /*field*/ 6, 8893 /*bit_valid*/ 0, 8894 /*bit*/ 0); 8895 ctl_done((union ctl_io *)ctsio); 8896 return (retval); 8897 } 8898 8899 request_lun = (struct ctl_lun *) 8900 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8901 8902 lun_datalen = sizeof(*lun_data) + 8903 (num_luns * sizeof(struct scsi_report_luns_lundata)); 8904 8905 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 8906 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 8907 ctsio->kern_sg_entries = 0; 8908 8909 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8910 8911 mtx_lock(&softc->ctl_lock); 8912 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 8913 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 8914 if (lun_id >= CTL_MAX_LUNS) 8915 continue; 8916 lun = softc->ctl_luns[lun_id]; 8917 if (lun == NULL) 8918 continue; 8919 8920 if (targ_lun_id <= 0xff) { 8921 /* 8922 * Peripheral addressing method, bus number 0. 8923 */ 8924 lun_data->luns[num_filled].lundata[0] = 8925 RPL_LUNDATA_ATYP_PERIPH; 8926 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 8927 num_filled++; 8928 } else if (targ_lun_id <= 0x3fff) { 8929 /* 8930 * Flat addressing method. 8931 */ 8932 lun_data->luns[num_filled].lundata[0] = 8933 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 8934 lun_data->luns[num_filled].lundata[1] = 8935 (targ_lun_id & 0xff); 8936 num_filled++; 8937 } else if (targ_lun_id <= 0xffffff) { 8938 /* 8939 * Extended flat addressing method. 8940 */ 8941 lun_data->luns[num_filled].lundata[0] = 8942 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 8943 scsi_ulto3b(targ_lun_id, 8944 &lun_data->luns[num_filled].lundata[1]); 8945 num_filled++; 8946 } else { 8947 printf("ctl_report_luns: bogus LUN number %jd, " 8948 "skipping\n", (intmax_t)targ_lun_id); 8949 } 8950 /* 8951 * According to SPC-3, rev 14 section 6.21: 8952 * 8953 * "The execution of a REPORT LUNS command to any valid and 8954 * installed logical unit shall clear the REPORTED LUNS DATA 8955 * HAS CHANGED unit attention condition for all logical 8956 * units of that target with respect to the requesting 8957 * initiator. A valid and installed logical unit is one 8958 * having a PERIPHERAL QUALIFIER of 000b in the standard 8959 * INQUIRY data (see 6.4.2)." 8960 * 8961 * If request_lun is NULL, the LUN this report luns command 8962 * was issued to is either disabled or doesn't exist. In that 8963 * case, we shouldn't clear any pending lun change unit 8964 * attention. 8965 */ 8966 if (request_lun != NULL) { 8967 mtx_lock(&lun->lun_lock); 8968 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 8969 mtx_unlock(&lun->lun_lock); 8970 } 8971 } 8972 mtx_unlock(&softc->ctl_lock); 8973 8974 /* 8975 * It's quite possible that we've returned fewer LUNs than we allocated 8976 * space for. Trim it. 
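 *
 * For example (illustrative numbers): if space was allocated for
 * four LUNs but only two were filled in, lun_datalen shrinks to
 * 8 + 2 * 8 = 24 bytes; with an allocation length of 64 the
 * residual reported back is 40 and only 24 bytes are actually
 * moved.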
8977 */ 8978 lun_datalen = sizeof(*lun_data) + 8979 (num_filled * sizeof(struct scsi_report_luns_lundata)); 8980 8981 if (lun_datalen < alloc_len) { 8982 ctsio->residual = alloc_len - lun_datalen; 8983 ctsio->kern_data_len = lun_datalen; 8984 ctsio->kern_total_len = lun_datalen; 8985 } else { 8986 ctsio->residual = 0; 8987 ctsio->kern_data_len = alloc_len; 8988 ctsio->kern_total_len = alloc_len; 8989 } 8990 ctsio->kern_data_resid = 0; 8991 ctsio->kern_rel_offset = 0; 8992 ctsio->kern_sg_entries = 0; 8993 8994 /* 8995 * We set this to the actual data length, regardless of how much 8996 * space we actually have to return results. If the user looks at 8997 * this value, he'll know whether or not he allocated enough space 8998 * and reissue the command if necessary. We don't support well 8999 * known logical units, so if the user asks for that, return none. 9000 */ 9001 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9002 9003 /* 9004 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9005 * this request. 9006 */ 9007 ctl_set_success(ctsio); 9008 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9009 ctsio->be_move_done = ctl_config_move_done; 9010 ctl_datamove((union ctl_io *)ctsio); 9011 return (retval); 9012 } 9013 9014 int 9015 ctl_request_sense(struct ctl_scsiio *ctsio) 9016 { 9017 struct scsi_request_sense *cdb; 9018 struct scsi_sense_data *sense_ptr; 9019 struct ctl_softc *ctl_softc; 9020 struct ctl_lun *lun; 9021 uint32_t initidx; 9022 int have_error; 9023 scsi_sense_data_type sense_format; 9024 ctl_ua_type ua_type; 9025 9026 cdb = (struct scsi_request_sense *)ctsio->cdb; 9027 9028 ctl_softc = control_softc; 9029 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9030 9031 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9032 9033 /* 9034 * Determine which sense format the user wants. 9035 */ 9036 if (cdb->byte2 & SRS_DESC) 9037 sense_format = SSD_TYPE_DESC; 9038 else 9039 sense_format = SSD_TYPE_FIXED; 9040 9041 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9042 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9043 ctsio->kern_sg_entries = 0; 9044 9045 /* 9046 * struct scsi_sense_data, which is currently set to 256 bytes, is 9047 * larger than the largest allowed value for the length field in the 9048 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9049 */ 9050 ctsio->residual = 0; 9051 ctsio->kern_data_len = cdb->length; 9052 ctsio->kern_total_len = cdb->length; 9053 9054 ctsio->kern_data_resid = 0; 9055 ctsio->kern_rel_offset = 0; 9056 ctsio->kern_sg_entries = 0; 9057 9058 /* 9059 * If we don't have a LUN, we don't have any pending sense. 9060 */ 9061 if (lun == NULL) 9062 goto no_sense; 9063 9064 have_error = 0; 9065 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9066 /* 9067 * Check for pending sense, and then for pending unit attentions. 9068 * Pending sense gets returned first, then pending unit attentions. 9069 */ 9070 mtx_lock(&lun->lun_lock); 9071 #ifdef CTL_WITH_CA 9072 if (ctl_is_set(lun->have_ca, initidx)) { 9073 scsi_sense_data_type stored_format; 9074 9075 /* 9076 * Check to see which sense format was used for the stored 9077 * sense data. 9078 */ 9079 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9080 9081 /* 9082 * If the user requested a different sense format than the 9083 * one we stored, then we need to convert it to the other 9084 * format. If we're going from descriptor to fixed format 9085 * sense data, we may lose things in translation, depending 9086 * on what options were used. 
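 *
 * (For example, the descriptor format INFORMATION field is 8 bytes
 * wide while the fixed format only has room for 4, so a
 * descriptor-to-fixed conversion can silently drop the upper
 * bytes.)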
9087 * 9088 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9089 * for some reason we'll just copy it out as-is. 9090 */ 9091 if ((stored_format == SSD_TYPE_FIXED) 9092 && (sense_format == SSD_TYPE_DESC)) 9093 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9094 &lun->pending_sense[initidx], 9095 (struct scsi_sense_data_desc *)sense_ptr); 9096 else if ((stored_format == SSD_TYPE_DESC) 9097 && (sense_format == SSD_TYPE_FIXED)) 9098 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9099 &lun->pending_sense[initidx], 9100 (struct scsi_sense_data_fixed *)sense_ptr); 9101 else 9102 memcpy(sense_ptr, &lun->pending_sense[initidx], 9103 MIN(sizeof(*sense_ptr), 9104 sizeof(lun->pending_sense[initidx]))); 9105 9106 ctl_clear_mask(lun->have_ca, initidx); 9107 have_error = 1; 9108 } else 9109 #endif 9110 { 9111 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9112 if (ua_type != CTL_UA_NONE) 9113 have_error = 1; 9114 if (ua_type == CTL_UA_LUN_CHANGE) { 9115 mtx_unlock(&lun->lun_lock); 9116 mtx_lock(&ctl_softc->ctl_lock); 9117 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type); 9118 mtx_unlock(&ctl_softc->ctl_lock); 9119 mtx_lock(&lun->lun_lock); 9120 } 9121 9122 } 9123 mtx_unlock(&lun->lun_lock); 9124 9125 /* 9126 * We already have a pending error, return it. 9127 */ 9128 if (have_error != 0) { 9129 /* 9130 * We report the SCSI status as OK, since the status of the 9131 * request sense command itself is OK. 9132 * We report 0 for the sense length, because we aren't doing 9133 * autosense in this case. We're reporting sense as 9134 * parameter data. 9135 */ 9136 ctl_set_success(ctsio); 9137 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9138 ctsio->be_move_done = ctl_config_move_done; 9139 ctl_datamove((union ctl_io *)ctsio); 9140 return (CTL_RETVAL_COMPLETE); 9141 } 9142 9143 no_sense: 9144 9145 /* 9146 * No sense information to report, so we report that everything is 9147 * okay. 9148 */ 9149 ctl_set_sense_data(sense_ptr, 9150 lun, 9151 sense_format, 9152 /*current_error*/ 1, 9153 /*sense_key*/ SSD_KEY_NO_SENSE, 9154 /*asc*/ 0x00, 9155 /*ascq*/ 0x00, 9156 SSD_ELEM_NONE); 9157 9158 /* 9159 * We report 0 for the sense length, because we aren't doing 9160 * autosense in this case. We're reporting sense as parameter data. 9161 */ 9162 ctl_set_success(ctsio); 9163 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9164 ctsio->be_move_done = ctl_config_move_done; 9165 ctl_datamove((union ctl_io *)ctsio); 9166 return (CTL_RETVAL_COMPLETE); 9167 } 9168 9169 int 9170 ctl_tur(struct ctl_scsiio *ctsio) 9171 { 9172 9173 CTL_DEBUG_PRINT(("ctl_tur\n")); 9174 9175 ctl_set_success(ctsio); 9176 ctl_done((union ctl_io *)ctsio); 9177 9178 return (CTL_RETVAL_COMPLETE); 9179 } 9180 9181 #ifdef notyet 9182 static int 9183 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9184 { 9185 9186 } 9187 #endif 9188 9189 /* 9190 * SCSI VPD page 0x00, the Supported VPD Pages page. 
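 *
 * Page 0x00 simply lists, in ascending order, the page codes of all
 * the other VPD pages this target serves; initiators normally fetch
 * it first to decide which of the pages handled below they may ask
 * for.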
9191 */ 9192 static int 9193 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9194 { 9195 struct scsi_vpd_supported_pages *pages; 9196 int sup_page_size; 9197 struct ctl_lun *lun; 9198 int p; 9199 9200 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9201 9202 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9203 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9204 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9205 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9206 ctsio->kern_sg_entries = 0; 9207 9208 if (sup_page_size < alloc_len) { 9209 ctsio->residual = alloc_len - sup_page_size; 9210 ctsio->kern_data_len = sup_page_size; 9211 ctsio->kern_total_len = sup_page_size; 9212 } else { 9213 ctsio->residual = 0; 9214 ctsio->kern_data_len = alloc_len; 9215 ctsio->kern_total_len = alloc_len; 9216 } 9217 ctsio->kern_data_resid = 0; 9218 ctsio->kern_rel_offset = 0; 9219 ctsio->kern_sg_entries = 0; 9220 9221 /* 9222 * The control device is always connected. The disk device, on the 9223 * other hand, may not be online all the time. Need to change this 9224 * to figure out whether the disk device is actually online or not. 9225 */ 9226 if (lun != NULL) 9227 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9228 lun->be_lun->lun_type; 9229 else 9230 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9231 9232 p = 0; 9233 /* Supported VPD pages */ 9234 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9235 /* Serial Number */ 9236 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9237 /* Device Identification */ 9238 pages->page_list[p++] = SVPD_DEVICE_ID; 9239 /* Extended INQUIRY Data */ 9240 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9241 /* Mode Page Policy */ 9242 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9243 /* SCSI Ports */ 9244 pages->page_list[p++] = SVPD_SCSI_PORTS; 9245 /* Third-party Copy */ 9246 pages->page_list[p++] = SVPD_SCSI_TPC; 9247 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9248 /* Block limits */ 9249 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9250 /* Block Device Characteristics */ 9251 pages->page_list[p++] = SVPD_BDC; 9252 /* Logical Block Provisioning */ 9253 pages->page_list[p++] = SVPD_LBP; 9254 } 9255 pages->length = p; 9256 9257 ctl_set_success(ctsio); 9258 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9259 ctsio->be_move_done = ctl_config_move_done; 9260 ctl_datamove((union ctl_io *)ctsio); 9261 return (CTL_RETVAL_COMPLETE); 9262 } 9263 9264 /* 9265 * SCSI VPD page 0x80, the Unit Serial Number page. 9266 */ 9267 static int 9268 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9269 { 9270 struct scsi_vpd_unit_serial_number *sn_ptr; 9271 struct ctl_lun *lun; 9272 int data_len; 9273 9274 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9275 9276 data_len = 4 + CTL_SN_LEN; 9277 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9278 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9279 if (data_len < alloc_len) { 9280 ctsio->residual = alloc_len - data_len; 9281 ctsio->kern_data_len = data_len; 9282 ctsio->kern_total_len = data_len; 9283 } else { 9284 ctsio->residual = 0; 9285 ctsio->kern_data_len = alloc_len; 9286 ctsio->kern_total_len = alloc_len; 9287 } 9288 ctsio->kern_data_resid = 0; 9289 ctsio->kern_rel_offset = 0; 9290 ctsio->kern_sg_entries = 0; 9291 9292 /* 9293 * The control device is always connected. The disk device, on the 9294 * other hand, may not be online all the time. 
Need to change this 9295 * to figure out whether the disk device is actually online or not. 9296 */ 9297 if (lun != NULL) 9298 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9299 lun->be_lun->lun_type; 9300 else 9301 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9302 9303 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9304 sn_ptr->length = CTL_SN_LEN; 9305 /* 9306 * If we don't have a LUN, we just leave the serial number as 9307 * all spaces. 9308 */ 9309 if (lun != NULL) { 9310 strncpy((char *)sn_ptr->serial_num, 9311 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9312 } else 9313 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9314 9315 ctl_set_success(ctsio); 9316 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9317 ctsio->be_move_done = ctl_config_move_done; 9318 ctl_datamove((union ctl_io *)ctsio); 9319 return (CTL_RETVAL_COMPLETE); 9320 } 9321 9322 9323 /* 9324 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9325 */ 9326 static int 9327 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9328 { 9329 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9330 struct ctl_lun *lun; 9331 int data_len; 9332 9333 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9334 9335 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9336 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9337 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9338 ctsio->kern_sg_entries = 0; 9339 9340 if (data_len < alloc_len) { 9341 ctsio->residual = alloc_len - data_len; 9342 ctsio->kern_data_len = data_len; 9343 ctsio->kern_total_len = data_len; 9344 } else { 9345 ctsio->residual = 0; 9346 ctsio->kern_data_len = alloc_len; 9347 ctsio->kern_total_len = alloc_len; 9348 } 9349 ctsio->kern_data_resid = 0; 9350 ctsio->kern_rel_offset = 0; 9351 ctsio->kern_sg_entries = 0; 9352 9353 /* 9354 * The control device is always connected. The disk device, on the 9355 * other hand, may not be online all the time. 9356 */ 9357 if (lun != NULL) 9358 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9359 lun->be_lun->lun_type; 9360 else 9361 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9362 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9363 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9364 /* 9365 * We support head of queue, ordered and simple tags. 9366 */ 9367 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9368 /* 9369 * Volatile cache supported. 9370 */ 9371 eid_ptr->flags3 = SVPD_EID_V_SUP; 9372 9373 /* 9374 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9375 * attention for a particular IT nexus on all LUNs once we report 9376 * it to that nexus once. This bit is required as of SPC-4. 9377 */ 9378 eid_ptr->flags4 = SVPD_EID_LUICLT; 9379 9380 /* 9381 * XXX KDM in order to correctly answer this, we would need 9382 * information from the SIM to determine how much sense data it 9383 * can send. So this would really be a path inquiry field, most 9384 * likely. This can be set to a maximum of 252 according to SPC-4, 9385 * but the hardware may or may not be able to support that much. 9386 * 0 just means that the maximum sense data length is not reported. 
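 *
 * A possible future shape for this, purely as an illustration (the
 * sim_max_sense value is hypothetical, no current interface
 * provides it), would be:
 *
 *	eid_ptr->max_sense_length = MIN(sim_max_sense, 252);
 *
 * i.e. clamp whatever the transport reports to the SPC-4 limit.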
9387 */ 9388 eid_ptr->max_sense_length = 0; 9389 9390 ctl_set_success(ctsio); 9391 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9392 ctsio->be_move_done = ctl_config_move_done; 9393 ctl_datamove((union ctl_io *)ctsio); 9394 return (CTL_RETVAL_COMPLETE); 9395 } 9396 9397 static int 9398 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9399 { 9400 struct scsi_vpd_mode_page_policy *mpp_ptr; 9401 struct ctl_lun *lun; 9402 int data_len; 9403 9404 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9405 9406 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9407 sizeof(struct scsi_vpd_mode_page_policy_descr); 9408 9409 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9410 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9411 ctsio->kern_sg_entries = 0; 9412 9413 if (data_len < alloc_len) { 9414 ctsio->residual = alloc_len - data_len; 9415 ctsio->kern_data_len = data_len; 9416 ctsio->kern_total_len = data_len; 9417 } else { 9418 ctsio->residual = 0; 9419 ctsio->kern_data_len = alloc_len; 9420 ctsio->kern_total_len = alloc_len; 9421 } 9422 ctsio->kern_data_resid = 0; 9423 ctsio->kern_rel_offset = 0; 9424 ctsio->kern_sg_entries = 0; 9425 9426 /* 9427 * The control device is always connected. The disk device, on the 9428 * other hand, may not be online all the time. 9429 */ 9430 if (lun != NULL) 9431 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9432 lun->be_lun->lun_type; 9433 else 9434 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9435 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9436 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9437 mpp_ptr->descr[0].page_code = 0x3f; 9438 mpp_ptr->descr[0].subpage_code = 0xff; 9439 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9440 9441 ctl_set_success(ctsio); 9442 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9443 ctsio->be_move_done = ctl_config_move_done; 9444 ctl_datamove((union ctl_io *)ctsio); 9445 return (CTL_RETVAL_COMPLETE); 9446 } 9447 9448 /* 9449 * SCSI VPD page 0x83, the Device Identification page. 
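 *
 * (Layout note: the page body is a chain of identification
 * descriptors.  As an example, the Relative Target Port descriptor
 * built below for an iSCSI port works out to proto_codeset 0x51
 * (protocol 5h, binary code set), id_type 0x94 (PIV, target port
 * association, designator type 4h), length 4, with the relative
 * port number in the last two identifier bytes.)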
9450 */ 9451 static int 9452 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9453 { 9454 struct scsi_vpd_device_id *devid_ptr; 9455 struct scsi_vpd_id_descriptor *desc; 9456 struct ctl_softc *softc; 9457 struct ctl_lun *lun; 9458 struct ctl_port *port; 9459 int data_len; 9460 uint8_t proto; 9461 9462 softc = control_softc; 9463 9464 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9465 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9466 9467 data_len = sizeof(struct scsi_vpd_device_id) + 9468 sizeof(struct scsi_vpd_id_descriptor) + 9469 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9470 sizeof(struct scsi_vpd_id_descriptor) + 9471 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9472 if (lun && lun->lun_devid) 9473 data_len += lun->lun_devid->len; 9474 if (port->port_devid) 9475 data_len += port->port_devid->len; 9476 if (port->target_devid) 9477 data_len += port->target_devid->len; 9478 9479 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9480 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9481 ctsio->kern_sg_entries = 0; 9482 9483 if (data_len < alloc_len) { 9484 ctsio->residual = alloc_len - data_len; 9485 ctsio->kern_data_len = data_len; 9486 ctsio->kern_total_len = data_len; 9487 } else { 9488 ctsio->residual = 0; 9489 ctsio->kern_data_len = alloc_len; 9490 ctsio->kern_total_len = alloc_len; 9491 } 9492 ctsio->kern_data_resid = 0; 9493 ctsio->kern_rel_offset = 0; 9494 ctsio->kern_sg_entries = 0; 9495 9496 /* 9497 * The control device is always connected. The disk device, on the 9498 * other hand, may not be online all the time. 9499 */ 9500 if (lun != NULL) 9501 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9502 lun->be_lun->lun_type; 9503 else 9504 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9505 devid_ptr->page_code = SVPD_DEVICE_ID; 9506 scsi_ulto2b(data_len - 4, devid_ptr->length); 9507 9508 if (port->port_type == CTL_PORT_FC) 9509 proto = SCSI_PROTO_FC << 4; 9510 else if (port->port_type == CTL_PORT_ISCSI) 9511 proto = SCSI_PROTO_ISCSI << 4; 9512 else 9513 proto = SCSI_PROTO_SPI << 4; 9514 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9515 9516 /* 9517 * We're using a LUN association here. i.e., this device ID is a 9518 * per-LUN identifier. 9519 */ 9520 if (lun && lun->lun_devid) { 9521 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9522 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9523 lun->lun_devid->len); 9524 } 9525 9526 /* 9527 * This is for the WWPN which is a port association. 
9528 */ 9529 if (port->port_devid) { 9530 memcpy(desc, port->port_devid->data, port->port_devid->len); 9531 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9532 port->port_devid->len); 9533 } 9534 9535 /* 9536 * This is for the Relative Target Port(type 4h) identifier 9537 */ 9538 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9539 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9540 SVPD_ID_TYPE_RELTARG; 9541 desc->length = 4; 9542 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9543 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9544 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9545 9546 /* 9547 * This is for the Target Port Group(type 5h) identifier 9548 */ 9549 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9550 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9551 SVPD_ID_TYPE_TPORTGRP; 9552 desc->length = 4; 9553 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 9554 &desc->identifier[2]); 9555 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9556 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9557 9558 /* 9559 * This is for the Target identifier 9560 */ 9561 if (port->target_devid) { 9562 memcpy(desc, port->target_devid->data, port->target_devid->len); 9563 } 9564 9565 ctl_set_success(ctsio); 9566 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9567 ctsio->be_move_done = ctl_config_move_done; 9568 ctl_datamove((union ctl_io *)ctsio); 9569 return (CTL_RETVAL_COMPLETE); 9570 } 9571 9572 static int 9573 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9574 { 9575 struct ctl_softc *softc = control_softc; 9576 struct scsi_vpd_scsi_ports *sp; 9577 struct scsi_vpd_port_designation *pd; 9578 struct scsi_vpd_port_designation_cont *pdc; 9579 struct ctl_lun *lun; 9580 struct ctl_port *port; 9581 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 9582 int num_target_port_groups; 9583 9584 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9585 9586 if (softc->is_single) 9587 num_target_port_groups = 1; 9588 else 9589 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 9590 num_target_ports = 0; 9591 iid_len = 0; 9592 id_len = 0; 9593 mtx_lock(&softc->ctl_lock); 9594 STAILQ_FOREACH(port, &softc->port_list, links) { 9595 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9596 continue; 9597 if (lun != NULL && 9598 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9599 continue; 9600 num_target_ports++; 9601 if (port->init_devid) 9602 iid_len += port->init_devid->len; 9603 if (port->port_devid) 9604 id_len += port->port_devid->len; 9605 } 9606 mtx_unlock(&softc->ctl_lock); 9607 9608 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 9609 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9610 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9611 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9612 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9613 ctsio->kern_sg_entries = 0; 9614 9615 if (data_len < alloc_len) { 9616 ctsio->residual = alloc_len - data_len; 9617 ctsio->kern_data_len = data_len; 9618 ctsio->kern_total_len = data_len; 9619 } else { 9620 ctsio->residual = 0; 9621 ctsio->kern_data_len = alloc_len; 9622 ctsio->kern_total_len = alloc_len; 9623 } 9624 ctsio->kern_data_resid = 0; 9625 ctsio->kern_rel_offset = 0; 9626 ctsio->kern_sg_entries = 0; 9627 9628 /* 9629 * The control device is always connected. The disk device, on the 9630 * other hand, may not be online all the time. 
Need to change this 9631 * to figure out whether the disk device is actually online or not. 9632 */ 9633 if (lun != NULL) 9634 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9635 lun->be_lun->lun_type; 9636 else 9637 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9638 9639 sp->page_code = SVPD_SCSI_PORTS; 9640 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9641 sp->page_length); 9642 pd = &sp->design[0]; 9643 9644 mtx_lock(&softc->ctl_lock); 9645 pg = softc->port_offset / CTL_MAX_PORTS; 9646 for (g = 0; g < num_target_port_groups; g++) { 9647 STAILQ_FOREACH(port, &softc->port_list, links) { 9648 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9649 continue; 9650 if (lun != NULL && 9651 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9652 continue; 9653 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 9654 scsi_ulto2b(p, pd->relative_port_id); 9655 if (port->init_devid && g == pg) { 9656 iid_len = port->init_devid->len; 9657 memcpy(pd->initiator_transportid, 9658 port->init_devid->data, port->init_devid->len); 9659 } else 9660 iid_len = 0; 9661 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9662 pdc = (struct scsi_vpd_port_designation_cont *) 9663 (&pd->initiator_transportid[iid_len]); 9664 if (port->port_devid && g == pg) { 9665 id_len = port->port_devid->len; 9666 memcpy(pdc->target_port_descriptors, 9667 port->port_devid->data, port->port_devid->len); 9668 } else 9669 id_len = 0; 9670 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9671 pd = (struct scsi_vpd_port_designation *) 9672 ((uint8_t *)pdc->target_port_descriptors + id_len); 9673 } 9674 } 9675 mtx_unlock(&softc->ctl_lock); 9676 9677 ctl_set_success(ctsio); 9678 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9679 ctsio->be_move_done = ctl_config_move_done; 9680 ctl_datamove((union ctl_io *)ctsio); 9681 return (CTL_RETVAL_COMPLETE); 9682 } 9683 9684 static int 9685 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9686 { 9687 struct scsi_vpd_block_limits *bl_ptr; 9688 struct ctl_lun *lun; 9689 int bs; 9690 9691 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9692 9693 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9694 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9695 ctsio->kern_sg_entries = 0; 9696 9697 if (sizeof(*bl_ptr) < alloc_len) { 9698 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9699 ctsio->kern_data_len = sizeof(*bl_ptr); 9700 ctsio->kern_total_len = sizeof(*bl_ptr); 9701 } else { 9702 ctsio->residual = 0; 9703 ctsio->kern_data_len = alloc_len; 9704 ctsio->kern_total_len = alloc_len; 9705 } 9706 ctsio->kern_data_resid = 0; 9707 ctsio->kern_rel_offset = 0; 9708 ctsio->kern_sg_entries = 0; 9709 9710 /* 9711 * The control device is always connected. The disk device, on the 9712 * other hand, may not be online all the time. Need to change this 9713 * to figure out whether the disk device is actually online or not. 
9714 */ 9715 if (lun != NULL) 9716 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9717 lun->be_lun->lun_type; 9718 else 9719 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9720 9721 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9722 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9723 bl_ptr->max_cmp_write_len = 0xff; 9724 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9725 if (lun != NULL) { 9726 bs = lun->be_lun->blocksize; 9727 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9728 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9729 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9730 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9731 if (lun->be_lun->ublockexp != 0) { 9732 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9733 bl_ptr->opt_unmap_grain); 9734 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9735 bl_ptr->unmap_grain_align); 9736 } 9737 } 9738 scsi_ulto4b(lun->be_lun->atomicblock, 9739 bl_ptr->max_atomic_transfer_length); 9740 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9741 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9742 } 9743 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9744 9745 ctl_set_success(ctsio); 9746 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9747 ctsio->be_move_done = ctl_config_move_done; 9748 ctl_datamove((union ctl_io *)ctsio); 9749 return (CTL_RETVAL_COMPLETE); 9750 } 9751 9752 static int 9753 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9754 { 9755 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9756 struct ctl_lun *lun; 9757 const char *value; 9758 u_int i; 9759 9760 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9761 9762 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9763 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9764 ctsio->kern_sg_entries = 0; 9765 9766 if (sizeof(*bdc_ptr) < alloc_len) { 9767 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 9768 ctsio->kern_data_len = sizeof(*bdc_ptr); 9769 ctsio->kern_total_len = sizeof(*bdc_ptr); 9770 } else { 9771 ctsio->residual = 0; 9772 ctsio->kern_data_len = alloc_len; 9773 ctsio->kern_total_len = alloc_len; 9774 } 9775 ctsio->kern_data_resid = 0; 9776 ctsio->kern_rel_offset = 0; 9777 ctsio->kern_sg_entries = 0; 9778 9779 /* 9780 * The control device is always connected. The disk device, on the 9781 * other hand, may not be online all the time. Need to change this 9782 * to figure out whether the disk device is actually online or not. 
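 *
 * (Further down, the MEDIUM ROTATION RATE reported here comes from
 * the LUN's "rpm" option when one is set: per SBC, 0 means not
 * reported, 1 means a non-rotating (solid state) medium and values
 * from 0x401 to 0xfffe give the nominal rotation rate in rpm;
 * CTL_DEFAULT_ROTATION_RATE is used when the option is absent.)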
9783 */ 9784 if (lun != NULL) 9785 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9786 lun->be_lun->lun_type; 9787 else 9788 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9789 bdc_ptr->page_code = SVPD_BDC; 9790 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9791 if (lun != NULL && 9792 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9793 i = strtol(value, NULL, 0); 9794 else 9795 i = CTL_DEFAULT_ROTATION_RATE; 9796 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9797 if (lun != NULL && 9798 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9799 i = strtol(value, NULL, 0); 9800 else 9801 i = 0; 9802 bdc_ptr->wab_wac_ff = (i & 0x0f); 9803 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9804 9805 ctl_set_success(ctsio); 9806 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9807 ctsio->be_move_done = ctl_config_move_done; 9808 ctl_datamove((union ctl_io *)ctsio); 9809 return (CTL_RETVAL_COMPLETE); 9810 } 9811 9812 static int 9813 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9814 { 9815 struct scsi_vpd_logical_block_prov *lbp_ptr; 9816 struct ctl_lun *lun; 9817 9818 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9819 9820 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9821 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9822 ctsio->kern_sg_entries = 0; 9823 9824 if (sizeof(*lbp_ptr) < alloc_len) { 9825 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 9826 ctsio->kern_data_len = sizeof(*lbp_ptr); 9827 ctsio->kern_total_len = sizeof(*lbp_ptr); 9828 } else { 9829 ctsio->residual = 0; 9830 ctsio->kern_data_len = alloc_len; 9831 ctsio->kern_total_len = alloc_len; 9832 } 9833 ctsio->kern_data_resid = 0; 9834 ctsio->kern_rel_offset = 0; 9835 ctsio->kern_sg_entries = 0; 9836 9837 /* 9838 * The control device is always connected. The disk device, on the 9839 * other hand, may not be online all the time. Need to change this 9840 * to figure out whether the disk device is actually online or not. 9841 */ 9842 if (lun != NULL) 9843 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9844 lun->be_lun->lun_type; 9845 else 9846 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9847 9848 lbp_ptr->page_code = SVPD_LBP; 9849 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9850 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9851 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9852 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9853 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9854 lbp_ptr->prov_type = SVPD_LBP_THIN; 9855 } 9856 9857 ctl_set_success(ctsio); 9858 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9859 ctsio->be_move_done = ctl_config_move_done; 9860 ctl_datamove((union ctl_io *)ctsio); 9861 return (CTL_RETVAL_COMPLETE); 9862 } 9863 9864 /* 9865 * INQUIRY with the EVPD bit set. 
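 *
 * (For reference: the requested VPD page code is taken from byte 2 of the
 * INQUIRY CDB and the allocation length from bytes 3 and 4, which is what
 * the cdb->page_code and scsi_2btoul(cdb->length) accesses below decode.)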
9866 */ 9867 static int 9868 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9869 { 9870 struct ctl_lun *lun; 9871 struct scsi_inquiry *cdb; 9872 int alloc_len, retval; 9873 9874 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9875 cdb = (struct scsi_inquiry *)ctsio->cdb; 9876 alloc_len = scsi_2btoul(cdb->length); 9877 9878 switch (cdb->page_code) { 9879 case SVPD_SUPPORTED_PAGES: 9880 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9881 break; 9882 case SVPD_UNIT_SERIAL_NUMBER: 9883 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9884 break; 9885 case SVPD_DEVICE_ID: 9886 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9887 break; 9888 case SVPD_EXTENDED_INQUIRY_DATA: 9889 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9890 break; 9891 case SVPD_MODE_PAGE_POLICY: 9892 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9893 break; 9894 case SVPD_SCSI_PORTS: 9895 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9896 break; 9897 case SVPD_SCSI_TPC: 9898 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9899 break; 9900 case SVPD_BLOCK_LIMITS: 9901 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9902 goto err; 9903 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9904 break; 9905 case SVPD_BDC: 9906 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9907 goto err; 9908 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9909 break; 9910 case SVPD_LBP: 9911 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9912 goto err; 9913 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9914 break; 9915 default: 9916 err: 9917 ctl_set_invalid_field(ctsio, 9918 /*sks_valid*/ 1, 9919 /*command*/ 1, 9920 /*field*/ 2, 9921 /*bit_valid*/ 0, 9922 /*bit*/ 0); 9923 ctl_done((union ctl_io *)ctsio); 9924 retval = CTL_RETVAL_COMPLETE; 9925 break; 9926 } 9927 9928 return (retval); 9929 } 9930 9931 /* 9932 * Standard INQUIRY data. 9933 */ 9934 static int 9935 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9936 { 9937 struct scsi_inquiry_data *inq_ptr; 9938 struct scsi_inquiry *cdb; 9939 struct ctl_softc *softc; 9940 struct ctl_lun *lun; 9941 char *val; 9942 uint32_t alloc_len, data_len; 9943 ctl_port_type port_type; 9944 9945 softc = control_softc; 9946 9947 /* 9948 * Figure out whether we're talking to a Fibre Channel port or not. 9949 * We treat the ioctl front end, and any SCSI adapters, as packetized 9950 * SCSI front ends. 9951 */ 9952 port_type = softc->ctl_ports[ 9953 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; 9954 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9955 port_type = CTL_PORT_SCSI; 9956 9957 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9958 cdb = (struct scsi_inquiry *)ctsio->cdb; 9959 alloc_len = scsi_2btoul(cdb->length); 9960 9961 /* 9962 * We malloc the full inquiry data size here and fill it 9963 * in. If the user only asks for less, we'll give him 9964 * that much. 
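 *
 * (In other words: if the initiator's allocation length is smaller than
 * data_len we return only alloc_len bytes, and if it is larger the unused
 * portion is reported back as residual.)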
9965 */
9966 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
9967 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9968 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
9969 ctsio->kern_sg_entries = 0;
9970 ctsio->kern_data_resid = 0;
9971 ctsio->kern_rel_offset = 0;
9972
9973 if (data_len < alloc_len) {
9974 ctsio->residual = alloc_len - data_len;
9975 ctsio->kern_data_len = data_len;
9976 ctsio->kern_total_len = data_len;
9977 } else {
9978 ctsio->residual = 0;
9979 ctsio->kern_data_len = alloc_len;
9980 ctsio->kern_total_len = alloc_len;
9981 }
9982
9983 /*
9984 * If we have a LUN configured, report it as connected. Otherwise,
9985 * report that it is offline or no device is supported, depending
9986 * on the value of inquiry_pq_no_lun.
9987 *
9988 * According to the spec (SPC-4 r34), the peripheral qualifier
9989 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
9990 *
9991 * "A peripheral device having the specified peripheral device type
9992 * is not connected to this logical unit. However, the device
9993 * server is capable of supporting the specified peripheral device
9994 * type on this logical unit."
9995 *
9996 * According to the same spec, the peripheral qualifier
9997 * SID_QUAL_BAD_LU (011b) is used in this scenario:
9998 *
9999 * "The device server is not capable of supporting a peripheral
10000 * device on this logical unit. For this peripheral qualifier the
10001 * peripheral device type shall be set to 1Fh. All other peripheral
10002 * device type values are reserved for this peripheral qualifier."
10003 *
10004 * Given the text, it would seem that we probably want to report that
10005 * the LUN is offline here. There is no LUN connected, but we can
10006 * support a LUN at the given LUN number.
10007 *
10008 * In the real world, though, it sounds like things are a little
10009 * different:
10010 *
10011 * - Linux, when presented with a LUN with the offline peripheral
10012 * qualifier, will create an sg driver instance for it. So when
10013 * you attach it to CTL, you wind up with a ton of sg driver
10014 * instances. (One for every LUN that Linux bothered to probe.)
10015 * Linux does this despite the fact that it issues a REPORT LUNS
10016 * to LUN 0 to get the inventory of supported LUNs.
10017 *
10018 * - There is other anecdotal evidence (from Emulex folks) about
10019 * arrays that use the offline peripheral qualifier for LUNs that
10020 * are on the "passive" path in an active/passive array.
10021 *
10022 * So the solution is to provide a hopefully reasonable default
10023 * (return bad/no LUN) and allow the user to change the behavior
10024 * with a tunable/sysctl variable.
10025 */
10026 if (lun != NULL)
10027 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10028 lun->be_lun->lun_type;
10029 else if (softc->inquiry_pq_no_lun == 0)
10030 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10031 else
10032 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
10033
10034 /* RMB in byte 2 is 0 */
10035 inq_ptr->version = SCSI_REV_SPC4;
10036
10037 /*
10038 * According to SAM-3, even if a device only supports a single
10039 * level of LUN addressing, it should still set the HISUP bit:
10040 *
10041 * 4.9.1 Logical unit numbers overview
10042 *
10043 * All logical unit number formats described in this standard are
10044 * hierarchical in structure even when only a single level in that
10045 * hierarchy is used. The HISUP bit shall be set to one in the
10046 * standard INQUIRY data (see SPC-2) when any logical unit number
10047 * format described in this standard is used. Non-hierarchical
10048 * formats are outside the scope of this standard.
10049 *
10050 * Therefore we set the HiSup bit here.
10051 *
10052 * The response format is 2, per SPC-3.
10053 */
10054 inq_ptr->response_format = SID_HiSup | 2;
10055
10056 inq_ptr->additional_length = data_len -
10057 (offsetof(struct scsi_inquiry_data, additional_length) + 1);
10058 CTL_DEBUG_PRINT(("additional_length = %d\n",
10059 inq_ptr->additional_length));
10060
10061 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
10062 /* 16 bit addressing */
10063 if (port_type == CTL_PORT_SCSI)
10064 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
10065 /* XXX set the SID_MultiP bit here if we're actually going to
10066 respond on multiple ports */
10067 inq_ptr->spc2_flags |= SPC2_SID_MultiP;
10068
10069 /* 16 bit data bus, synchronous transfers */
10070 if (port_type == CTL_PORT_SCSI)
10071 inq_ptr->flags = SID_WBus16 | SID_Sync;
10072 /*
10073 * XXX KDM do we want to support tagged queueing on the control
10074 * device at all?
10075 */
10076 if ((lun == NULL)
10077 || (lun->be_lun->lun_type != T_PROCESSOR))
10078 inq_ptr->flags |= SID_CmdQue;
10079 /*
10080 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
10081 * We have 8 bytes for the vendor name, and 16 bytes for the device
10082 * name and 4 bytes for the revision.
10083 */
10084 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10085 "vendor")) == NULL) {
10086 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
10087 } else {
10088 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
10089 strncpy(inq_ptr->vendor, val,
10090 min(sizeof(inq_ptr->vendor), strlen(val)));
10091 }
10092 if (lun == NULL) {
10093 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10094 sizeof(inq_ptr->product));
10095 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
10096 switch (lun->be_lun->lun_type) {
10097 case T_DIRECT:
10098 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10099 sizeof(inq_ptr->product));
10100 break;
10101 case T_PROCESSOR:
10102 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
10103 sizeof(inq_ptr->product));
10104 break;
10105 default:
10106 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
10107 sizeof(inq_ptr->product));
10108 break;
10109 }
10110 } else {
10111 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
10112 strncpy(inq_ptr->product, val,
10113 min(sizeof(inq_ptr->product), strlen(val)));
10114 }
10115
10116 /*
10117 * XXX make this a macro somewhere so it automatically gets
10118 * incremented when we make changes.
10119 */
10120 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10121 "revision")) == NULL) {
10122 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
10123 } else {
10124 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
10125 strncpy(inq_ptr->revision, val,
10126 min(sizeof(inq_ptr->revision), strlen(val)));
10127 }
10128
10129 /*
10130 * For parallel SCSI, we support double transition and single
10131 * transition clocking. We also support QAS (Quick Arbitration
10132 * and Selection) and Information Unit transfers on both the
10133 * control and array devices.
10134 */ 10135 if (port_type == CTL_PORT_SCSI) 10136 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10137 SID_SPI_IUS; 10138 10139 /* SAM-5 (no version claimed) */ 10140 scsi_ulto2b(0x00A0, inq_ptr->version1); 10141 /* SPC-4 (no version claimed) */ 10142 scsi_ulto2b(0x0460, inq_ptr->version2); 10143 if (port_type == CTL_PORT_FC) { 10144 /* FCP-2 ANSI INCITS.350:2003 */ 10145 scsi_ulto2b(0x0917, inq_ptr->version3); 10146 } else if (port_type == CTL_PORT_SCSI) { 10147 /* SPI-4 ANSI INCITS.362:200x */ 10148 scsi_ulto2b(0x0B56, inq_ptr->version3); 10149 } else if (port_type == CTL_PORT_ISCSI) { 10150 /* iSCSI (no version claimed) */ 10151 scsi_ulto2b(0x0960, inq_ptr->version3); 10152 } else if (port_type == CTL_PORT_SAS) { 10153 /* SAS (no version claimed) */ 10154 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10155 } 10156 10157 if (lun == NULL) { 10158 /* SBC-4 (no version claimed) */ 10159 scsi_ulto2b(0x0600, inq_ptr->version4); 10160 } else { 10161 switch (lun->be_lun->lun_type) { 10162 case T_DIRECT: 10163 /* SBC-4 (no version claimed) */ 10164 scsi_ulto2b(0x0600, inq_ptr->version4); 10165 break; 10166 case T_PROCESSOR: 10167 default: 10168 break; 10169 } 10170 } 10171 10172 ctl_set_success(ctsio); 10173 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10174 ctsio->be_move_done = ctl_config_move_done; 10175 ctl_datamove((union ctl_io *)ctsio); 10176 return (CTL_RETVAL_COMPLETE); 10177 } 10178 10179 int 10180 ctl_inquiry(struct ctl_scsiio *ctsio) 10181 { 10182 struct scsi_inquiry *cdb; 10183 int retval; 10184 10185 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10186 10187 cdb = (struct scsi_inquiry *)ctsio->cdb; 10188 if (cdb->byte2 & SI_EVPD) 10189 retval = ctl_inquiry_evpd(ctsio); 10190 else if (cdb->page_code == 0) 10191 retval = ctl_inquiry_std(ctsio); 10192 else { 10193 ctl_set_invalid_field(ctsio, 10194 /*sks_valid*/ 1, 10195 /*command*/ 1, 10196 /*field*/ 2, 10197 /*bit_valid*/ 0, 10198 /*bit*/ 0); 10199 ctl_done((union ctl_io *)ctsio); 10200 return (CTL_RETVAL_COMPLETE); 10201 } 10202 10203 return (retval); 10204 } 10205 10206 /* 10207 * For known CDB types, parse the LBA and length. 
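 *
 * (Worked example, assuming a READ(10) CDB of 28 00 00 00 10 00 00 00 08 00:
 * bytes 2-5 hold the 4-byte LBA and bytes 7-8 the 2-byte transfer length,
 * so this routine would return lba 0x1000 and len 8.)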
10208 */ 10209 static int 10210 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10211 { 10212 if (io->io_hdr.io_type != CTL_IO_SCSI) 10213 return (1); 10214 10215 switch (io->scsiio.cdb[0]) { 10216 case COMPARE_AND_WRITE: { 10217 struct scsi_compare_and_write *cdb; 10218 10219 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10220 10221 *lba = scsi_8btou64(cdb->addr); 10222 *len = cdb->length; 10223 break; 10224 } 10225 case READ_6: 10226 case WRITE_6: { 10227 struct scsi_rw_6 *cdb; 10228 10229 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10230 10231 *lba = scsi_3btoul(cdb->addr); 10232 /* only 5 bits are valid in the most significant address byte */ 10233 *lba &= 0x1fffff; 10234 *len = cdb->length; 10235 break; 10236 } 10237 case READ_10: 10238 case WRITE_10: { 10239 struct scsi_rw_10 *cdb; 10240 10241 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10242 10243 *lba = scsi_4btoul(cdb->addr); 10244 *len = scsi_2btoul(cdb->length); 10245 break; 10246 } 10247 case WRITE_VERIFY_10: { 10248 struct scsi_write_verify_10 *cdb; 10249 10250 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10251 10252 *lba = scsi_4btoul(cdb->addr); 10253 *len = scsi_2btoul(cdb->length); 10254 break; 10255 } 10256 case READ_12: 10257 case WRITE_12: { 10258 struct scsi_rw_12 *cdb; 10259 10260 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10261 10262 *lba = scsi_4btoul(cdb->addr); 10263 *len = scsi_4btoul(cdb->length); 10264 break; 10265 } 10266 case WRITE_VERIFY_12: { 10267 struct scsi_write_verify_12 *cdb; 10268 10269 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10270 10271 *lba = scsi_4btoul(cdb->addr); 10272 *len = scsi_4btoul(cdb->length); 10273 break; 10274 } 10275 case READ_16: 10276 case WRITE_16: 10277 case WRITE_ATOMIC_16: { 10278 struct scsi_rw_16 *cdb; 10279 10280 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10281 10282 *lba = scsi_8btou64(cdb->addr); 10283 *len = scsi_4btoul(cdb->length); 10284 break; 10285 } 10286 case WRITE_VERIFY_16: { 10287 struct scsi_write_verify_16 *cdb; 10288 10289 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10290 10291 *lba = scsi_8btou64(cdb->addr); 10292 *len = scsi_4btoul(cdb->length); 10293 break; 10294 } 10295 case WRITE_SAME_10: { 10296 struct scsi_write_same_10 *cdb; 10297 10298 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10299 10300 *lba = scsi_4btoul(cdb->addr); 10301 *len = scsi_2btoul(cdb->length); 10302 break; 10303 } 10304 case WRITE_SAME_16: { 10305 struct scsi_write_same_16 *cdb; 10306 10307 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10308 10309 *lba = scsi_8btou64(cdb->addr); 10310 *len = scsi_4btoul(cdb->length); 10311 break; 10312 } 10313 case VERIFY_10: { 10314 struct scsi_verify_10 *cdb; 10315 10316 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10317 10318 *lba = scsi_4btoul(cdb->addr); 10319 *len = scsi_2btoul(cdb->length); 10320 break; 10321 } 10322 case VERIFY_12: { 10323 struct scsi_verify_12 *cdb; 10324 10325 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10326 10327 *lba = scsi_4btoul(cdb->addr); 10328 *len = scsi_4btoul(cdb->length); 10329 break; 10330 } 10331 case VERIFY_16: { 10332 struct scsi_verify_16 *cdb; 10333 10334 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10335 10336 *lba = scsi_8btou64(cdb->addr); 10337 *len = scsi_4btoul(cdb->length); 10338 break; 10339 } 10340 case UNMAP: { 10341 *lba = 0; 10342 *len = UINT64_MAX; 10343 break; 10344 } 10345 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10346 struct scsi_get_lba_status *cdb; 10347 10348 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 
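/*
 * Only the starting LBA comes from the CDB here; the length is set to a
 * very large value so that extent-based serialization conservatively
 * treats a wide range past that LBA as affected.
 */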
10349 *lba = scsi_8btou64(cdb->addr); 10350 *len = UINT32_MAX; 10351 break; 10352 } 10353 default: 10354 return (1); 10355 break; /* NOTREACHED */ 10356 } 10357 10358 return (0); 10359 } 10360 10361 static ctl_action 10362 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10363 bool seq) 10364 { 10365 uint64_t endlba1, endlba2; 10366 10367 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10368 endlba2 = lba2 + len2 - 1; 10369 10370 if ((endlba1 < lba2) || (endlba2 < lba1)) 10371 return (CTL_ACTION_PASS); 10372 else 10373 return (CTL_ACTION_BLOCK); 10374 } 10375 10376 static int 10377 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10378 { 10379 struct ctl_ptr_len_flags *ptrlen; 10380 struct scsi_unmap_desc *buf, *end, *range; 10381 uint64_t lba; 10382 uint32_t len; 10383 10384 /* If not UNMAP -- go other way. */ 10385 if (io->io_hdr.io_type != CTL_IO_SCSI || 10386 io->scsiio.cdb[0] != UNMAP) 10387 return (CTL_ACTION_ERROR); 10388 10389 /* If UNMAP without data -- block and wait for data. */ 10390 ptrlen = (struct ctl_ptr_len_flags *) 10391 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10392 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10393 ptrlen->ptr == NULL) 10394 return (CTL_ACTION_BLOCK); 10395 10396 /* UNMAP with data -- check for collision. */ 10397 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10398 end = buf + ptrlen->len / sizeof(*buf); 10399 for (range = buf; range < end; range++) { 10400 lba = scsi_8btou64(range->lba); 10401 len = scsi_4btoul(range->length); 10402 if ((lba < lba2 + len2) && (lba + len > lba2)) 10403 return (CTL_ACTION_BLOCK); 10404 } 10405 return (CTL_ACTION_PASS); 10406 } 10407 10408 static ctl_action 10409 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10410 { 10411 uint64_t lba1, lba2; 10412 uint64_t len1, len2; 10413 int retval; 10414 10415 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10416 return (CTL_ACTION_ERROR); 10417 10418 retval = ctl_extent_check_unmap(io1, lba2, len2); 10419 if (retval != CTL_ACTION_ERROR) 10420 return (retval); 10421 10422 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10423 return (CTL_ACTION_ERROR); 10424 10425 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10426 } 10427 10428 static ctl_action 10429 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10430 { 10431 uint64_t lba1, lba2; 10432 uint64_t len1, len2; 10433 10434 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10435 return (CTL_ACTION_ERROR); 10436 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10437 return (CTL_ACTION_ERROR); 10438 10439 if (lba1 + len1 == lba2) 10440 return (CTL_ACTION_BLOCK); 10441 return (CTL_ACTION_PASS); 10442 } 10443 10444 static ctl_action 10445 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10446 union ctl_io *ooa_io) 10447 { 10448 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10449 ctl_serialize_action *serialize_row; 10450 10451 /* 10452 * The initiator attempted multiple untagged commands at the same 10453 * time. Can't do that. 
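 *
 * (The CTL_ACTION_OVERLAP returned for this case is what ultimately makes
 * the caller fail the command as an overlapped command, via
 * ctl_set_overlapped_cmd() in ctl_scsiio_precheck().)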
10454 */ 10455 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10456 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10457 && ((pending_io->io_hdr.nexus.targ_port == 10458 ooa_io->io_hdr.nexus.targ_port) 10459 && (pending_io->io_hdr.nexus.initid.id == 10460 ooa_io->io_hdr.nexus.initid.id)) 10461 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10462 CTL_FLAG_STATUS_SENT)) == 0)) 10463 return (CTL_ACTION_OVERLAP); 10464 10465 /* 10466 * The initiator attempted to send multiple tagged commands with 10467 * the same ID. (It's fine if different initiators have the same 10468 * tag ID.) 10469 * 10470 * Even if all of those conditions are true, we don't kill the I/O 10471 * if the command ahead of us has been aborted. We won't end up 10472 * sending it to the FETD, and it's perfectly legal to resend a 10473 * command with the same tag number as long as the previous 10474 * instance of this tag number has been aborted somehow. 10475 */ 10476 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10477 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10478 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10479 && ((pending_io->io_hdr.nexus.targ_port == 10480 ooa_io->io_hdr.nexus.targ_port) 10481 && (pending_io->io_hdr.nexus.initid.id == 10482 ooa_io->io_hdr.nexus.initid.id)) 10483 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10484 CTL_FLAG_STATUS_SENT)) == 0)) 10485 return (CTL_ACTION_OVERLAP_TAG); 10486 10487 /* 10488 * If we get a head of queue tag, SAM-3 says that we should 10489 * immediately execute it. 10490 * 10491 * What happens if this command would normally block for some other 10492 * reason? e.g. a request sense with a head of queue tag 10493 * immediately after a write. Normally that would block, but this 10494 * will result in its getting executed immediately... 10495 * 10496 * We currently return "pass" instead of "skip", so we'll end up 10497 * going through the rest of the queue to check for overlapped tags. 10498 * 10499 * XXX KDM check for other types of blockage first?? 10500 */ 10501 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10502 return (CTL_ACTION_PASS); 10503 10504 /* 10505 * Ordered tags have to block until all items ahead of them 10506 * have completed. If we get called with an ordered tag, we always 10507 * block, if something else is ahead of us in the queue. 10508 */ 10509 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10510 return (CTL_ACTION_BLOCK); 10511 10512 /* 10513 * Simple tags get blocked until all head of queue and ordered tags 10514 * ahead of them have completed. I'm lumping untagged commands in 10515 * with simple tags here. XXX KDM is that the right thing to do? 
10516 */ 10517 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10518 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10519 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10520 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10521 return (CTL_ACTION_BLOCK); 10522 10523 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10524 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10525 10526 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10527 10528 switch (serialize_row[pending_entry->seridx]) { 10529 case CTL_SER_BLOCK: 10530 return (CTL_ACTION_BLOCK); 10531 case CTL_SER_EXTENT: 10532 return (ctl_extent_check(ooa_io, pending_io, 10533 (lun->serseq == CTL_LUN_SERSEQ_ON))); 10534 case CTL_SER_EXTENTOPT: 10535 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10536 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10537 return (ctl_extent_check(ooa_io, pending_io, 10538 (lun->serseq == CTL_LUN_SERSEQ_ON))); 10539 return (CTL_ACTION_PASS); 10540 case CTL_SER_EXTENTSEQ: 10541 if (lun->serseq != CTL_LUN_SERSEQ_OFF) 10542 return (ctl_extent_check_seq(ooa_io, pending_io)); 10543 return (CTL_ACTION_PASS); 10544 case CTL_SER_PASS: 10545 return (CTL_ACTION_PASS); 10546 case CTL_SER_BLOCKOPT: 10547 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10548 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10549 return (CTL_ACTION_BLOCK); 10550 return (CTL_ACTION_PASS); 10551 case CTL_SER_SKIP: 10552 return (CTL_ACTION_SKIP); 10553 default: 10554 panic("invalid serialization value %d", 10555 serialize_row[pending_entry->seridx]); 10556 } 10557 10558 return (CTL_ACTION_ERROR); 10559 } 10560 10561 /* 10562 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10563 * Assumptions: 10564 * - pending_io is generally either incoming, or on the blocked queue 10565 * - starting I/O is the I/O we want to start the check with. 10566 */ 10567 static ctl_action 10568 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10569 union ctl_io *starting_io) 10570 { 10571 union ctl_io *ooa_io; 10572 ctl_action action; 10573 10574 mtx_assert(&lun->lun_lock, MA_OWNED); 10575 10576 /* 10577 * Run back along the OOA queue, starting with the current 10578 * blocked I/O and going through every I/O before it on the 10579 * queue. If starting_io is NULL, we'll just end up returning 10580 * CTL_ACTION_PASS. 10581 */ 10582 for (ooa_io = starting_io; ooa_io != NULL; 10583 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10584 ooa_links)){ 10585 10586 /* 10587 * This routine just checks to see whether 10588 * cur_blocked is blocked by ooa_io, which is ahead 10589 * of it in the queue. It doesn't queue/dequeue 10590 * cur_blocked. 10591 */ 10592 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10593 switch (action) { 10594 case CTL_ACTION_BLOCK: 10595 case CTL_ACTION_OVERLAP: 10596 case CTL_ACTION_OVERLAP_TAG: 10597 case CTL_ACTION_SKIP: 10598 case CTL_ACTION_ERROR: 10599 return (action); 10600 break; /* NOTREACHED */ 10601 case CTL_ACTION_PASS: 10602 break; 10603 default: 10604 panic("invalid action %d", action); 10605 break; /* NOTREACHED */ 10606 } 10607 } 10608 10609 return (CTL_ACTION_PASS); 10610 } 10611 10612 /* 10613 * Assumptions: 10614 * - An I/O has just completed, and has been removed from the per-LUN OOA 10615 * queue, so some items on the blocked queue may now be unblocked. 
10616 */ 10617 static int 10618 ctl_check_blocked(struct ctl_lun *lun) 10619 { 10620 union ctl_io *cur_blocked, *next_blocked; 10621 10622 mtx_assert(&lun->lun_lock, MA_OWNED); 10623 10624 /* 10625 * Run forward from the head of the blocked queue, checking each 10626 * entry against the I/Os prior to it on the OOA queue to see if 10627 * there is still any blockage. 10628 * 10629 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10630 * with our removing a variable on it while it is traversing the 10631 * list. 10632 */ 10633 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10634 cur_blocked != NULL; cur_blocked = next_blocked) { 10635 union ctl_io *prev_ooa; 10636 ctl_action action; 10637 10638 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10639 blocked_links); 10640 10641 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10642 ctl_ooaq, ooa_links); 10643 10644 /* 10645 * If cur_blocked happens to be the first item in the OOA 10646 * queue now, prev_ooa will be NULL, and the action 10647 * returned will just be CTL_ACTION_PASS. 10648 */ 10649 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10650 10651 switch (action) { 10652 case CTL_ACTION_BLOCK: 10653 /* Nothing to do here, still blocked */ 10654 break; 10655 case CTL_ACTION_OVERLAP: 10656 case CTL_ACTION_OVERLAP_TAG: 10657 /* 10658 * This shouldn't happen! In theory we've already 10659 * checked this command for overlap... 10660 */ 10661 break; 10662 case CTL_ACTION_PASS: 10663 case CTL_ACTION_SKIP: { 10664 const struct ctl_cmd_entry *entry; 10665 int isc_retval; 10666 10667 /* 10668 * The skip case shouldn't happen, this transaction 10669 * should have never made it onto the blocked queue. 10670 */ 10671 /* 10672 * This I/O is no longer blocked, we can remove it 10673 * from the blocked queue. Since this is a TAILQ 10674 * (doubly linked list), we can do O(1) removals 10675 * from any place on the list. 10676 */ 10677 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10678 blocked_links); 10679 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10680 10681 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 10682 /* 10683 * Need to send IO back to original side to 10684 * run 10685 */ 10686 union ctl_ha_msg msg_info; 10687 10688 msg_info.hdr.original_sc = 10689 cur_blocked->io_hdr.original_sc; 10690 msg_info.hdr.serializing_sc = cur_blocked; 10691 msg_info.hdr.msg_type = CTL_MSG_R2R; 10692 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10693 &msg_info, sizeof(msg_info), 0)) > 10694 CTL_HA_STATUS_SUCCESS) { 10695 printf("CTL:Check Blocked error from " 10696 "ctl_ha_msg_send %d\n", 10697 isc_retval); 10698 } 10699 break; 10700 } 10701 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10702 10703 /* 10704 * Check this I/O for LUN state changes that may 10705 * have happened while this command was blocked. 10706 * The LUN state may have been changed by a command 10707 * ahead of us in the queue, so we need to re-check 10708 * for any states that can be caused by SCSI 10709 * commands. 10710 */ 10711 if (ctl_scsiio_lun_check(lun, entry, 10712 &cur_blocked->scsiio) == 0) { 10713 cur_blocked->io_hdr.flags |= 10714 CTL_FLAG_IS_WAS_ON_RTR; 10715 ctl_enqueue_rtr(cur_blocked); 10716 } else 10717 ctl_done(cur_blocked); 10718 break; 10719 } 10720 default: 10721 /* 10722 * This probably shouldn't happen -- we shouldn't 10723 * get CTL_ACTION_ERROR, or anything else. 
10724 */ 10725 break; 10726 } 10727 } 10728 10729 return (CTL_RETVAL_COMPLETE); 10730 } 10731 10732 /* 10733 * This routine (with one exception) checks LUN flags that can be set by 10734 * commands ahead of us in the OOA queue. These flags have to be checked 10735 * when a command initially comes in, and when we pull a command off the 10736 * blocked queue and are preparing to execute it. The reason we have to 10737 * check these flags for commands on the blocked queue is that the LUN 10738 * state may have been changed by a command ahead of us while we're on the 10739 * blocked queue. 10740 * 10741 * Ordering is somewhat important with these checks, so please pay 10742 * careful attention to the placement of any new checks. 10743 */ 10744 static int 10745 ctl_scsiio_lun_check(struct ctl_lun *lun, 10746 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 10747 { 10748 struct ctl_softc *softc = lun->ctl_softc; 10749 int retval; 10750 uint32_t residx; 10751 10752 retval = 0; 10753 10754 mtx_assert(&lun->lun_lock, MA_OWNED); 10755 10756 /* 10757 * If this shelf is a secondary shelf controller, we have to reject 10758 * any media access commands. 10759 */ 10760 if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 && 10761 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) { 10762 ctl_set_lun_standby(ctsio); 10763 retval = 1; 10764 goto bailout; 10765 } 10766 10767 if (entry->pattern & CTL_LUN_PAT_WRITE) { 10768 if (lun->flags & CTL_LUN_READONLY) { 10769 ctl_set_sense(ctsio, /*current_error*/ 1, 10770 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10771 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); 10772 retval = 1; 10773 goto bailout; 10774 } 10775 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 10776 .eca_and_aen & SCP_SWP) != 0) { 10777 ctl_set_sense(ctsio, /*current_error*/ 1, 10778 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10779 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 10780 retval = 1; 10781 goto bailout; 10782 } 10783 } 10784 10785 /* 10786 * Check for a reservation conflict. If this command isn't allowed 10787 * even on reserved LUNs, and if this initiator isn't the one who 10788 * reserved us, reject the command with a reservation conflict. 10789 */ 10790 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 10791 if ((lun->flags & CTL_LUN_RESERVED) 10792 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10793 if (lun->res_idx != residx) { 10794 ctl_set_reservation_conflict(ctsio); 10795 retval = 1; 10796 goto bailout; 10797 } 10798 } 10799 10800 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 10801 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 10802 /* No reservation or command is allowed. */; 10803 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 10804 (lun->res_type == SPR_TYPE_WR_EX || 10805 lun->res_type == SPR_TYPE_WR_EX_RO || 10806 lun->res_type == SPR_TYPE_WR_EX_AR)) { 10807 /* The command is allowed for Write Exclusive resv. */; 10808 } else { 10809 /* 10810 * if we aren't registered or it's a res holder type 10811 * reservation and this isn't the res holder then set a 10812 * conflict. 
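 *
 * (Note on the "res_type < 4" check below: persistent reservation type
 * codes 1 (Write Exclusive) and 3 (Exclusive Access) have a single
 * reservation holder, while the registrants-only and all-registrants
 * variants use codes 5 and above.)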
10813 */ 10814 if (ctl_get_prkey(lun, residx) == 0 10815 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10816 ctl_set_reservation_conflict(ctsio); 10817 retval = 1; 10818 goto bailout; 10819 } 10820 10821 } 10822 10823 if ((lun->flags & CTL_LUN_OFFLINE) 10824 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 10825 ctl_set_lun_not_ready(ctsio); 10826 retval = 1; 10827 goto bailout; 10828 } 10829 10830 /* 10831 * If the LUN is stopped, see if this particular command is allowed 10832 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 10833 */ 10834 if ((lun->flags & CTL_LUN_STOPPED) 10835 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10836 /* "Logical unit not ready, initializing cmd. required" */ 10837 ctl_set_lun_stopped(ctsio); 10838 retval = 1; 10839 goto bailout; 10840 } 10841 10842 if ((lun->flags & CTL_LUN_INOPERABLE) 10843 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10844 /* "Medium format corrupted" */ 10845 ctl_set_medium_format_corrupted(ctsio); 10846 retval = 1; 10847 goto bailout; 10848 } 10849 10850 bailout: 10851 return (retval); 10852 10853 } 10854 10855 static void 10856 ctl_failover_io(union ctl_io *io, int have_lock) 10857 { 10858 ctl_set_busy(&io->scsiio); 10859 ctl_done(io); 10860 } 10861 10862 #ifdef notyet 10863 static void 10864 ctl_failover(void) 10865 { 10866 struct ctl_lun *lun; 10867 struct ctl_softc *softc; 10868 union ctl_io *next_io, *pending_io; 10869 union ctl_io *io; 10870 int lun_idx; 10871 10872 softc = control_softc; 10873 10874 mtx_lock(&softc->ctl_lock); 10875 /* 10876 * Remove any cmds from the other SC from the rtr queue. These 10877 * will obviously only be for LUNs for which we're the primary. 10878 * We can't send status or get/send data for these commands. 10879 * Since they haven't been executed yet, we can just remove them. 10880 * We'll either abort them or delete them below, depending on 10881 * which HA mode we're in. 10882 */ 10883 #ifdef notyet 10884 mtx_lock(&softc->queue_lock); 10885 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 10886 io != NULL; io = next_io) { 10887 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10888 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10889 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 10890 ctl_io_hdr, links); 10891 } 10892 mtx_unlock(&softc->queue_lock); 10893 #endif 10894 10895 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) { 10896 lun = softc->ctl_luns[lun_idx]; 10897 if (lun==NULL) 10898 continue; 10899 10900 /* 10901 * Processor LUNs are primary on both sides. 10902 * XXX will this always be true? 10903 */ 10904 if (lun->be_lun->lun_type == T_PROCESSOR) 10905 continue; 10906 10907 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10908 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10909 printf("FAILOVER: primary lun %d\n", lun_idx); 10910 /* 10911 * Remove all commands from the other SC. First from the 10912 * blocked queue then from the ooa queue. Once we have 10913 * removed them. Call ctl_check_blocked to see if there 10914 * is anything that can run. 
10915 */ 10916 for (io = (union ctl_io *)TAILQ_FIRST( 10917 &lun->blocked_queue); io != NULL; io = next_io) { 10918 10919 next_io = (union ctl_io *)TAILQ_NEXT( 10920 &io->io_hdr, blocked_links); 10921 10922 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10923 TAILQ_REMOVE(&lun->blocked_queue, 10924 &io->io_hdr,blocked_links); 10925 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10926 TAILQ_REMOVE(&lun->ooa_queue, 10927 &io->io_hdr, ooa_links); 10928 10929 ctl_free_io(io); 10930 } 10931 } 10932 10933 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10934 io != NULL; io = next_io) { 10935 10936 next_io = (union ctl_io *)TAILQ_NEXT( 10937 &io->io_hdr, ooa_links); 10938 10939 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10940 10941 TAILQ_REMOVE(&lun->ooa_queue, 10942 &io->io_hdr, 10943 ooa_links); 10944 10945 ctl_free_io(io); 10946 } 10947 } 10948 ctl_check_blocked(lun); 10949 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10950 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10951 10952 printf("FAILOVER: primary lun %d\n", lun_idx); 10953 /* 10954 * Abort all commands from the other SC. We can't 10955 * send status back for them now. These should get 10956 * cleaned up when they are completed or come out 10957 * for a datamove operation. 10958 */ 10959 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10960 io != NULL; io = next_io) { 10961 next_io = (union ctl_io *)TAILQ_NEXT( 10962 &io->io_hdr, ooa_links); 10963 10964 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10965 io->io_hdr.flags |= CTL_FLAG_ABORT; 10966 } 10967 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10968 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10969 10970 printf("FAILOVER: secondary lun %d\n", lun_idx); 10971 10972 lun->flags |= CTL_LUN_PRIMARY_SC; 10973 10974 /* 10975 * We send all I/O that was sent to this controller 10976 * and redirected to the other side back with 10977 * busy status, and have the initiator retry it. 10978 * Figuring out how much data has been transferred, 10979 * etc. and picking up where we left off would be 10980 * very tricky. 10981 * 10982 * XXX KDM need to remove I/O from the blocked 10983 * queue as well! 10984 */ 10985 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10986 &lun->ooa_queue); pending_io != NULL; 10987 pending_io = next_io) { 10988 10989 next_io = (union ctl_io *)TAILQ_NEXT( 10990 &pending_io->io_hdr, ooa_links); 10991 10992 pending_io->io_hdr.flags &= 10993 ~CTL_FLAG_SENT_2OTHER_SC; 10994 10995 if (pending_io->io_hdr.flags & 10996 CTL_FLAG_IO_ACTIVE) { 10997 pending_io->io_hdr.flags |= 10998 CTL_FLAG_FAILOVER; 10999 } else { 11000 ctl_set_busy(&pending_io->scsiio); 11001 ctl_done(pending_io); 11002 } 11003 } 11004 11005 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11006 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11007 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11008 printf("FAILOVER: secondary lun %d\n", lun_idx); 11009 /* 11010 * if the first io on the OOA is not on the RtR queue 11011 * add it. 
11012 */ 11013 lun->flags |= CTL_LUN_PRIMARY_SC; 11014 11015 pending_io = (union ctl_io *)TAILQ_FIRST( 11016 &lun->ooa_queue); 11017 if (pending_io==NULL) { 11018 printf("Nothing on OOA queue\n"); 11019 continue; 11020 } 11021 11022 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11023 if ((pending_io->io_hdr.flags & 11024 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11025 pending_io->io_hdr.flags |= 11026 CTL_FLAG_IS_WAS_ON_RTR; 11027 ctl_enqueue_rtr(pending_io); 11028 } 11029 #if 0 11030 else 11031 { 11032 printf("Tag 0x%04x is running\n", 11033 pending_io->scsiio.tag_num); 11034 } 11035 #endif 11036 11037 next_io = (union ctl_io *)TAILQ_NEXT( 11038 &pending_io->io_hdr, ooa_links); 11039 for (pending_io=next_io; pending_io != NULL; 11040 pending_io = next_io) { 11041 pending_io->io_hdr.flags &= 11042 ~CTL_FLAG_SENT_2OTHER_SC; 11043 next_io = (union ctl_io *)TAILQ_NEXT( 11044 &pending_io->io_hdr, ooa_links); 11045 if (pending_io->io_hdr.flags & 11046 CTL_FLAG_IS_WAS_ON_RTR) { 11047 #if 0 11048 printf("Tag 0x%04x is running\n", 11049 pending_io->scsiio.tag_num); 11050 #endif 11051 continue; 11052 } 11053 11054 switch (ctl_check_ooa(lun, pending_io, 11055 (union ctl_io *)TAILQ_PREV( 11056 &pending_io->io_hdr, ctl_ooaq, 11057 ooa_links))) { 11058 11059 case CTL_ACTION_BLOCK: 11060 TAILQ_INSERT_TAIL(&lun->blocked_queue, 11061 &pending_io->io_hdr, 11062 blocked_links); 11063 pending_io->io_hdr.flags |= 11064 CTL_FLAG_BLOCKED; 11065 break; 11066 case CTL_ACTION_PASS: 11067 case CTL_ACTION_SKIP: 11068 pending_io->io_hdr.flags |= 11069 CTL_FLAG_IS_WAS_ON_RTR; 11070 ctl_enqueue_rtr(pending_io); 11071 break; 11072 case CTL_ACTION_OVERLAP: 11073 ctl_set_overlapped_cmd( 11074 (struct ctl_scsiio *)pending_io); 11075 ctl_done(pending_io); 11076 break; 11077 case CTL_ACTION_OVERLAP_TAG: 11078 ctl_set_overlapped_tag( 11079 (struct ctl_scsiio *)pending_io, 11080 pending_io->scsiio.tag_num & 0xff); 11081 ctl_done(pending_io); 11082 break; 11083 case CTL_ACTION_ERROR: 11084 default: 11085 ctl_set_internal_failure( 11086 (struct ctl_scsiio *)pending_io, 11087 0, // sks_valid 11088 0); //retry count 11089 ctl_done(pending_io); 11090 break; 11091 } 11092 } 11093 11094 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11095 } else { 11096 panic("Unhandled HA mode failover, LUN flags = %#x, " 11097 "ha_mode = #%x", lun->flags, softc->ha_mode); 11098 } 11099 } 11100 ctl_pause_rtr = 0; 11101 mtx_unlock(&softc->ctl_lock); 11102 } 11103 #endif 11104 11105 static int 11106 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11107 { 11108 struct ctl_lun *lun; 11109 const struct ctl_cmd_entry *entry; 11110 uint32_t initidx, targ_lun; 11111 int retval; 11112 11113 retval = 0; 11114 11115 lun = NULL; 11116 11117 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11118 if ((targ_lun < CTL_MAX_LUNS) 11119 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11120 /* 11121 * If the LUN is invalid, pretend that it doesn't exist. 11122 * It will go away as soon as all pending I/O has been 11123 * completed. 
11124 */
11125 mtx_lock(&lun->lun_lock);
11126 if (lun->flags & CTL_LUN_DISABLED) {
11127 mtx_unlock(&lun->lun_lock);
11128 lun = NULL;
11129 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11130 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11131 } else {
11132 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
11133 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
11134 lun->be_lun;
11135 if (lun->be_lun->lun_type == T_PROCESSOR) {
11136 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
11137 }
11138
11139 /*
11140 * Every I/O goes into the OOA queue for a
11141 * particular LUN, and stays there until completion.
11142 */
11143 #ifdef CTL_TIME_IO
11144 if (TAILQ_EMPTY(&lun->ooa_queue)) {
11145 lun->idle_time += getsbinuptime() -
11146 lun->last_busy;
11147 }
11148 #endif
11149 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
11150 ooa_links);
11151 }
11152 } else {
11153 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11154 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11155 }
11156
11157 /* Get command entry and return error if it is unsupported. */
11158 entry = ctl_validate_command(ctsio);
11159 if (entry == NULL) {
11160 if (lun)
11161 mtx_unlock(&lun->lun_lock);
11162 return (retval);
11163 }
11164
11165 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11166 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11167
11168 /*
11169 * Check to see whether we can send this command to LUNs that don't
11170 * exist. This should pretty much only be the case for inquiry
11171 * and request sense. Further checks, below, really require having
11172 * a LUN, so we can't really check the command anymore. Just put
11173 * it on the rtr queue.
11174 */
11175 if (lun == NULL) {
11176 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
11177 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11178 ctl_enqueue_rtr((union ctl_io *)ctsio);
11179 return (retval);
11180 }
11181
11182 ctl_set_unsupported_lun(ctsio);
11183 ctl_done((union ctl_io *)ctsio);
11184 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11185 return (retval);
11186 } else {
11187 /*
11188 * Make sure we support this particular command on this LUN.
11189 * e.g., we don't support writes to the control LUN.
11190 */
11191 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11192 mtx_unlock(&lun->lun_lock);
11193 ctl_set_invalid_opcode(ctsio);
11194 ctl_done((union ctl_io *)ctsio);
11195 return (retval);
11196 }
11197 }
11198
11199 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11200
11201 #ifdef CTL_WITH_CA
11202 /*
11203 * If we've got a request sense, it'll clear the contingent
11204 * allegiance condition. Otherwise, if we have a CA condition for
11205 * this initiator, clear it, because it sent down a command other
11206 * than request sense.
11207 */
11208 if ((ctsio->cdb[0] != REQUEST_SENSE)
11209 && (ctl_is_set(lun->have_ca, initidx)))
11210 ctl_clear_mask(lun->have_ca, initidx);
11211 #endif
11212
11213 /*
11214 * If the command has this flag set, it handles its own unit
11215 * attention reporting, so we shouldn't do anything. Otherwise we
11216 * check for any pending unit attentions, and send them back to the
11217 * initiator. We only do this when a command initially comes in,
11218 * not when we pull it off the blocked queue.
11219 * 11220 * According to SAM-3, section 5.3.2, the order that things get 11221 * presented back to the host is basically unit attentions caused 11222 * by some sort of reset event, busy status, reservation conflicts 11223 * or task set full, and finally any other status. 11224 * 11225 * One issue here is that some of the unit attentions we report 11226 * don't fall into the "reset" category (e.g. "reported luns data 11227 * has changed"). So reporting it here, before the reservation 11228 * check, may be technically wrong. I guess the only thing to do 11229 * would be to check for and report the reset events here, and then 11230 * check for the other unit attention types after we check for a 11231 * reservation conflict. 11232 * 11233 * XXX KDM need to fix this 11234 */ 11235 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11236 ctl_ua_type ua_type; 11237 scsi_sense_data_type sense_format; 11238 11239 if (lun->flags & CTL_LUN_SENSE_DESC) 11240 sense_format = SSD_TYPE_DESC; 11241 else 11242 sense_format = SSD_TYPE_FIXED; 11243 11244 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11245 sense_format); 11246 if (ua_type != CTL_UA_NONE) { 11247 mtx_unlock(&lun->lun_lock); 11248 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11249 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11250 ctsio->sense_len = SSD_FULL_SIZE; 11251 ctl_done((union ctl_io *)ctsio); 11252 return (retval); 11253 } 11254 } 11255 11256 11257 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11258 mtx_unlock(&lun->lun_lock); 11259 ctl_done((union ctl_io *)ctsio); 11260 return (retval); 11261 } 11262 11263 /* 11264 * XXX CHD this is where we want to send IO to other side if 11265 * this LUN is secondary on this SC. We will need to make a copy 11266 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11267 * the copy we send as FROM_OTHER. 11268 * We also need to stuff the address of the original IO so we can 11269 * find it easily. Something similar will need be done on the other 11270 * side so when we are done we can find the copy. 11271 */ 11272 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11273 union ctl_ha_msg msg_info; 11274 int isc_retval; 11275 11276 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11277 11278 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11279 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11280 #if 0 11281 printf("1. ctsio %p\n", ctsio); 11282 #endif 11283 msg_info.hdr.serializing_sc = NULL; 11284 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11285 msg_info.scsi.tag_num = ctsio->tag_num; 11286 msg_info.scsi.tag_type = ctsio->tag_type; 11287 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11288 11289 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11290 11291 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11292 (void *)&msg_info, sizeof(msg_info), 0)) > 11293 CTL_HA_STATUS_SUCCESS) { 11294 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11295 isc_retval); 11296 printf("CTL:opcode is %x\n", ctsio->cdb[0]); 11297 } else { 11298 #if 0 11299 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11300 #endif 11301 } 11302 11303 /* 11304 * XXX KDM this I/O is off the incoming queue, but hasn't 11305 * been inserted on any other queue. We may need to come 11306 * up with a holding queue while we wait for serialization 11307 * so that we have an idea of what we're waiting for from 11308 * the other side. 
11309 */ 11310 mtx_unlock(&lun->lun_lock); 11311 return (retval); 11312 } 11313 11314 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11315 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11316 ctl_ooaq, ooa_links))) { 11317 case CTL_ACTION_BLOCK: 11318 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11319 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11320 blocked_links); 11321 mtx_unlock(&lun->lun_lock); 11322 return (retval); 11323 case CTL_ACTION_PASS: 11324 case CTL_ACTION_SKIP: 11325 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11326 mtx_unlock(&lun->lun_lock); 11327 ctl_enqueue_rtr((union ctl_io *)ctsio); 11328 break; 11329 case CTL_ACTION_OVERLAP: 11330 mtx_unlock(&lun->lun_lock); 11331 ctl_set_overlapped_cmd(ctsio); 11332 ctl_done((union ctl_io *)ctsio); 11333 break; 11334 case CTL_ACTION_OVERLAP_TAG: 11335 mtx_unlock(&lun->lun_lock); 11336 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11337 ctl_done((union ctl_io *)ctsio); 11338 break; 11339 case CTL_ACTION_ERROR: 11340 default: 11341 mtx_unlock(&lun->lun_lock); 11342 ctl_set_internal_failure(ctsio, 11343 /*sks_valid*/ 0, 11344 /*retry_count*/ 0); 11345 ctl_done((union ctl_io *)ctsio); 11346 break; 11347 } 11348 return (retval); 11349 } 11350 11351 const struct ctl_cmd_entry * 11352 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11353 { 11354 const struct ctl_cmd_entry *entry; 11355 int service_action; 11356 11357 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11358 if (sa) 11359 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11360 if (entry->flags & CTL_CMD_FLAG_SA5) { 11361 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11362 entry = &((const struct ctl_cmd_entry *) 11363 entry->execute)[service_action]; 11364 } 11365 return (entry); 11366 } 11367 11368 const struct ctl_cmd_entry * 11369 ctl_validate_command(struct ctl_scsiio *ctsio) 11370 { 11371 const struct ctl_cmd_entry *entry; 11372 int i, sa; 11373 uint8_t diff; 11374 11375 entry = ctl_get_cmd_entry(ctsio, &sa); 11376 if (entry->execute == NULL) { 11377 if (sa) 11378 ctl_set_invalid_field(ctsio, 11379 /*sks_valid*/ 1, 11380 /*command*/ 1, 11381 /*field*/ 1, 11382 /*bit_valid*/ 1, 11383 /*bit*/ 4); 11384 else 11385 ctl_set_invalid_opcode(ctsio); 11386 ctl_done((union ctl_io *)ctsio); 11387 return (NULL); 11388 } 11389 KASSERT(entry->length > 0, 11390 ("Not defined length for command 0x%02x/0x%02x", 11391 ctsio->cdb[0], ctsio->cdb[1])); 11392 for (i = 1; i < entry->length; i++) { 11393 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11394 if (diff == 0) 11395 continue; 11396 ctl_set_invalid_field(ctsio, 11397 /*sks_valid*/ 1, 11398 /*command*/ 1, 11399 /*field*/ i, 11400 /*bit_valid*/ 1, 11401 /*bit*/ fls(diff) - 1); 11402 ctl_done((union ctl_io *)ctsio); 11403 return (NULL); 11404 } 11405 return (entry); 11406 } 11407 11408 static int 11409 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11410 { 11411 11412 switch (lun_type) { 11413 case T_PROCESSOR: 11414 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11415 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11416 return (0); 11417 break; 11418 case T_DIRECT: 11419 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11420 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11421 return (0); 11422 break; 11423 default: 11424 return (0); 11425 } 11426 return (1); 11427 } 11428 11429 static int 11430 ctl_scsiio(struct ctl_scsiio *ctsio) 11431 { 11432 int retval; 11433 const struct ctl_cmd_entry *entry; 11434 11435 retval = CTL_RETVAL_COMPLETE; 11436 11437 
CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11438 11439 entry = ctl_get_cmd_entry(ctsio, NULL); 11440 11441 /* 11442 * If this I/O has been aborted, just send it straight to 11443 * ctl_done() without executing it. 11444 */ 11445 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11446 ctl_done((union ctl_io *)ctsio); 11447 goto bailout; 11448 } 11449 11450 /* 11451 * All the checks should have been handled by ctl_scsiio_precheck(). 11452 * We should be clear now to just execute the I/O. 11453 */ 11454 retval = entry->execute(ctsio); 11455 11456 bailout: 11457 return (retval); 11458 } 11459 11460 /* 11461 * Since we only implement one target right now, a bus reset simply resets 11462 * our single target. 11463 */ 11464 static int 11465 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11466 { 11467 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11468 } 11469 11470 static int 11471 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11472 ctl_ua_type ua_type) 11473 { 11474 struct ctl_lun *lun; 11475 int retval; 11476 11477 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11478 union ctl_ha_msg msg_info; 11479 11480 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11481 msg_info.hdr.nexus = io->io_hdr.nexus; 11482 if (ua_type==CTL_UA_TARG_RESET) 11483 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11484 else 11485 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11486 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11487 msg_info.hdr.original_sc = NULL; 11488 msg_info.hdr.serializing_sc = NULL; 11489 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11490 (void *)&msg_info, sizeof(msg_info), 0)) { 11491 } 11492 } 11493 retval = 0; 11494 11495 mtx_lock(&softc->ctl_lock); 11496 STAILQ_FOREACH(lun, &softc->lun_list, links) 11497 retval += ctl_lun_reset(lun, io, ua_type); 11498 mtx_unlock(&softc->ctl_lock); 11499 11500 return (retval); 11501 } 11502 11503 /* 11504 * The LUN should always be set. The I/O is optional, and is used to 11505 * distinguish between I/Os sent by this initiator, and by other 11506 * initiators. We set unit attention for initiators other than this one. 11507 * SAM-3 is vague on this point. It does say that a unit attention should 11508 * be established for other initiators when a LUN is reset (see section 11509 * 5.7.3), but it doesn't specifically say that the unit attention should 11510 * be established for this particular initiator when a LUN is reset. Here 11511 * is the relevant text, from SAM-3 rev 8: 11512 * 11513 * 5.7.2 When a SCSI initiator port aborts its own tasks 11514 * 11515 * When a SCSI initiator port causes its own task(s) to be aborted, no 11516 * notification that the task(s) have been aborted shall be returned to 11517 * the SCSI initiator port other than the completion response for the 11518 * command or task management function action that caused the task(s) to 11519 * be aborted and notification(s) associated with related effects of the 11520 * action (e.g., a reset unit attention condition). 11521 * 11522 * XXX KDM for now, we're setting unit attention for all initiators. 11523 */ 11524 static int 11525 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11526 { 11527 union ctl_io *xio; 11528 #if 0 11529 uint32_t initidx; 11530 #endif 11531 #ifdef CTL_WITH_CA 11532 int i; 11533 #endif 11534 11535 mtx_lock(&lun->lun_lock); 11536 /* 11537 * Run through the OOA queue and abort each I/O. 
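 *
 * (Both flags are set on each command: CTL_FLAG_ABORT marks it as aborted,
 * and CTL_FLAG_ABORT_STATUS additionally asks that an aborted status be
 * reported back for it when it completes.)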
11538 */
11539 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11540 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11541 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
11542 }
11543
11544 /*
11545 * This version sets unit attention for every initiator.
11546 */
11547 #if 0
11548 initidx = ctl_get_initindex(&io->io_hdr.nexus);
11549 ctl_est_ua_all(lun, initidx, ua_type);
11550 #else
11551 ctl_est_ua_all(lun, -1, ua_type);
11552 #endif
11553
11554 /*
11555 * A reset (any kind, really) clears reservations established with
11556 * RESERVE/RELEASE. It does not clear reservations established
11557 * with PERSISTENT RESERVE OUT, but we don't support that at the
11558 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address
11559 * reservations made with the RESERVE/RELEASE commands, because
11560 * those commands are obsolete in SPC-3.
11561 */
11562 lun->flags &= ~CTL_LUN_RESERVED;
11563
11564 #ifdef CTL_WITH_CA
11565 for (i = 0; i < CTL_MAX_INITIATORS; i++)
11566 ctl_clear_mask(lun->have_ca, i);
11567 #endif
11568 mtx_unlock(&lun->lun_lock);
11569
11570 return (0);
11571 }
11572
11573 static void
11574 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
11575 int other_sc)
11576 {
11577 union ctl_io *xio;
11578
11579 mtx_assert(&lun->lun_lock, MA_OWNED);
11580
11581 /*
11582 * Run through the OOA queue and attempt to find the given I/O.
11583 * The target port, initiator ID, tag type and tag number have to
11584 * match the values that we got from the initiator. If we have an
11585 * untagged command to abort, simply abort the first untagged command
11586 * we come to. We only allow one untagged command at a time, of course.
11587 */
11588 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11589 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11590
11591 if ((targ_port == UINT32_MAX ||
11592 targ_port == xio->io_hdr.nexus.targ_port) &&
11593 (init_id == UINT32_MAX ||
11594 init_id == xio->io_hdr.nexus.initid.id)) {
11595 if (targ_port != xio->io_hdr.nexus.targ_port ||
11596 init_id != xio->io_hdr.nexus.initid.id)
11597 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
11598 xio->io_hdr.flags |= CTL_FLAG_ABORT;
11599 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
11600 union ctl_ha_msg msg_info;
11601
11602 msg_info.hdr.nexus = xio->io_hdr.nexus;
11603 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
11604 msg_info.task.tag_num = xio->scsiio.tag_num;
11605 msg_info.task.tag_type = xio->scsiio.tag_type;
11606 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11607 msg_info.hdr.original_sc = NULL;
11608 msg_info.hdr.serializing_sc = NULL;
11609 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11610 (void *)&msg_info, sizeof(msg_info), 0);
11611 }
11612 }
11613 }
11614 }
11615
11616 static int
11617 ctl_abort_task_set(union ctl_io *io)
11618 {
11619 struct ctl_softc *softc = control_softc;
11620 struct ctl_lun *lun;
11621 uint32_t targ_lun;
11622
11623 /*
11624 * Look up the LUN.
11625 */ 11626 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11627 mtx_lock(&softc->ctl_lock); 11628 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 11629 lun = softc->ctl_luns[targ_lun]; 11630 else { 11631 mtx_unlock(&softc->ctl_lock); 11632 return (1); 11633 } 11634 11635 mtx_lock(&lun->lun_lock); 11636 mtx_unlock(&softc->ctl_lock); 11637 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11638 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11639 io->io_hdr.nexus.initid.id, 11640 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11641 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11642 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11643 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11644 } 11645 mtx_unlock(&lun->lun_lock); 11646 return (0); 11647 } 11648 11649 static int 11650 ctl_i_t_nexus_reset(union ctl_io *io) 11651 { 11652 struct ctl_softc *softc = control_softc; 11653 struct ctl_lun *lun; 11654 uint32_t initidx, residx; 11655 11656 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11657 residx = ctl_get_resindex(&io->io_hdr.nexus); 11658 mtx_lock(&softc->ctl_lock); 11659 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11660 mtx_lock(&lun->lun_lock); 11661 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11662 io->io_hdr.nexus.initid.id, 11663 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11664 #ifdef CTL_WITH_CA 11665 ctl_clear_mask(lun->have_ca, initidx); 11666 #endif 11667 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 11668 lun->flags &= ~CTL_LUN_RESERVED; 11669 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11670 mtx_unlock(&lun->lun_lock); 11671 } 11672 mtx_unlock(&softc->ctl_lock); 11673 return (0); 11674 } 11675 11676 static int 11677 ctl_abort_task(union ctl_io *io) 11678 { 11679 union ctl_io *xio; 11680 struct ctl_lun *lun; 11681 struct ctl_softc *softc; 11682 #if 0 11683 struct sbuf sb; 11684 char printbuf[128]; 11685 #endif 11686 int found; 11687 uint32_t targ_lun; 11688 11689 softc = control_softc; 11690 found = 0; 11691 11692 /* 11693 * Look up the LUN. 11694 */ 11695 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11696 mtx_lock(&softc->ctl_lock); 11697 if ((targ_lun < CTL_MAX_LUNS) 11698 && (softc->ctl_luns[targ_lun] != NULL)) 11699 lun = softc->ctl_luns[targ_lun]; 11700 else { 11701 mtx_unlock(&softc->ctl_lock); 11702 return (1); 11703 } 11704 11705 #if 0 11706 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11707 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11708 #endif 11709 11710 mtx_lock(&lun->lun_lock); 11711 mtx_unlock(&softc->ctl_lock); 11712 /* 11713 * Run through the OOA queue and attempt to find the given I/O. 11714 * The target port, initiator ID, tag type and tag number have to 11715 * match the values that we got from the initiator. If we have an 11716 * untagged command to abort, simply abort the first untagged command 11717 * we come to. We only allow one untagged command at a time of course. 11718 */ 11719 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11720 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11721 #if 0 11722 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11723 11724 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11725 lun->lun, xio->scsiio.tag_num, 11726 xio->scsiio.tag_type, 11727 (xio->io_hdr.blocked_links.tqe_prev 11728 == NULL) ? "" : " BLOCKED", 11729 (xio->io_hdr.flags & 11730 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11731 (xio->io_hdr.flags & 11732 CTL_FLAG_ABORT) ? 
" ABORT" : "", 11733 (xio->io_hdr.flags & 11734 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11735 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11736 sbuf_finish(&sb); 11737 printf("%s\n", sbuf_data(&sb)); 11738 #endif 11739 11740 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11741 || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id) 11742 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11743 continue; 11744 11745 /* 11746 * If the abort says that the task is untagged, the 11747 * task in the queue must be untagged. Otherwise, 11748 * we just check to see whether the tag numbers 11749 * match. This is because the QLogic firmware 11750 * doesn't pass back the tag type in an abort 11751 * request. 11752 */ 11753 #if 0 11754 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11755 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11756 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11757 #endif 11758 /* 11759 * XXX KDM we've got problems with FC, because it 11760 * doesn't send down a tag type with aborts. So we 11761 * can only really go by the tag number... 11762 * This may cause problems with parallel SCSI. 11763 * Need to figure that out!! 11764 */ 11765 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11766 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11767 found = 1; 11768 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11769 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11770 union ctl_ha_msg msg_info; 11771 11772 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11773 msg_info.hdr.nexus = io->io_hdr.nexus; 11774 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11775 msg_info.task.tag_num = io->taskio.tag_num; 11776 msg_info.task.tag_type = io->taskio.tag_type; 11777 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11778 msg_info.hdr.original_sc = NULL; 11779 msg_info.hdr.serializing_sc = NULL; 11780 #if 0 11781 printf("Sent Abort to other side\n"); 11782 #endif 11783 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11784 (void *)&msg_info, sizeof(msg_info), 0) != 11785 CTL_HA_STATUS_SUCCESS) { 11786 } 11787 } 11788 #if 0 11789 printf("ctl_abort_task: found I/O to abort\n"); 11790 #endif 11791 } 11792 } 11793 mtx_unlock(&lun->lun_lock); 11794 11795 if (found == 0) { 11796 /* 11797 * This isn't really an error. It's entirely possible for 11798 * the abort and command completion to cross on the wire. 11799 * This is more of an informative/diagnostic error. 
		 */
#if 0
		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
		       "%d:%d:%d:%d tag %d type %d\n",
		       io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
		       io->taskio.tag_type);
#endif
	}
	return (0);
}

static void
ctl_run_task(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	int retval = 1;
	const char *task_desc;

	CTL_DEBUG_PRINT(("ctl_run_task\n"));

	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
	    ("ctl_run_task: Unexpected io_type %d\n",
	     io->io_hdr.io_type));

	task_desc = ctl_scsi_task_string(&io->taskio);
	if (task_desc != NULL) {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received task: %s",task_desc);
#endif
	} else {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received unknown task "
			    "type: %d (%#x)",
			    io->taskio.task_action,
			    io->taskio.task_action);
#endif
	}
	switch (io->taskio.task_action) {
	case CTL_TASK_ABORT_TASK:
		retval = ctl_abort_task(io);
		break;
	case CTL_TASK_ABORT_TASK_SET:
	case CTL_TASK_CLEAR_TASK_SET:
		retval = ctl_abort_task_set(io);
		break;
	case CTL_TASK_CLEAR_ACA:
		break;
	case CTL_TASK_I_T_NEXUS_RESET:
		retval = ctl_i_t_nexus_reset(io);
		break;
	case CTL_TASK_LUN_RESET: {
		struct ctl_lun *lun;
		uint32_t targ_lun;

		targ_lun = io->io_hdr.nexus.targ_mapped_lun;
		mtx_lock(&softc->ctl_lock);
		if ((targ_lun < CTL_MAX_LUNS)
		 && (softc->ctl_luns[targ_lun] != NULL))
			lun = softc->ctl_luns[targ_lun];
		else {
			mtx_unlock(&softc->ctl_lock);
			retval = 1;
			break;
		}

		if (!(io->io_hdr.flags &
		    CTL_FLAG_FROM_OTHER_SC)) {
			union ctl_ha_msg msg_info;

			io->io_hdr.flags |=
				CTL_FLAG_SENT_2OTHER_SC;
			msg_info.hdr.msg_type =
				CTL_MSG_MANAGE_TASKS;
			msg_info.hdr.nexus = io->io_hdr.nexus;
			msg_info.task.task_action =
				CTL_TASK_LUN_RESET;
			msg_info.hdr.original_sc = NULL;
			msg_info.hdr.serializing_sc = NULL;
			if (CTL_HA_STATUS_SUCCESS !=
			    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			    (void *)&msg_info,
			    sizeof(msg_info), 0)) {
			}
		}

		retval = ctl_lun_reset(lun, io,
				       CTL_UA_LUN_RESET);
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	case CTL_TASK_TARGET_RESET:
		retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
		break;
	case CTL_TASK_BUS_RESET:
		retval = ctl_bus_reset(softc, io);
		break;
	case CTL_TASK_PORT_LOGIN:
		break;
	case CTL_TASK_PORT_LOGOUT:
		break;
	default:
		printf("ctl_run_task: got unknown task management event %d\n",
		       io->taskio.task_action);
		break;
	}
	if (retval == 0)
		io->io_hdr.status = CTL_SUCCESS;
	else
		io->io_hdr.status = CTL_ERROR;
	ctl_done(io);
}

/*
 * For HA operation.
Handle commands that come in from the other 11931 * controller. 11932 */ 11933 static void 11934 ctl_handle_isc(union ctl_io *io) 11935 { 11936 int free_io; 11937 struct ctl_lun *lun; 11938 struct ctl_softc *softc; 11939 uint32_t targ_lun; 11940 11941 softc = control_softc; 11942 11943 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11944 lun = softc->ctl_luns[targ_lun]; 11945 11946 switch (io->io_hdr.msg_type) { 11947 case CTL_MSG_SERIALIZE: 11948 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11949 break; 11950 case CTL_MSG_R2R: { 11951 const struct ctl_cmd_entry *entry; 11952 11953 /* 11954 * This is only used in SER_ONLY mode. 11955 */ 11956 free_io = 0; 11957 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11958 mtx_lock(&lun->lun_lock); 11959 if (ctl_scsiio_lun_check(lun, 11960 entry, (struct ctl_scsiio *)io) != 0) { 11961 mtx_unlock(&lun->lun_lock); 11962 ctl_done(io); 11963 break; 11964 } 11965 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11966 mtx_unlock(&lun->lun_lock); 11967 ctl_enqueue_rtr(io); 11968 break; 11969 } 11970 case CTL_MSG_FINISH_IO: 11971 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11972 free_io = 0; 11973 ctl_done(io); 11974 } else { 11975 free_io = 1; 11976 mtx_lock(&lun->lun_lock); 11977 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11978 ooa_links); 11979 ctl_check_blocked(lun); 11980 mtx_unlock(&lun->lun_lock); 11981 } 11982 break; 11983 case CTL_MSG_PERS_ACTION: 11984 ctl_hndl_per_res_out_on_other_sc( 11985 (union ctl_ha_msg *)&io->presio.pr_msg); 11986 free_io = 1; 11987 break; 11988 case CTL_MSG_BAD_JUJU: 11989 free_io = 0; 11990 ctl_done(io); 11991 break; 11992 case CTL_MSG_DATAMOVE: 11993 /* Only used in XFER mode */ 11994 free_io = 0; 11995 ctl_datamove_remote(io); 11996 break; 11997 case CTL_MSG_DATAMOVE_DONE: 11998 /* Only used in XFER mode */ 11999 free_io = 0; 12000 io->scsiio.be_move_done(io); 12001 break; 12002 default: 12003 free_io = 1; 12004 printf("%s: Invalid message type %d\n", 12005 __func__, io->io_hdr.msg_type); 12006 break; 12007 } 12008 if (free_io) 12009 ctl_free_io(io); 12010 12011 } 12012 12013 12014 /* 12015 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12016 * there is no match. 12017 */ 12018 static ctl_lun_error_pattern 12019 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12020 { 12021 const struct ctl_cmd_entry *entry; 12022 ctl_lun_error_pattern filtered_pattern, pattern; 12023 12024 pattern = desc->error_pattern; 12025 12026 /* 12027 * XXX KDM we need more data passed into this function to match a 12028 * custom pattern, and we actually need to implement custom pattern 12029 * matching. 12030 */ 12031 if (pattern & CTL_LUN_PAT_CMD) 12032 return (CTL_LUN_PAT_CMD); 12033 12034 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12035 return (CTL_LUN_PAT_ANY); 12036 12037 entry = ctl_get_cmd_entry(ctsio, NULL); 12038 12039 filtered_pattern = entry->pattern & pattern; 12040 12041 /* 12042 * If the user requested specific flags in the pattern (e.g. 12043 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12044 * flags. 12045 * 12046 * If the user did not specify any flags, it doesn't matter whether 12047 * or not the command supports the flags. 12048 */ 12049 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12050 (pattern & ~CTL_LUN_PAT_MASK)) 12051 return (CTL_LUN_PAT_NONE); 12052 12053 /* 12054 * If the user asked for a range check, see if the requested LBA 12055 * range overlaps with this command's LBA range. 
12056 */ 12057 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12058 uint64_t lba1; 12059 uint64_t len1; 12060 ctl_action action; 12061 int retval; 12062 12063 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12064 if (retval != 0) 12065 return (CTL_LUN_PAT_NONE); 12066 12067 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12068 desc->lba_range.len, FALSE); 12069 /* 12070 * A "pass" means that the LBA ranges don't overlap, so 12071 * this doesn't match the user's range criteria. 12072 */ 12073 if (action == CTL_ACTION_PASS) 12074 return (CTL_LUN_PAT_NONE); 12075 } 12076 12077 return (filtered_pattern); 12078 } 12079 12080 static void 12081 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12082 { 12083 struct ctl_error_desc *desc, *desc2; 12084 12085 mtx_assert(&lun->lun_lock, MA_OWNED); 12086 12087 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12088 ctl_lun_error_pattern pattern; 12089 /* 12090 * Check to see whether this particular command matches 12091 * the pattern in the descriptor. 12092 */ 12093 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12094 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12095 continue; 12096 12097 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12098 case CTL_LUN_INJ_ABORTED: 12099 ctl_set_aborted(&io->scsiio); 12100 break; 12101 case CTL_LUN_INJ_MEDIUM_ERR: 12102 ctl_set_medium_error(&io->scsiio); 12103 break; 12104 case CTL_LUN_INJ_UA: 12105 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12106 * OCCURRED */ 12107 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12108 break; 12109 case CTL_LUN_INJ_CUSTOM: 12110 /* 12111 * We're assuming the user knows what he is doing. 12112 * Just copy the sense information without doing 12113 * checks. 12114 */ 12115 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12116 MIN(sizeof(desc->custom_sense), 12117 sizeof(io->scsiio.sense_data))); 12118 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12119 io->scsiio.sense_len = SSD_FULL_SIZE; 12120 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12121 break; 12122 case CTL_LUN_INJ_NONE: 12123 default: 12124 /* 12125 * If this is an error injection type we don't know 12126 * about, clear the continuous flag (if it is set) 12127 * so it will get deleted below. 
12128 */ 12129 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12130 break; 12131 } 12132 /* 12133 * By default, each error injection action is a one-shot 12134 */ 12135 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12136 continue; 12137 12138 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12139 12140 free(desc, M_CTL); 12141 } 12142 } 12143 12144 #ifdef CTL_IO_DELAY 12145 static void 12146 ctl_datamove_timer_wakeup(void *arg) 12147 { 12148 union ctl_io *io; 12149 12150 io = (union ctl_io *)arg; 12151 12152 ctl_datamove(io); 12153 } 12154 #endif /* CTL_IO_DELAY */ 12155 12156 void 12157 ctl_datamove(union ctl_io *io) 12158 { 12159 void (*fe_datamove)(union ctl_io *io); 12160 12161 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12162 12163 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12164 12165 #ifdef CTL_TIME_IO 12166 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12167 char str[256]; 12168 char path_str[64]; 12169 struct sbuf sb; 12170 12171 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12172 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12173 12174 sbuf_cat(&sb, path_str); 12175 switch (io->io_hdr.io_type) { 12176 case CTL_IO_SCSI: 12177 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12178 sbuf_printf(&sb, "\n"); 12179 sbuf_cat(&sb, path_str); 12180 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12181 io->scsiio.tag_num, io->scsiio.tag_type); 12182 break; 12183 case CTL_IO_TASK: 12184 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12185 "Tag Type: %d\n", io->taskio.task_action, 12186 io->taskio.tag_num, io->taskio.tag_type); 12187 break; 12188 default: 12189 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12190 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12191 break; 12192 } 12193 sbuf_cat(&sb, path_str); 12194 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12195 (intmax_t)time_uptime - io->io_hdr.start_time); 12196 sbuf_finish(&sb); 12197 printf("%s", sbuf_data(&sb)); 12198 } 12199 #endif /* CTL_TIME_IO */ 12200 12201 #ifdef CTL_IO_DELAY 12202 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12203 struct ctl_lun *lun; 12204 12205 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12206 12207 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12208 } else { 12209 struct ctl_lun *lun; 12210 12211 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12212 if ((lun != NULL) 12213 && (lun->delay_info.datamove_delay > 0)) { 12214 struct callout *callout; 12215 12216 callout = (struct callout *)&io->io_hdr.timer_bytes; 12217 callout_init(callout, /*mpsafe*/ 1); 12218 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12219 callout_reset(callout, 12220 lun->delay_info.datamove_delay * hz, 12221 ctl_datamove_timer_wakeup, io); 12222 if (lun->delay_info.datamove_type == 12223 CTL_DELAY_TYPE_ONESHOT) 12224 lun->delay_info.datamove_delay = 0; 12225 return; 12226 } 12227 } 12228 #endif 12229 12230 /* 12231 * This command has been aborted. Set the port status, so we fail 12232 * the data move. 12233 */ 12234 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12235 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 12236 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 12237 io->io_hdr.nexus.targ_port, 12238 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12239 io->io_hdr.nexus.targ_lun); 12240 io->io_hdr.port_status = 31337; 12241 /* 12242 * Note that the backend, in this case, will get the 12243 * callback in its context. In other cases it may get 12244 * called in the frontend's interrupt thread context. 
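	 * (be_move_done is the backend's data-move completion hook; in HA
	 * XFER mode it may instead point at one of the
	 * ctl_datamove_remote_dm_*_cb() callbacks set up further down in
	 * this file.)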
12245 */ 12246 io->scsiio.be_move_done(io); 12247 return; 12248 } 12249 12250 /* Don't confuse frontend with zero length data move. */ 12251 if (io->scsiio.kern_data_len == 0) { 12252 io->scsiio.be_move_done(io); 12253 return; 12254 } 12255 12256 /* 12257 * If we're in XFER mode and this I/O is from the other shelf 12258 * controller, we need to send the DMA to the other side to 12259 * actually transfer the data to/from the host. In serialize only 12260 * mode the transfer happens below CTL and ctl_datamove() is only 12261 * called on the machine that originally received the I/O. 12262 */ 12263 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12264 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12265 union ctl_ha_msg msg; 12266 uint32_t sg_entries_sent; 12267 int do_sg_copy; 12268 int i; 12269 12270 memset(&msg, 0, sizeof(msg)); 12271 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12272 msg.hdr.original_sc = io->io_hdr.original_sc; 12273 msg.hdr.serializing_sc = io; 12274 msg.hdr.nexus = io->io_hdr.nexus; 12275 msg.dt.flags = io->io_hdr.flags; 12276 /* 12277 * We convert everything into a S/G list here. We can't 12278 * pass by reference, only by value between controllers. 12279 * So we can't pass a pointer to the S/G list, only as many 12280 * S/G entries as we can fit in here. If it's possible for 12281 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12282 * then we need to break this up into multiple transfers. 12283 */ 12284 if (io->scsiio.kern_sg_entries == 0) { 12285 msg.dt.kern_sg_entries = 1; 12286 /* 12287 * If this is in cached memory, flush the cache 12288 * before we send the DMA request to the other 12289 * controller. We want to do this in either the 12290 * read or the write case. The read case is 12291 * straightforward. In the write case, we want to 12292 * make sure nothing is in the local cache that 12293 * could overwrite the DMAed data. 12294 */ 12295 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12296 /* 12297 * XXX KDM use bus_dmamap_sync() here. 12298 */ 12299 } 12300 12301 /* 12302 * Convert to a physical address if this is a 12303 * virtual address. 12304 */ 12305 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12306 msg.dt.sg_list[0].addr = 12307 io->scsiio.kern_data_ptr; 12308 } else { 12309 /* 12310 * XXX KDM use busdma here! 12311 */ 12312 #if 0 12313 msg.dt.sg_list[0].addr = (void *) 12314 vtophys(io->scsiio.kern_data_ptr); 12315 #endif 12316 } 12317 12318 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12319 do_sg_copy = 0; 12320 } else { 12321 struct ctl_sg_entry *sgl; 12322 12323 do_sg_copy = 1; 12324 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12325 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12326 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12327 /* 12328 * XXX KDM use bus_dmamap_sync() here. 12329 */ 12330 } 12331 } 12332 12333 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12334 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12335 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12336 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12337 msg.dt.sg_sequence = 0; 12338 12339 /* 12340 * Loop until we've sent all of the S/G entries. 
On the other end, we'll recompose these S/G entries into one
		 * contiguous list before passing it to the datamove code on
		 * that side.
		 */
		for (sg_entries_sent = 0; sg_entries_sent <
		     msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
			msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/
				sizeof(msg.dt.sg_list[0])),
				msg.dt.kern_sg_entries - sg_entries_sent);

			if (do_sg_copy != 0) {
				struct ctl_sg_entry *sgl;
				int j;

				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;
				/*
				 * If this is in cached memory, flush the cache
				 * before we send the DMA request to the other
				 * controller.  We want to do this in either
				 * the read or the write case.  The read
				 * case is straightforward.  In the write
				 * case, we want to make sure nothing is
				 * in the local cache that could overwrite
				 * the DMAed data.
				 */

				for (i = sg_entries_sent, j = 0;
				     i < msg.dt.cur_sg_entries; i++, j++) {
					if ((io->io_hdr.flags &
					     CTL_FLAG_NO_DATASYNC) == 0) {
						/*
						 * XXX KDM use bus_dmamap_sync()
						 */
					}
					if ((io->io_hdr.flags &
					     CTL_FLAG_BUS_ADDR) == 0) {
						/*
						 * XXX KDM use busdma.
						 */
#if 0
						msg.dt.sg_list[j].addr =(void *)
						       vtophys(sgl[i].addr);
#endif
					} else {
						msg.dt.sg_list[j].addr =
							sgl[i].addr;
					}
					msg.dt.sg_list[j].len = sgl[i].len;
				}
			}

			sg_entries_sent += msg.dt.cur_sg_entries;
			if (sg_entries_sent >= msg.dt.kern_sg_entries)
				msg.dt.sg_last = 1;
			else
				msg.dt.sg_last = 0;

			/*
			 * XXX KDM drop and reacquire the lock here?
			 */
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
			    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
				/*
				 * XXX do something here.
				 */
			}

			msg.dt.sent_sg_entries = sg_entries_sent;
		}
		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
			ctl_failover_io(io, /*have_lock*/ 0);

	} else {

		/*
		 * Lookup the fe_datamove() function for this particular
		 * front end.
		 */
		fe_datamove =
		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;

		fe_datamove(io);
	}
}

static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
	union ctl_ha_msg msg;
	int isc_status;

	memset(&msg, 0, sizeof(msg));

	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
	msg.hdr.original_sc = io;
	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.scsi.tag_num = io->scsiio.tag_num;
	msg.scsi.tag_type = io->scsiio.tag_type;
	msg.scsi.scsi_status = io->scsiio.scsi_status;
	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
	       sizeof(io->scsiio.sense_data));
	msg.scsi.sense_len = io->scsiio.sense_len;
	msg.scsi.sense_residual = io->scsiio.sense_residual;
	msg.scsi.fetd_status = io->io_hdr.port_status;
	msg.scsi.residual = io->scsiio.residual;
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ have_lock);
		return;
	}

	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
	if (isc_status > CTL_HA_STATUS_SUCCESS) {
		/* XXX do something if this fails */
	}

}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
 */
static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA write failed with error %d", __func__,
		       rq->ret);
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	/*
	 * In this case, we had to malloc the memory locally.  Free it.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
		int i;
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			free(io->io_hdr.local_sglist[i].addr, M_CTL);
	}
	/*
	 * The data is in local and remote memory, so now we need to send
	 * status (good or bad) back to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory.  Now we
 * need to push it over to the remote controller's memory.
 */
static int
ctl_datamove_remote_dm_write_cb(union ctl_io *io)
{
	int retval;

	retval = 0;

	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
					  ctl_datamove_remote_write_cb);

	return (retval);
}

static void
ctl_datamove_remote_write(union ctl_io *io)
{
	int retval;
	void (*fe_datamove)(union ctl_io *io);

	/*
	 * - Get the data from the host/HBA into local memory.
	 * - DMA memory from the local controller to the remote controller.
	 * - Send status back to the remote controller.
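	 *
	 * Roughly, the write path chains: fe_datamove() -> be_move_done ==
	 * ctl_datamove_remote_dm_write_cb() -> ctl_datamove_remote_xfer()
	 * with CTL_HA_DT_CMD_WRITE -> ctl_datamove_remote_write_cb() ->
	 * ctl_send_datamove_done().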
12526 */ 12527 12528 retval = ctl_datamove_remote_sgl_setup(io); 12529 if (retval != 0) 12530 return; 12531 12532 /* Switch the pointer over so the FETD knows what to do */ 12533 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12534 12535 /* 12536 * Use a custom move done callback, since we need to send completion 12537 * back to the other controller, not to the backend on this side. 12538 */ 12539 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12540 12541 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12542 12543 fe_datamove(io); 12544 12545 return; 12546 12547 } 12548 12549 static int 12550 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12551 { 12552 #if 0 12553 char str[256]; 12554 char path_str[64]; 12555 struct sbuf sb; 12556 #endif 12557 12558 /* 12559 * In this case, we had to malloc the memory locally. Free it. 12560 */ 12561 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12562 int i; 12563 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12564 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12565 } 12566 12567 #if 0 12568 scsi_path_string(io, path_str, sizeof(path_str)); 12569 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12570 sbuf_cat(&sb, path_str); 12571 scsi_command_string(&io->scsiio, NULL, &sb); 12572 sbuf_printf(&sb, "\n"); 12573 sbuf_cat(&sb, path_str); 12574 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12575 io->scsiio.tag_num, io->scsiio.tag_type); 12576 sbuf_cat(&sb, path_str); 12577 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12578 io->io_hdr.flags, io->io_hdr.status); 12579 sbuf_finish(&sb); 12580 printk("%s", sbuf_data(&sb)); 12581 #endif 12582 12583 12584 /* 12585 * The read is done, now we need to send status (good or bad) back 12586 * to the other side. 12587 */ 12588 ctl_send_datamove_done(io, /*have_lock*/ 0); 12589 12590 return (0); 12591 } 12592 12593 static void 12594 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12595 { 12596 union ctl_io *io; 12597 void (*fe_datamove)(union ctl_io *io); 12598 12599 io = rq->context; 12600 12601 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12602 printf("%s: ISC DMA read failed with error %d", __func__, 12603 rq->ret); 12604 ctl_set_internal_failure(&io->scsiio, 12605 /*sks_valid*/ 1, 12606 /*retry_count*/ rq->ret); 12607 } 12608 12609 ctl_dt_req_free(rq); 12610 12611 /* Switch the pointer over so the FETD knows what to do */ 12612 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12613 12614 /* 12615 * Use a custom move done callback, since we need to send completion 12616 * back to the other controller, not to the backend on this side. 12617 */ 12618 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12619 12620 /* XXX KDM add checks like the ones in ctl_datamove? 
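	 * (e.g. the CTL_FLAG_ABORT check and the zero-length
	 * kern_data_len check done at the top of ctl_datamove().)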
*/ 12621 12622 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12623 12624 fe_datamove(io); 12625 } 12626 12627 static int 12628 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12629 { 12630 struct ctl_sg_entry *local_sglist, *remote_sglist; 12631 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 12632 struct ctl_softc *softc; 12633 int retval; 12634 int i; 12635 12636 retval = 0; 12637 softc = control_softc; 12638 12639 local_sglist = io->io_hdr.local_sglist; 12640 local_dma_sglist = io->io_hdr.local_dma_sglist; 12641 remote_sglist = io->io_hdr.remote_sglist; 12642 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12643 12644 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12645 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12646 local_sglist[i].len = remote_sglist[i].len; 12647 12648 /* 12649 * XXX Detect the situation where the RS-level I/O 12650 * redirector on the other side has already read the 12651 * data off of the AOR RS on this side, and 12652 * transferred it to remote (mirror) memory on the 12653 * other side. Since we already have the data in 12654 * memory here, we just need to use it. 12655 * 12656 * XXX KDM this can probably be removed once we 12657 * get the cache device code in and take the 12658 * current AOR implementation out. 12659 */ 12660 #ifdef NEEDTOPORT 12661 if ((remote_sglist[i].addr >= 12662 (void *)vtophys(softc->mirr->addr)) 12663 && (remote_sglist[i].addr < 12664 ((void *)vtophys(softc->mirr->addr) + 12665 CacheMirrorOffset))) { 12666 local_sglist[i].addr = remote_sglist[i].addr - 12667 CacheMirrorOffset; 12668 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12669 CTL_FLAG_DATA_IN) 12670 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12671 } else { 12672 local_sglist[i].addr = remote_sglist[i].addr + 12673 CacheMirrorOffset; 12674 } 12675 #endif 12676 #if 0 12677 printf("%s: local %p, remote %p, len %d\n", 12678 __func__, local_sglist[i].addr, 12679 remote_sglist[i].addr, local_sglist[i].len); 12680 #endif 12681 } 12682 } else { 12683 uint32_t len_to_go; 12684 12685 /* 12686 * In this case, we don't have automatically allocated 12687 * memory for this I/O on this controller. This typically 12688 * happens with internal CTL I/O -- e.g. inquiry, mode 12689 * sense, etc. Anything coming from RAIDCore will have 12690 * a mirror area available. 12691 */ 12692 len_to_go = io->scsiio.kern_data_len; 12693 12694 /* 12695 * Clear the no datasync flag, we have to use malloced 12696 * buffers. 12697 */ 12698 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12699 12700 /* 12701 * The difficult thing here is that the size of the various 12702 * S/G segments may be different than the size from the 12703 * remote controller. That'll make it harder when DMAing 12704 * the data back to the other side. 
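		 * (The local segments are allocated below in chunks of at
		 * most 128KB, so local and remote segment boundaries need
		 * not line up; ctl_datamove_remote_xfer() walks both lists
		 * in parallel to cope with that.)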
12705 */ 12706 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 12707 sizeof(io->io_hdr.remote_sglist[0])) && 12708 (len_to_go > 0); i++) { 12709 local_sglist[i].len = MIN(len_to_go, 131072); 12710 CTL_SIZE_8B(local_dma_sglist[i].len, 12711 local_sglist[i].len); 12712 local_sglist[i].addr = 12713 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 12714 12715 local_dma_sglist[i].addr = local_sglist[i].addr; 12716 12717 if (local_sglist[i].addr == NULL) { 12718 int j; 12719 12720 printf("malloc failed for %zd bytes!", 12721 local_dma_sglist[i].len); 12722 for (j = 0; j < i; j++) { 12723 free(local_sglist[j].addr, M_CTL); 12724 } 12725 ctl_set_internal_failure(&io->scsiio, 12726 /*sks_valid*/ 1, 12727 /*retry_count*/ 4857); 12728 retval = 1; 12729 goto bailout_error; 12730 12731 } 12732 /* XXX KDM do we need a sync here? */ 12733 12734 len_to_go -= local_sglist[i].len; 12735 } 12736 /* 12737 * Reset the number of S/G entries accordingly. The 12738 * original number of S/G entries is available in 12739 * rem_sg_entries. 12740 */ 12741 io->scsiio.kern_sg_entries = i; 12742 12743 #if 0 12744 printf("%s: kern_sg_entries = %d\n", __func__, 12745 io->scsiio.kern_sg_entries); 12746 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12747 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 12748 local_sglist[i].addr, local_sglist[i].len, 12749 local_dma_sglist[i].len); 12750 #endif 12751 } 12752 12753 12754 return (retval); 12755 12756 bailout_error: 12757 12758 ctl_send_datamove_done(io, /*have_lock*/ 0); 12759 12760 return (retval); 12761 } 12762 12763 static int 12764 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12765 ctl_ha_dt_cb callback) 12766 { 12767 struct ctl_ha_dt_req *rq; 12768 struct ctl_sg_entry *remote_sglist, *local_sglist; 12769 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 12770 uint32_t local_used, remote_used, total_used; 12771 int retval; 12772 int i, j; 12773 12774 retval = 0; 12775 12776 rq = ctl_dt_req_alloc(); 12777 12778 /* 12779 * If we failed to allocate the request, and if the DMA didn't fail 12780 * anyway, set busy status. This is just a resource allocation 12781 * failure. 12782 */ 12783 if ((rq == NULL) 12784 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 12785 ctl_set_busy(&io->scsiio); 12786 12787 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 12788 12789 if (rq != NULL) 12790 ctl_dt_req_free(rq); 12791 12792 /* 12793 * The data move failed. We need to return status back 12794 * to the other controller. No point in trying to DMA 12795 * data to the remote controller. 12796 */ 12797 12798 ctl_send_datamove_done(io, /*have_lock*/ 0); 12799 12800 retval = 1; 12801 12802 goto bailout; 12803 } 12804 12805 local_sglist = io->io_hdr.local_sglist; 12806 local_dma_sglist = io->io_hdr.local_dma_sglist; 12807 remote_sglist = io->io_hdr.remote_sglist; 12808 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12809 local_used = 0; 12810 remote_used = 0; 12811 total_used = 0; 12812 12813 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 12814 rq->ret = CTL_HA_STATUS_SUCCESS; 12815 rq->context = io; 12816 callback(rq); 12817 goto bailout; 12818 } 12819 12820 /* 12821 * Pull/push the data over the wire from/to the other controller. 12822 * This takes into account the possibility that the local and 12823 * remote sglists may not be identical in terms of the size of 12824 * the elements and the number of elements. 
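	 * cur_len below is the overlap between the current local and remote
	 * segments; the local (i, local_used) and remote (j, remote_used)
	 * cursors advance independently as each segment is consumed.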
	 *
	 * One fundamental assumption here is that the length allocated for
	 * both the local and remote sglists is identical.  Otherwise, we've
	 * essentially got a coding error of some sort.
	 */
	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
		int isc_ret;
		uint32_t cur_len, dma_length;
		uint8_t *tmp_ptr;

		rq->id = CTL_HA_DATA_CTL;
		rq->command = command;
		rq->context = io;

		/*
		 * Both pointers should be aligned.  But it is possible
		 * that the allocation length is not.  They should both
		 * also have enough slack left over at the end, though,
		 * to round up to the next 8 byte boundary.
		 */
		cur_len = MIN(local_sglist[i].len - local_used,
			      remote_sglist[j].len - remote_used);

		/*
		 * In this case, we have a size issue and need to decrease
		 * the size, except in the case where we actually have less
		 * than 8 bytes left.  In that case, we need to increase
		 * the DMA length to get the last bit.
		 */
		if ((cur_len & 0x7) != 0) {
			if (cur_len > 0x7) {
				cur_len = cur_len - (cur_len & 0x7);
				dma_length = cur_len;
			} else {
				CTL_SIZE_8B(dma_length, cur_len);
			}

		} else
			dma_length = cur_len;

		/*
		 * If we had to allocate memory for this I/O, instead of using
		 * the non-cached mirror memory, we'll need to flush the cache
		 * before trying to DMA to the other controller.
		 *
		 * We could end up doing this multiple times for the same
		 * segment if we have a larger local segment than remote
		 * segment.  That shouldn't be an issue.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
			/*
			 * XXX KDM use bus_dmamap_sync() here.
			 */
		}

		rq->size = dma_length;

		tmp_ptr = (uint8_t *)local_sglist[i].addr;
		tmp_ptr += local_used;

		/* Use physical addresses when talking to ISC hardware */
		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
			/* XXX KDM use busdma */
#if 0
			rq->local = vtophys(tmp_ptr);
#endif
		} else
			rq->local = tmp_ptr;

		tmp_ptr = (uint8_t *)remote_sglist[j].addr;
		tmp_ptr += remote_used;
		rq->remote = tmp_ptr;

		rq->callback = NULL;

		local_used += cur_len;
		if (local_used >= local_sglist[i].len) {
			i++;
			local_used = 0;
		}

		remote_used += cur_len;
		if (remote_used >= remote_sglist[j].len) {
			j++;
			remote_used = 0;
		}
		total_used += cur_len;

		if (total_used >= io->scsiio.kern_data_len)
			rq->callback = callback;

		if ((rq->size & 0x7) != 0) {
			printf("%s: warning: size %d is not on 8b boundary\n",
			       __func__, rq->size);
		}
		if (((uintptr_t)rq->local & 0x7) != 0) {
			printf("%s: warning: local %p not on 8b boundary\n",
			       __func__, rq->local);
		}
		if (((uintptr_t)rq->remote & 0x7) != 0) {
			printf("%s: warning: remote %p not on 8b boundary\n",
			       __func__, rq->remote);
		}
#if 0
		printf("%s: %s: local %#x remote %#x size %d\n", __func__,
		       (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 12931 rq->local, rq->remote, rq->size); 12932 #endif 12933 12934 isc_ret = ctl_dt_single(rq); 12935 if (isc_ret == CTL_HA_STATUS_WAIT) 12936 continue; 12937 12938 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12939 rq->ret = CTL_HA_STATUS_SUCCESS; 12940 } else { 12941 rq->ret = isc_ret; 12942 } 12943 callback(rq); 12944 goto bailout; 12945 } 12946 12947 bailout: 12948 return (retval); 12949 12950 } 12951 12952 static void 12953 ctl_datamove_remote_read(union ctl_io *io) 12954 { 12955 int retval; 12956 int i; 12957 12958 /* 12959 * This will send an error to the other controller in the case of a 12960 * failure. 12961 */ 12962 retval = ctl_datamove_remote_sgl_setup(io); 12963 if (retval != 0) 12964 return; 12965 12966 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12967 ctl_datamove_remote_read_cb); 12968 if ((retval != 0) 12969 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12970 /* 12971 * Make sure we free memory if there was an error.. The 12972 * ctl_datamove_remote_xfer() function will send the 12973 * datamove done message, or call the callback with an 12974 * error if there is a problem. 12975 */ 12976 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12977 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12978 } 12979 12980 return; 12981 } 12982 12983 /* 12984 * Process a datamove request from the other controller. This is used for 12985 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12986 * first. Once that is complete, the data gets DMAed into the remote 12987 * controller's memory. For reads, we DMA from the remote controller's 12988 * memory into our memory first, and then move it out to the FETD. 12989 */ 12990 static void 12991 ctl_datamove_remote(union ctl_io *io) 12992 { 12993 struct ctl_softc *softc; 12994 12995 softc = control_softc; 12996 12997 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 12998 12999 /* 13000 * Note that we look for an aborted I/O here, but don't do some of 13001 * the other checks that ctl_datamove() normally does. 13002 * We don't need to run the datamove delay code, since that should 13003 * have been done if need be on the other controller. 
13004 */ 13005 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13006 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 13007 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 13008 io->io_hdr.nexus.targ_port, 13009 io->io_hdr.nexus.targ_target.id, 13010 io->io_hdr.nexus.targ_lun); 13011 io->io_hdr.port_status = 31338; 13012 ctl_send_datamove_done(io, /*have_lock*/ 0); 13013 return; 13014 } 13015 13016 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 13017 ctl_datamove_remote_write(io); 13018 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 13019 ctl_datamove_remote_read(io); 13020 } else { 13021 union ctl_ha_msg msg; 13022 struct scsi_sense_data *sense; 13023 uint8_t sks[3]; 13024 int retry_count; 13025 13026 memset(&msg, 0, sizeof(msg)); 13027 13028 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 13029 msg.hdr.status = CTL_SCSI_ERROR; 13030 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 13031 13032 retry_count = 4243; 13033 13034 sense = &msg.scsi.sense_data; 13035 sks[0] = SSD_SCS_VALID; 13036 sks[1] = (retry_count >> 8) & 0xff; 13037 sks[2] = retry_count & 0xff; 13038 13039 /* "Internal target failure" */ 13040 scsi_set_sense_data(sense, 13041 /*sense_format*/ SSD_TYPE_NONE, 13042 /*current_error*/ 1, 13043 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 13044 /*asc*/ 0x44, 13045 /*ascq*/ 0x00, 13046 /*type*/ SSD_ELEM_SKS, 13047 /*size*/ sizeof(sks), 13048 /*data*/ sks, 13049 SSD_ELEM_NONE); 13050 13051 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13052 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13053 ctl_failover_io(io, /*have_lock*/ 1); 13054 return; 13055 } 13056 13057 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 13058 CTL_HA_STATUS_SUCCESS) { 13059 /* XXX KDM what to do if this fails? */ 13060 } 13061 return; 13062 } 13063 13064 } 13065 13066 static int 13067 ctl_process_done(union ctl_io *io) 13068 { 13069 struct ctl_lun *lun; 13070 struct ctl_softc *softc = control_softc; 13071 void (*fe_done)(union ctl_io *io); 13072 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 13073 13074 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13075 13076 fe_done = softc->ctl_ports[targ_port]->fe_done; 13077 13078 #ifdef CTL_TIME_IO 13079 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13080 char str[256]; 13081 char path_str[64]; 13082 struct sbuf sb; 13083 13084 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13085 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13086 13087 sbuf_cat(&sb, path_str); 13088 switch (io->io_hdr.io_type) { 13089 case CTL_IO_SCSI: 13090 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13091 sbuf_printf(&sb, "\n"); 13092 sbuf_cat(&sb, path_str); 13093 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13094 io->scsiio.tag_num, io->scsiio.tag_type); 13095 break; 13096 case CTL_IO_TASK: 13097 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13098 "Tag Type: %d\n", io->taskio.task_action, 13099 io->taskio.tag_num, io->taskio.tag_type); 13100 break; 13101 default: 13102 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13103 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13104 break; 13105 } 13106 sbuf_cat(&sb, path_str); 13107 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13108 (intmax_t)time_uptime - io->io_hdr.start_time); 13109 sbuf_finish(&sb); 13110 printf("%s", sbuf_data(&sb)); 13111 } 13112 #endif /* CTL_TIME_IO */ 13113 13114 switch (io->io_hdr.io_type) { 13115 case CTL_IO_SCSI: 13116 break; 13117 case CTL_IO_TASK: 13118 if (ctl_debug & CTL_DEBUG_INFO) 13119 ctl_io_error_print(io, 
NULL); 13120 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13121 ctl_free_io(io); 13122 else 13123 fe_done(io); 13124 return (CTL_RETVAL_COMPLETE); 13125 default: 13126 panic("ctl_process_done: invalid io type %d\n", 13127 io->io_hdr.io_type); 13128 break; /* NOTREACHED */ 13129 } 13130 13131 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13132 if (lun == NULL) { 13133 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13134 io->io_hdr.nexus.targ_mapped_lun)); 13135 goto bailout; 13136 } 13137 13138 mtx_lock(&lun->lun_lock); 13139 13140 /* 13141 * Check to see if we have any errors to inject here. We only 13142 * inject errors for commands that don't already have errors set. 13143 */ 13144 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 13145 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13146 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13147 ctl_inject_error(lun, io); 13148 13149 /* 13150 * XXX KDM how do we treat commands that aren't completed 13151 * successfully? 13152 * 13153 * XXX KDM should we also track I/O latency? 13154 */ 13155 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13156 io->io_hdr.io_type == CTL_IO_SCSI) { 13157 #ifdef CTL_TIME_IO 13158 struct bintime cur_bt; 13159 #endif 13160 int type; 13161 13162 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13163 CTL_FLAG_DATA_IN) 13164 type = CTL_STATS_READ; 13165 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13166 CTL_FLAG_DATA_OUT) 13167 type = CTL_STATS_WRITE; 13168 else 13169 type = CTL_STATS_NO_IO; 13170 13171 lun->stats.ports[targ_port].bytes[type] += 13172 io->scsiio.kern_total_len; 13173 lun->stats.ports[targ_port].operations[type]++; 13174 #ifdef CTL_TIME_IO 13175 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13176 &io->io_hdr.dma_bt); 13177 lun->stats.ports[targ_port].num_dmas[type] += 13178 io->io_hdr.num_dmas; 13179 getbintime(&cur_bt); 13180 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13181 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13182 #endif 13183 } 13184 13185 /* 13186 * Remove this from the OOA queue. 13187 */ 13188 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13189 #ifdef CTL_TIME_IO 13190 if (TAILQ_EMPTY(&lun->ooa_queue)) 13191 lun->last_busy = getsbinuptime(); 13192 #endif 13193 13194 /* 13195 * Run through the blocked queue on this LUN and see if anything 13196 * has become unblocked, now that this transaction is done. 13197 */ 13198 ctl_check_blocked(lun); 13199 13200 /* 13201 * If the LUN has been invalidated, free it if there is nothing 13202 * left on its OOA queue. 13203 */ 13204 if ((lun->flags & CTL_LUN_INVALID) 13205 && TAILQ_EMPTY(&lun->ooa_queue)) { 13206 mtx_unlock(&lun->lun_lock); 13207 mtx_lock(&softc->ctl_lock); 13208 ctl_free_lun(lun); 13209 mtx_unlock(&softc->ctl_lock); 13210 } else 13211 mtx_unlock(&lun->lun_lock); 13212 13213 bailout: 13214 13215 /* 13216 * If this command has been aborted, make sure we set the status 13217 * properly. The FETD is responsible for freeing the I/O and doing 13218 * whatever it needs to do to clean up its state. 13219 */ 13220 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13221 ctl_set_task_aborted(&io->scsiio); 13222 13223 /* 13224 * If enabled, print command error status. 13225 */ 13226 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13227 (ctl_debug & CTL_DEBUG_INFO) != 0) 13228 ctl_io_error_print(io, NULL); 13229 13230 /* 13231 * Tell the FETD or the other shelf controller we're done with this 13232 * command. Note that only SCSI commands get to this point. 
Task
	 * management commands are completed above.
	 *
	 * We only send status to the other controller if we're in XFER
	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
	 * received the I/O (from CTL's perspective), and so the status is
	 * generated there.
	 *
	 * XXX KDM if we hold the lock here, we could cause a deadlock
	 * if the frontend comes back in, in this context, to queue
	 * something.
	 */
	if ((softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg;

		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		       sizeof(io->scsiio.sense_data));
		/*
		 * We copy this whether or not this is an I/O-related
		 * command.  Otherwise, we'd have to go and check to see
		 * whether it's a read/write command, and it really isn't
		 * worth it.
		 */
		memcpy(&msg.scsi.lbalen,
		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
		       sizeof(msg.scsi.lbalen));

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
			/* XXX do something here */
		}

		ctl_free_io(io);
	} else
		fe_done(io);

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_WITH_CA
/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_port *port;
	struct ctl_softc *softc;
	uint32_t initidx, targ_lun;

	softc = control_softc;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 */
	mtx_lock(&softc->ctl_lock);

	/*
	 * If we don't have a LUN for this, just toss the sense
	 * information.
	 */
	port = ctl_io_port(&io->io_hdr);
	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
	if ((targ_lun < CTL_MAX_LUNS)
	 && (softc->ctl_luns[targ_lun] != NULL))
		lun = softc->ctl_luns[targ_lun];
	else
		goto bailout;

	initidx = ctl_get_initindex(&io->io_hdr.nexus);

	mtx_lock(&lun->lun_lock);
	/*
	 * Already have CA set for this LUN...toss the sense information.
13326 */ 13327 if (ctl_is_set(lun->have_ca, initidx)) { 13328 mtx_unlock(&lun->lun_lock); 13329 goto bailout; 13330 } 13331 13332 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13333 MIN(sizeof(lun->pending_sense[initidx]), 13334 sizeof(io->scsiio.sense_data))); 13335 ctl_set_mask(lun->have_ca, initidx); 13336 mtx_unlock(&lun->lun_lock); 13337 13338 bailout: 13339 mtx_unlock(&softc->ctl_lock); 13340 13341 ctl_free_io(io); 13342 13343 return (CTL_RETVAL_COMPLETE); 13344 } 13345 #endif 13346 13347 /* 13348 * Primary command inlet from frontend ports. All SCSI and task I/O 13349 * requests must go through this function. 13350 */ 13351 int 13352 ctl_queue(union ctl_io *io) 13353 { 13354 struct ctl_port *port; 13355 13356 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13357 13358 #ifdef CTL_TIME_IO 13359 io->io_hdr.start_time = time_uptime; 13360 getbintime(&io->io_hdr.start_bt); 13361 #endif /* CTL_TIME_IO */ 13362 13363 /* Map FE-specific LUN ID into global one. */ 13364 port = ctl_io_port(&io->io_hdr); 13365 io->io_hdr.nexus.targ_mapped_lun = 13366 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13367 13368 switch (io->io_hdr.io_type) { 13369 case CTL_IO_SCSI: 13370 case CTL_IO_TASK: 13371 if (ctl_debug & CTL_DEBUG_CDB) 13372 ctl_io_print(io); 13373 ctl_enqueue_incoming(io); 13374 break; 13375 default: 13376 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13377 return (EINVAL); 13378 } 13379 13380 return (CTL_RETVAL_COMPLETE); 13381 } 13382 13383 #ifdef CTL_IO_DELAY 13384 static void 13385 ctl_done_timer_wakeup(void *arg) 13386 { 13387 union ctl_io *io; 13388 13389 io = (union ctl_io *)arg; 13390 ctl_done(io); 13391 } 13392 #endif /* CTL_IO_DELAY */ 13393 13394 void 13395 ctl_done(union ctl_io *io) 13396 { 13397 13398 /* 13399 * Enable this to catch duplicate completion issues. 13400 */ 13401 #if 0 13402 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13403 printf("%s: type %d msg %d cdb %x iptl: " 13404 "%d:%d:%d:%d tag 0x%04x " 13405 "flag %#x status %x\n", 13406 __func__, 13407 io->io_hdr.io_type, 13408 io->io_hdr.msg_type, 13409 io->scsiio.cdb[0], 13410 io->io_hdr.nexus.initid.id, 13411 io->io_hdr.nexus.targ_port, 13412 io->io_hdr.nexus.targ_target.id, 13413 io->io_hdr.nexus.targ_lun, 13414 (io->io_hdr.io_type == 13415 CTL_IO_TASK) ? 13416 io->taskio.tag_num : 13417 io->scsiio.tag_num, 13418 io->io_hdr.flags, 13419 io->io_hdr.status); 13420 } else 13421 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13422 #endif 13423 13424 /* 13425 * This is an internal copy of an I/O, and should not go through 13426 * the normal done processing logic. 13427 */ 13428 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13429 return; 13430 13431 /* 13432 * We need to send a msg to the serializing shelf to finish the IO 13433 * as well. We don't send a finish message to the other shelf if 13434 * this is a task management command. Task management commands 13435 * aren't serialized in the OOA queue, but rather just executed on 13436 * both shelf controllers for commands that originated on that 13437 * controller. 
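	 * (The CTL_MSG_FINISH_IO message sent below is handled by
	 * ctl_handle_isc() on the peer, which removes the I/O from its OOA
	 * queue and re-runs its blocked queue in SER_ONLY mode.)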
13438 */ 13439 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 13440 && (io->io_hdr.io_type != CTL_IO_TASK)) { 13441 union ctl_ha_msg msg_io; 13442 13443 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 13444 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 13445 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 13446 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 13447 } 13448 /* continue on to finish IO */ 13449 } 13450 #ifdef CTL_IO_DELAY 13451 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13452 struct ctl_lun *lun; 13453 13454 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13455 13456 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13457 } else { 13458 struct ctl_lun *lun; 13459 13460 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13461 13462 if ((lun != NULL) 13463 && (lun->delay_info.done_delay > 0)) { 13464 struct callout *callout; 13465 13466 callout = (struct callout *)&io->io_hdr.timer_bytes; 13467 callout_init(callout, /*mpsafe*/ 1); 13468 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13469 callout_reset(callout, 13470 lun->delay_info.done_delay * hz, 13471 ctl_done_timer_wakeup, io); 13472 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13473 lun->delay_info.done_delay = 0; 13474 return; 13475 } 13476 } 13477 #endif /* CTL_IO_DELAY */ 13478 13479 ctl_enqueue_done(io); 13480 } 13481 13482 int 13483 ctl_isc(struct ctl_scsiio *ctsio) 13484 { 13485 struct ctl_lun *lun; 13486 int retval; 13487 13488 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13489 13490 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 13491 13492 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 13493 13494 retval = lun->backend->data_submit((union ctl_io *)ctsio); 13495 13496 return (retval); 13497 } 13498 13499 13500 static void 13501 ctl_work_thread(void *arg) 13502 { 13503 struct ctl_thread *thr = (struct ctl_thread *)arg; 13504 struct ctl_softc *softc = thr->ctl_softc; 13505 union ctl_io *io; 13506 int retval; 13507 13508 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13509 13510 for (;;) { 13511 retval = 0; 13512 13513 /* 13514 * We handle the queues in this order: 13515 * - ISC 13516 * - done queue (to free up resources, unblock other commands) 13517 * - RtR queue 13518 * - incoming queue 13519 * 13520 * If those queues are empty, we break out of the loop and 13521 * go to sleep. 
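		 * (More precisely, the thread never leaves the for (;;)
		 * loop; it sleeps on the queue lock via mtx_sleep() with
		 * PDROP and is woken by the ctl_enqueue_*() helpers.)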
static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		retval = 0;

		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If those queues are empty, we sleep until new work arrives.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			retval = ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		if (!ctl_pause_rtr) {
			io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
			if (io != NULL) {
				STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
				mtx_unlock(&thr->queue_lock);
				retval = ctl_scsiio(&io->scsiio);
				if (retval != CTL_RETVAL_COMPLETE)
					CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
				continue;
			}
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

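/*
 * LUN creation thread.  Backends queue new ctl_be_lun structures on the
 * softc's pending LUN queue; this thread dequeues them one at a time and
 * calls ctl_create_lun() for each, sleeping when the queue is empty.
 */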
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		retval = 0;
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

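/*
 * Thin provisioning threshold thread.  Once per CTL_LBP_PERIOD it walks
 * the LUN list, skips LUNs that are disabled, offline, or whose backend
 * does not export LUN attributes, and compares the backend-reported block
 * counts (blocksavail/blocksused/poolblocksavail/poolblocksused) against
 * the armed descriptors in the logical block provisioning mode page.
 * When a threshold is crossed it establishes the CTL_UA_THIN_PROV_THRES
 * unit attention, re-issuing it at most once per CTL_LBP_UA_PERIOD while
 * the condition persists; otherwise it clears that unit attention.
 */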
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_be_lun *be_lun;
	struct scsi_da_rw_recovery_page *rwpage;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	uint64_t thres, val;
	int i, e;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			be_lun = lun->be_lun;
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_OFFLINE) ||
			    lun->backend->lun_attr == NULL)
				continue;
			rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
			if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e |= (val >= thres);
				else
					e |= (val <= thres);
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				}
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
			}
			mtx_unlock(&lun->lun_lock);
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}

static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid.id) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

#ifdef notyet
static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Initialization and failover */

void
ctl_init_isc_msg(void)
{
	printf("CTL: Still calling this thing\n");
}

/*
 * Init component
 *	Initializes component into configuration defined by bootMode
 *	(see hasc-sv.c)
 *	returns hasc_Status:
 *		OK
 *		ERROR - fatal error
 */
static ctl_ha_comp_status
ctl_isc_init(struct ctl_ha_component *c)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	c->status = ret;
	return ret;
}

/* Start component
 *	Starts component in the state requested.  If the component starts
 *	successfully, it must set its own state to the requested state.
 *	When the requested state is HASC_STATE_HA, the component may refine
 *	it by adding _SLAVE or _MASTER flags.
 *	Currently allowed state transitions are:
 *	UNKNOWN->HA	- initial startup
 *	UNKNOWN->SINGLE	- initial startup when no partner detected
 *	HA->SINGLE	- failover
 *	returns ctl_ha_comp_status:
 *		OK	- component successfully started in requested state
 *		FAILED	- could not start the requested state, failover may
 *			  be possible
 *		ERROR	- fatal error detected, no future startup possible
 */
static ctl_ha_comp_status
ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	printf("%s: go\n", __func__);

	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
	if (c->state == CTL_HA_STATE_UNKNOWN) {
		control_softc->is_single = 0;
		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
			ret = CTL_HA_COMP_STATUS_ERROR;
		}
	} else if (CTL_HA_STATE_IS_HA(c->state)
		&& CTL_HA_STATE_IS_SINGLE(state)) {
		// HA->SINGLE transition
		ctl_failover();
		control_softc->is_single = 1;
	} else {
		printf("ctl_isc_start: Invalid state transition %X->%X\n",
		       c->state, state);
		ret = CTL_HA_COMP_STATUS_ERROR;
	}
	if (CTL_HA_STATE_IS_SINGLE(state))
		control_softc->is_single = 1;

	c->state = state;
	c->status = ret;
	return ret;
}

/*
 * Quiesce component
 * The component must clear any error conditions (set status to OK) and
 * prepare itself for another Start call.
 * returns ctl_ha_comp_status:
 *	OK
 *	ERROR
 */
static ctl_ha_comp_status
ctl_isc_quiesce(struct ctl_ha_component *c)
{
	int ret = CTL_HA_COMP_STATUS_OK;

	ctl_pause_rtr = 1;
	c->status = ret;
	return ret;
}

struct ctl_ha_component ctl_ha_component_ctlisc =
{
	.name = "CTL ISC",
	.state = CTL_HA_STATE_UNKNOWN,
	.init = ctl_isc_init,
	.start = ctl_isc_start,
	.quiesce = ctl_isc_quiesce
};
#endif

/*
 * vim: ts=8
 */