1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 /* 26 * Copyright 2019 Nexenta Systems, Inc. All rights reserved. 27 * Copyright (c) 2013 by Delphix. All rights reserved. 28 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 
29 */ 30 31 #include <sys/conf.h> 32 #include <sys/file.h> 33 #include <sys/ddi.h> 34 #include <sys/sunddi.h> 35 #include <sys/modctl.h> 36 #include <sys/scsi/scsi.h> 37 #include <sys/scsi/generic/persist.h> 38 #include <sys/scsi/impl/scsi_reset_notify.h> 39 #include <sys/disp.h> 40 #include <sys/byteorder.h> 41 #include <sys/atomic.h> 42 #include <sys/ethernet.h> 43 #include <sys/sdt.h> 44 #include <sys/nvpair.h> 45 #include <sys/zone.h> 46 #include <sys/id_space.h> 47 48 #include <sys/stmf.h> 49 #include <sys/lpif.h> 50 #include <sys/portif.h> 51 #include <sys/stmf_ioctl.h> 52 #include <sys/pppt_ic_if.h> 53 54 #include "stmf_impl.h" 55 #include "lun_map.h" 56 #include "stmf_state.h" 57 #include "stmf_stats.h" 58 59 /* 60 * Lock order: 61 * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock 62 */ 63 64 static uint64_t stmf_session_counter = 0; 65 static uint16_t stmf_rtpid_counter = 0; 66 /* start messages at 1 */ 67 static uint64_t stmf_proxy_msg_id = 1; 68 #define MSG_ID_TM_BIT 0x8000000000000000 69 #define ALIGNED_TO_8BYTE_BOUNDARY(i) (((i) + 7) & ~7) 70 71 /* 72 * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up 73 * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman 74 * seconds. If it does, we trigger a panic to inform the user of hung I/O 75 * blocking us for too long. 
76 */ 77 boolean_t stmf_io_deadman_enabled = B_TRUE; 78 int stmf_io_deadman = 1000; /* seconds */ 79 80 struct stmf_svc_clocks; 81 82 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 83 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 84 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 85 void **result); 86 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp); 87 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp); 88 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 89 cred_t *credp, int *rval); 90 static int stmf_get_stmf_state(stmf_state_desc_t *std); 91 static int stmf_set_stmf_state(stmf_state_desc_t *std); 92 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu, 93 char *info); 94 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state); 95 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state); 96 97 static void stmf_task_audit(stmf_i_scsi_task_t *itask, 98 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf); 99 100 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp); 101 static char stmf_ctoi(char c); 102 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua); 103 void stmf_svc_init(); 104 stmf_status_t stmf_svc_fini(); 105 void stmf_svc(void *arg); 106 static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu); 107 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info); 108 static void stmf_svc_kill_obj_requests(void *obj); 109 static void stmf_svc_timeout(struct stmf_svc_clocks *); 110 void stmf_check_freetask(); 111 void stmf_abort_target_reset(scsi_task_t *task); 112 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, 113 int target_reset); 114 void stmf_target_reset_poll(struct scsi_task *task); 115 void stmf_handle_lun_reset(scsi_task_t *task); 116 void stmf_handle_target_reset(scsi_task_t *task); 117 void 
stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off); 118 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token, 119 uint32_t *err_ret); 120 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi); 121 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out, 122 uint32_t *err_ret); 123 void stmf_delete_ppd(stmf_pp_data_t *ppd); 124 void stmf_delete_all_ppds(); 125 void stmf_trace_clear(); 126 void stmf_worker_init(); 127 stmf_status_t stmf_worker_fini(); 128 void stmf_worker_task(void *arg); 129 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss); 130 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, 131 uint32_t type); 132 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg); 133 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg); 134 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg); 135 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg); 136 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s); 137 138 /* pppt modhandle */ 139 ddi_modhandle_t pppt_mod; 140 141 /* pppt modload imported functions */ 142 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc; 143 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc; 144 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc; 145 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc; 146 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc; 147 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc; 148 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc; 149 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc; 150 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc; 151 stmf_ic_tx_msg_func_t ic_tx_msg; 152 stmf_ic_msg_free_func_t ic_msg_free; 153 154 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask); 155 static void 
stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask); 156 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask); 157 158 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, 159 stmf_data_buf_t *dbuf); 160 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, 161 stmf_data_buf_t *dbuf); 162 163 static void stmf_update_kstat_lu_q(scsi_task_t *, void()); 164 static void stmf_update_kstat_lport_q(scsi_task_t *, void()); 165 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *); 166 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *); 167 static hrtime_t stmf_update_rport_timestamps(hrtime_t *start_tstamp, 168 hrtime_t *done_tstamp, stmf_i_scsi_task_t *itask); 169 170 static int stmf_irport_compare(const void *void_irport1, 171 const void *void_irport2); 172 static void stmf_create_kstat_rport(stmf_i_remote_port_t *irport); 173 static void stmf_destroy_kstat_rport(stmf_i_remote_port_t *irport); 174 static int stmf_kstat_rport_update(kstat_t *ksp, int rw); 175 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid); 176 static void stmf_irport_destroy(stmf_i_remote_port_t *irport); 177 static stmf_i_remote_port_t *stmf_irport_register( 178 scsi_devid_desc_t *rport_devid); 179 static stmf_i_remote_port_t *stmf_irport_lookup_locked( 180 scsi_devid_desc_t *rport_devid); 181 static void stmf_irport_deregister(stmf_i_remote_port_t *irport); 182 183 extern struct mod_ops mod_driverops; 184 185 /* =====[ Tunables ]===== */ 186 /* Internal tracing */ 187 volatile int stmf_trace_on = 0; 188 volatile int stmf_trace_buf_size = (1 * 1024 * 1024); 189 /* 190 * The reason default task timeout is 75 is because we want the 191 * host to timeout 1st and mostly host timeout is 60 seconds. 192 */ 193 volatile int stmf_default_task_timeout = 75; 194 /* 195 * Setting this to one means, you are responsible for config load and keeping 196 * things in sync with persistent database. 
 */
volatile int stmf_allow_modunload = 0;

/* Requested size of the worker-thread pool (see stmf_worker_init()). */
volatile int stmf_nworkers = 512;

/* === [ Debugging and fault injection ] === */
#ifdef DEBUG
volatile int stmf_drop_task_counter = 0;
volatile int stmf_drop_buf_counter = 0;

#endif

/* Global STMF soft state; zeroed and initialized in _init(). */
stmf_state_t stmf_state;
/* Default LU; presumably set up by stmf_dlun_init() -- TODO confirm. */
static stmf_lu_t *dlun0;

/*
 * Per-nibble bit-scan tables: index with a 4-bit value to get the bit
 * position (from LSB) of the first zero/one bit; 0xff means the nibble
 * has no such bit.
 */
static uint8_t stmf_first_zero[] =
	{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
	{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };

/* Internal trace buffer, allocated in _init() (stmf_trace_buf_size bytes). */
static kmutex_t trace_buf_lock;
static int trace_buf_size;
static int trace_buf_curndx;
caddr_t stmf_trace_buf;

/* Worker-thread pool bookkeeping. */
static enum {
	STMF_WORKERS_DISABLED = 0,
	STMF_WORKERS_ENABLING,
	STMF_WORKERS_ENABLED
} stmf_workers_state = STMF_WORKERS_DISABLED;
static kmutex_t stmf_worker_sel_mx;
volatile uint32_t stmf_nworkers_cur = 0; /* # of workers currently running */
static int stmf_worker_sel_counter = 0;
static uint32_t stmf_cur_ntasks = 0;
static clock_t stmf_wm_next = 0;
static int stmf_nworkers_accepting_cmds;
static stmf_worker_t *stmf_workers = NULL;
static clock_t stmf_worker_scale_down_timer = 0;
static int stmf_worker_scale_down_qd = 0;

/* Character-device entry points for the "admin" minor node. */
static struct cb_ops stmf_cb_ops = {
	stmf_open,		/* open */
	stmf_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	stmf_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP,		/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};

/* Device operations vector (autoconfiguration entry points). */
static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,
	stmf_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	stmf_attach,
	stmf_detach,
	nodev,			/* reset */
	&stmf_cb_ops,
	NULL,			/* bus_ops */
	NULL
/* power */ 270 }; 271 272 #define STMF_NAME "COMSTAR STMF" 273 #define STMF_MODULE_NAME "stmf" 274 275 static struct modldrv modldrv = { 276 &mod_driverops, 277 STMF_NAME, 278 &stmf_ops 279 }; 280 281 static struct modlinkage modlinkage = { 282 MODREV_1, 283 &modldrv, 284 NULL 285 }; 286 287 int 288 _init(void) 289 { 290 int ret; 291 292 ret = mod_install(&modlinkage); 293 if (ret) 294 return (ret); 295 stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP); 296 trace_buf_size = stmf_trace_buf_size; 297 trace_buf_curndx = 0; 298 mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0); 299 mutex_init(&stmf_worker_sel_mx, NULL, MUTEX_ADAPTIVE, 0); 300 bzero(&stmf_state, sizeof (stmf_state_t)); 301 /* STMF service is off by default */ 302 stmf_state.stmf_service_running = 0; 303 /* default lu/lport states are online */ 304 stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE; 305 stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE; 306 mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL); 307 cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL); 308 stmf_session_counter = (uint64_t)ddi_get_lbolt(); 309 avl_create(&stmf_state.stmf_irportlist, 310 stmf_irport_compare, sizeof (stmf_i_remote_port_t), 311 offsetof(stmf_i_remote_port_t, irport_ln)); 312 stmf_state.stmf_ilport_inst_space = 313 id_space_create("lport-instances", 0, MAX_ILPORT); 314 stmf_state.stmf_irport_inst_space = 315 id_space_create("rport-instances", 0, MAX_IRPORT); 316 stmf_view_init(); 317 stmf_svc_init(); 318 stmf_dlun_init(); 319 return (ret); 320 } 321 322 int 323 _fini(void) 324 { 325 int ret; 326 stmf_i_remote_port_t *irport; 327 void *avl_dest_cookie = NULL; 328 329 if (stmf_state.stmf_service_running) 330 return (EBUSY); 331 if ((!stmf_allow_modunload) && 332 (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) { 333 return (EBUSY); 334 } 335 if (stmf_state.stmf_nlps || stmf_state.stmf_npps) { 336 return (EBUSY); 337 } 338 if (stmf_dlun_fini() != STMF_SUCCESS) 339 return (EBUSY); 340 
if (stmf_worker_fini() != STMF_SUCCESS) { 341 stmf_dlun_init(); 342 return (EBUSY); 343 } 344 if (stmf_svc_fini() != STMF_SUCCESS) { 345 stmf_dlun_init(); 346 stmf_worker_init(); 347 return (EBUSY); 348 } 349 350 ret = mod_remove(&modlinkage); 351 if (ret) { 352 stmf_svc_init(); 353 stmf_dlun_init(); 354 stmf_worker_init(); 355 return (ret); 356 } 357 358 stmf_view_clear_config(); 359 360 while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist, 361 &avl_dest_cookie)) != NULL) 362 stmf_irport_destroy(irport); 363 avl_destroy(&stmf_state.stmf_irportlist); 364 id_space_destroy(stmf_state.stmf_ilport_inst_space); 365 id_space_destroy(stmf_state.stmf_irport_inst_space); 366 367 kmem_free(stmf_trace_buf, stmf_trace_buf_size); 368 mutex_destroy(&trace_buf_lock); 369 mutex_destroy(&stmf_state.stmf_lock); 370 mutex_destroy(&stmf_worker_sel_mx); 371 cv_destroy(&stmf_state.stmf_cv); 372 return (ret); 373 } 374 375 int 376 _info(struct modinfo *modinfop) 377 { 378 return (mod_info(&modlinkage, modinfop)); 379 } 380 381 /* ARGSUSED */ 382 static int 383 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 384 { 385 switch (cmd) { 386 case DDI_INFO_DEVT2DEVINFO: 387 *result = stmf_state.stmf_dip; 388 break; 389 case DDI_INFO_DEVT2INSTANCE: 390 *result = 391 (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip); 392 break; 393 default: 394 return (DDI_FAILURE); 395 } 396 397 return (DDI_SUCCESS); 398 } 399 400 static int 401 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 402 { 403 switch (cmd) { 404 case DDI_ATTACH: 405 stmf_state.stmf_dip = dip; 406 407 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0, 408 DDI_NT_STMF, 0) != DDI_SUCCESS) { 409 break; 410 } 411 ddi_report_dev(dip); 412 return (DDI_SUCCESS); 413 } 414 415 return (DDI_FAILURE); 416 } 417 418 static int 419 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 420 { 421 switch (cmd) { 422 case DDI_DETACH: 423 ddi_remove_minor_node(dip, 0); 424 return (DDI_SUCCESS); 425 } 426 427 
return (DDI_FAILURE); 428 } 429 430 /* ARGSUSED */ 431 static int 432 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp) 433 { 434 mutex_enter(&stmf_state.stmf_lock); 435 if (stmf_state.stmf_exclusive_open) { 436 mutex_exit(&stmf_state.stmf_lock); 437 return (EBUSY); 438 } 439 if (flag & FEXCL) { 440 if (stmf_state.stmf_opened) { 441 mutex_exit(&stmf_state.stmf_lock); 442 return (EBUSY); 443 } 444 stmf_state.stmf_exclusive_open = 1; 445 } 446 stmf_state.stmf_opened = 1; 447 mutex_exit(&stmf_state.stmf_lock); 448 return (0); 449 } 450 451 /* ARGSUSED */ 452 static int 453 stmf_close(dev_t dev, int flag, int otype, cred_t *credp) 454 { 455 mutex_enter(&stmf_state.stmf_lock); 456 stmf_state.stmf_opened = 0; 457 if (stmf_state.stmf_exclusive_open && 458 (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) { 459 stmf_state.stmf_config_state = STMF_CONFIG_NONE; 460 stmf_delete_all_ppds(); 461 stmf_view_clear_config(); 462 stmf_view_init(); 463 } 464 stmf_state.stmf_exclusive_open = 0; 465 mutex_exit(&stmf_state.stmf_lock); 466 return (0); 467 } 468 469 int 470 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd, 471 void **ibuf, void **obuf) 472 { 473 int ret; 474 475 *ibuf = NULL; 476 *obuf = NULL; 477 *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP); 478 479 ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode); 480 if (ret) 481 return (EFAULT); 482 if ((*iocd)->stmf_version != STMF_VERSION_1) { 483 ret = EINVAL; 484 goto copyin_iocdata_done; 485 } 486 if ((*iocd)->stmf_ibuf_size) { 487 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP); 488 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf), 489 *ibuf, (*iocd)->stmf_ibuf_size, mode); 490 } 491 if ((*iocd)->stmf_obuf_size) 492 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP); 493 494 if (ret == 0) 495 return (0); 496 ret = EFAULT; 497 copyin_iocdata_done:; 498 if (*obuf) { 499 kmem_free(*obuf, (*iocd)->stmf_obuf_size); 500 *obuf = NULL; 501 } 502 if 
(*ibuf) {
		kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
		*ibuf = NULL;
	}
	kmem_free(*iocd, sizeof (stmf_iocdata_t));
	return (ret);
}

/*
 * Copy the output payload (if any) and the updated stmf_iocdata_t header
 * back out to the user address originally passed to stmf_ioctl().
 * Returns 0 on success, EFAULT on any copyout failure.
 */
int
stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
{
	int ret;

	if (iocd->stmf_obuf_size) {
		ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
		    iocd->stmf_obuf_size, mode);
		if (ret)
			return (EFAULT);
	}
	ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
	if (ret)
		return (EFAULT);
	return (0);
}

/*
 * ioctl(9E) entry point for the STMF admin node.  Only commands in the
 * STMF_IOCTL class are accepted and the caller must pass drv_priv().
 * Arguments are marshalled in/out via stmf_copyin_iocdata() and
 * stmf_copyout_iocdata(); most cases walk internal lists under
 * stmf_state.stmf_lock.
 */
/* ARGSUSED */
static int
stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	stmf_iocdata_t *iocd;
	void *ibuf = NULL, *obuf = NULL;
	slist_lu_t *luid_list;
	slist_target_port_t *lportid_list;
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	slist_scsi_session_t *iss_list;
	sioc_lu_props_t *lup;
	sioc_target_port_props_t *lportp;
	stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
	uint64_t *ppi_token = NULL;
	uint8_t *p_id, *id;
	stmf_state_desc_t *std;
	stmf_status_t ctl_ret;
	stmf_state_change_info_t ssi;
	int ret = 0;
	uint32_t n;
	int i;
	stmf_group_op_data_t *grp_entry;
	stmf_group_name_t *grpname;
	stmf_view_op_entry_t *ve;
	stmf_id_type_t idtype;
	stmf_id_data_t *id_entry;
	stmf_id_list_t *id_list;
	stmf_view_entry_t *view_entry;
	stmf_set_props_t *stmf_set_props;
	uint32_t veid;
	if ((cmd & 0xff000000) != STMF_IOCTL) {
		return (ENOTTY);
	}

	if (drv_priv(credp) != 0) {
		return (EPERM);
	}

	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
	if (ret)
		return (ret);
	iocd->stmf_error = 0;

	switch (cmd) {
	case STMF_IOCTL_LU_LIST:
		/* retrieves both registered/unregistered */
		mutex_enter(&stmf_state.stmf_lock);
		id_list = &stmf_state.stmf_luid_list;
		n =
min(id_list->id_count, 579 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 580 iocd->stmf_obuf_max_nentries = id_list->id_count; 581 luid_list = (slist_lu_t *)obuf; 582 id_entry = id_list->idl_head; 583 for (i = 0; i < n; i++) { 584 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16); 585 id_entry = id_entry->id_next; 586 } 587 588 n = iocd->stmf_obuf_size/sizeof (slist_lu_t); 589 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 590 id = (uint8_t *)ilu->ilu_lu->lu_id; 591 if (stmf_lookup_id(id_list, 16, id + 4) == NULL) { 592 iocd->stmf_obuf_max_nentries++; 593 if (i < n) { 594 bcopy(id + 4, luid_list[i].lu_guid, 595 sizeof (slist_lu_t)); 596 i++; 597 } 598 } 599 } 600 iocd->stmf_obuf_nentries = i; 601 mutex_exit(&stmf_state.stmf_lock); 602 break; 603 604 case STMF_IOCTL_REG_LU_LIST: 605 mutex_enter(&stmf_state.stmf_lock); 606 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus; 607 n = min(stmf_state.stmf_nlus, 608 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 609 iocd->stmf_obuf_nentries = n; 610 ilu = stmf_state.stmf_ilulist; 611 luid_list = (slist_lu_t *)obuf; 612 for (i = 0; i < n; i++) { 613 uint8_t *id; 614 id = (uint8_t *)ilu->ilu_lu->lu_id; 615 bcopy(id + 4, luid_list[i].lu_guid, 16); 616 ilu = ilu->ilu_next; 617 } 618 mutex_exit(&stmf_state.stmf_lock); 619 break; 620 621 case STMF_IOCTL_VE_LU_LIST: 622 mutex_enter(&stmf_state.stmf_lock); 623 id_list = &stmf_state.stmf_luid_list; 624 n = min(id_list->id_count, 625 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 626 iocd->stmf_obuf_max_nentries = id_list->id_count; 627 iocd->stmf_obuf_nentries = n; 628 luid_list = (slist_lu_t *)obuf; 629 id_entry = id_list->idl_head; 630 for (i = 0; i < n; i++) { 631 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16); 632 id_entry = id_entry->id_next; 633 } 634 mutex_exit(&stmf_state.stmf_lock); 635 break; 636 637 case STMF_IOCTL_TARGET_PORT_LIST: 638 mutex_enter(&stmf_state.stmf_lock); 639 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports; 640 n = 
min(stmf_state.stmf_nlports, 641 (iocd->stmf_obuf_size)/sizeof (slist_target_port_t)); 642 iocd->stmf_obuf_nentries = n; 643 ilport = stmf_state.stmf_ilportlist; 644 lportid_list = (slist_target_port_t *)obuf; 645 for (i = 0; i < n; i++) { 646 uint8_t *id; 647 id = (uint8_t *)ilport->ilport_lport->lport_id; 648 bcopy(id, lportid_list[i].target, id[3] + 4); 649 ilport = ilport->ilport_next; 650 } 651 mutex_exit(&stmf_state.stmf_lock); 652 break; 653 654 case STMF_IOCTL_SESSION_LIST: 655 p_id = (uint8_t *)ibuf; 656 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) || 657 (iocd->stmf_ibuf_size < (p_id[3] + 4))) { 658 ret = EINVAL; 659 break; 660 } 661 mutex_enter(&stmf_state.stmf_lock); 662 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport = 663 ilport->ilport_next) { 664 uint8_t *id; 665 id = (uint8_t *)ilport->ilport_lport->lport_id; 666 if ((p_id[3] == id[3]) && 667 (bcmp(p_id + 4, id + 4, id[3]) == 0)) { 668 break; 669 } 670 } 671 if (ilport == NULL) { 672 mutex_exit(&stmf_state.stmf_lock); 673 ret = ENOENT; 674 break; 675 } 676 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions; 677 n = min(ilport->ilport_nsessions, 678 (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t)); 679 iocd->stmf_obuf_nentries = n; 680 iss = ilport->ilport_ss_list; 681 iss_list = (slist_scsi_session_t *)obuf; 682 for (i = 0; i < n; i++) { 683 uint8_t *id; 684 id = (uint8_t *)iss->iss_ss->ss_rport_id; 685 bcopy(id, iss_list[i].initiator, id[3] + 4); 686 iss_list[i].creation_time = (uint32_t) 687 iss->iss_creation_time; 688 if (iss->iss_ss->ss_rport_alias) { 689 (void) strncpy(iss_list[i].alias, 690 iss->iss_ss->ss_rport_alias, 255); 691 iss_list[i].alias[255] = '\0'; 692 } else { 693 iss_list[i].alias[0] = '\0'; 694 } 695 iss = iss->iss_next; 696 } 697 mutex_exit(&stmf_state.stmf_lock); 698 break; 699 700 case STMF_IOCTL_GET_LU_PROPERTIES: 701 p_id = (uint8_t *)ibuf; 702 if ((iocd->stmf_ibuf_size < 16) || 703 (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) || 704 (p_id[0] == 
0)) { 705 ret = EINVAL; 706 break; 707 } 708 mutex_enter(&stmf_state.stmf_lock); 709 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 710 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0) 711 break; 712 } 713 if (ilu == NULL) { 714 mutex_exit(&stmf_state.stmf_lock); 715 ret = ENOENT; 716 break; 717 } 718 lup = (sioc_lu_props_t *)obuf; 719 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16); 720 lup->lu_state = ilu->ilu_state & 0x0f; 721 lup->lu_present = 1; /* XXX */ 722 (void) strncpy(lup->lu_provider_name, 723 ilu->ilu_lu->lu_lp->lp_name, 255); 724 lup->lu_provider_name[254] = '\0'; 725 if (ilu->ilu_lu->lu_alias) { 726 (void) strncpy(lup->lu_alias, 727 ilu->ilu_lu->lu_alias, 255); 728 lup->lu_alias[255] = '\0'; 729 } else { 730 lup->lu_alias[0] = '\0'; 731 } 732 mutex_exit(&stmf_state.stmf_lock); 733 break; 734 735 case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES: 736 p_id = (uint8_t *)ibuf; 737 if ((p_id == NULL) || 738 (iocd->stmf_ibuf_size < (p_id[3] + 4)) || 739 (iocd->stmf_obuf_size < 740 sizeof (sioc_target_port_props_t))) { 741 ret = EINVAL; 742 break; 743 } 744 mutex_enter(&stmf_state.stmf_lock); 745 for (ilport = stmf_state.stmf_ilportlist; ilport; 746 ilport = ilport->ilport_next) { 747 uint8_t *id; 748 id = (uint8_t *)ilport->ilport_lport->lport_id; 749 if ((p_id[3] == id[3]) && 750 (bcmp(p_id+4, id+4, id[3]) == 0)) 751 break; 752 } 753 if (ilport == NULL) { 754 mutex_exit(&stmf_state.stmf_lock); 755 ret = ENOENT; 756 break; 757 } 758 lportp = (sioc_target_port_props_t *)obuf; 759 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id, 760 ilport->ilport_lport->lport_id->ident_length + 4); 761 lportp->tgt_state = ilport->ilport_state & 0x0f; 762 lportp->tgt_present = 1; /* XXX */ 763 (void) strncpy(lportp->tgt_provider_name, 764 ilport->ilport_lport->lport_pp->pp_name, 255); 765 lportp->tgt_provider_name[254] = '\0'; 766 if (ilport->ilport_lport->lport_alias) { 767 (void) strncpy(lportp->tgt_alias, 768 ilport->ilport_lport->lport_alias, 255); 
769 lportp->tgt_alias[255] = '\0'; 770 } else { 771 lportp->tgt_alias[0] = '\0'; 772 } 773 mutex_exit(&stmf_state.stmf_lock); 774 break; 775 776 case STMF_IOCTL_SET_STMF_STATE: 777 if ((ibuf == NULL) || 778 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 779 ret = EINVAL; 780 break; 781 } 782 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf); 783 break; 784 785 case STMF_IOCTL_GET_STMF_STATE: 786 if ((obuf == NULL) || 787 (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) { 788 ret = EINVAL; 789 break; 790 } 791 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf); 792 break; 793 794 case STMF_IOCTL_SET_ALUA_STATE: 795 if ((ibuf == NULL) || 796 (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) { 797 ret = EINVAL; 798 break; 799 } 800 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf); 801 break; 802 803 case STMF_IOCTL_GET_ALUA_STATE: 804 if ((obuf == NULL) || 805 (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) { 806 ret = EINVAL; 807 break; 808 } 809 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf); 810 break; 811 812 case STMF_IOCTL_SET_LU_STATE: 813 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 814 ssi.st_additional_info = NULL; 815 std = (stmf_state_desc_t *)ibuf; 816 if ((ibuf == NULL) || 817 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 818 ret = EINVAL; 819 break; 820 } 821 p_id = std->ident; 822 mutex_enter(&stmf_state.stmf_lock); 823 if (stmf_state.stmf_inventory_locked) { 824 mutex_exit(&stmf_state.stmf_lock); 825 ret = EBUSY; 826 break; 827 } 828 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 829 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0) 830 break; 831 } 832 if (ilu == NULL) { 833 mutex_exit(&stmf_state.stmf_lock); 834 ret = ENOENT; 835 break; 836 } 837 stmf_state.stmf_inventory_locked = 1; 838 mutex_exit(&stmf_state.stmf_lock); 839 cmd = (std->state == STMF_STATE_ONLINE) ? 
STMF_CMD_LU_ONLINE : 840 STMF_CMD_LU_OFFLINE; 841 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi); 842 if (ctl_ret == STMF_ALREADY) 843 ret = 0; 844 else if (ctl_ret == STMF_BUSY) 845 ret = EBUSY; 846 else if (ctl_ret != STMF_SUCCESS) 847 ret = EIO; 848 mutex_enter(&stmf_state.stmf_lock); 849 stmf_state.stmf_inventory_locked = 0; 850 mutex_exit(&stmf_state.stmf_lock); 851 break; 852 853 case STMF_IOCTL_SET_STMF_PROPS: 854 if ((ibuf == NULL) || 855 (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) { 856 ret = EINVAL; 857 break; 858 } 859 stmf_set_props = (stmf_set_props_t *)ibuf; 860 mutex_enter(&stmf_state.stmf_lock); 861 if ((stmf_set_props->default_lu_state_value == 862 STMF_STATE_OFFLINE) || 863 (stmf_set_props->default_lu_state_value == 864 STMF_STATE_ONLINE)) { 865 stmf_state.stmf_default_lu_state = 866 stmf_set_props->default_lu_state_value; 867 } 868 if ((stmf_set_props->default_target_state_value == 869 STMF_STATE_OFFLINE) || 870 (stmf_set_props->default_target_state_value == 871 STMF_STATE_ONLINE)) { 872 stmf_state.stmf_default_lport_state = 873 stmf_set_props->default_target_state_value; 874 } 875 876 mutex_exit(&stmf_state.stmf_lock); 877 break; 878 879 case STMF_IOCTL_SET_TARGET_PORT_STATE: 880 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 881 ssi.st_additional_info = NULL; 882 std = (stmf_state_desc_t *)ibuf; 883 if ((ibuf == NULL) || 884 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 885 ret = EINVAL; 886 break; 887 } 888 p_id = std->ident; 889 mutex_enter(&stmf_state.stmf_lock); 890 if (stmf_state.stmf_inventory_locked) { 891 mutex_exit(&stmf_state.stmf_lock); 892 ret = EBUSY; 893 break; 894 } 895 for (ilport = stmf_state.stmf_ilportlist; ilport; 896 ilport = ilport->ilport_next) { 897 uint8_t *id; 898 id = (uint8_t *)ilport->ilport_lport->lport_id; 899 if ((id[3] == p_id[3]) && 900 (bcmp(id+4, p_id+4, id[3]) == 0)) { 901 break; 902 } 903 } 904 if (ilport == NULL) { 905 mutex_exit(&stmf_state.stmf_lock); 906 ret = ENOENT; 907 break; 908 } 909 
stmf_state.stmf_inventory_locked = 1; 910 mutex_exit(&stmf_state.stmf_lock); 911 cmd = (std->state == STMF_STATE_ONLINE) ? 912 STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE; 913 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi); 914 if (ctl_ret == STMF_ALREADY) 915 ret = 0; 916 else if (ctl_ret == STMF_BUSY) 917 ret = EBUSY; 918 else if (ctl_ret != STMF_SUCCESS) 919 ret = EIO; 920 mutex_enter(&stmf_state.stmf_lock); 921 stmf_state.stmf_inventory_locked = 0; 922 mutex_exit(&stmf_state.stmf_lock); 923 break; 924 925 case STMF_IOCTL_ADD_HG_ENTRY: 926 idtype = STMF_ID_TYPE_HOST; 927 /* FALLTHROUGH */ 928 case STMF_IOCTL_ADD_TG_ENTRY: 929 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 930 ret = EACCES; 931 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 932 break; 933 } 934 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) { 935 idtype = STMF_ID_TYPE_TARGET; 936 } 937 grp_entry = (stmf_group_op_data_t *)ibuf; 938 if ((ibuf == NULL) || 939 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) { 940 ret = EINVAL; 941 break; 942 } 943 if (grp_entry->group.name[0] == '*') { 944 ret = EINVAL; 945 break; /* not allowed */ 946 } 947 mutex_enter(&stmf_state.stmf_lock); 948 ret = stmf_add_group_member(grp_entry->group.name, 949 grp_entry->group.name_size, 950 grp_entry->ident + 4, 951 grp_entry->ident[3], 952 idtype, 953 &iocd->stmf_error); 954 mutex_exit(&stmf_state.stmf_lock); 955 break; 956 case STMF_IOCTL_REMOVE_HG_ENTRY: 957 idtype = STMF_ID_TYPE_HOST; 958 /* FALLTHROUGH */ 959 case STMF_IOCTL_REMOVE_TG_ENTRY: 960 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 961 ret = EACCES; 962 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 963 break; 964 } 965 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) { 966 idtype = STMF_ID_TYPE_TARGET; 967 } 968 grp_entry = (stmf_group_op_data_t *)ibuf; 969 if ((ibuf == NULL) || 970 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) { 971 ret = EINVAL; 972 break; 973 } 974 if (grp_entry->group.name[0] == '*') { 975 ret = 
EINVAL; 976 break; /* not allowed */ 977 } 978 mutex_enter(&stmf_state.stmf_lock); 979 ret = stmf_remove_group_member(grp_entry->group.name, 980 grp_entry->group.name_size, 981 grp_entry->ident + 4, 982 grp_entry->ident[3], 983 idtype, 984 &iocd->stmf_error); 985 mutex_exit(&stmf_state.stmf_lock); 986 break; 987 case STMF_IOCTL_CREATE_HOST_GROUP: 988 idtype = STMF_ID_TYPE_HOST_GROUP; 989 /* FALLTHROUGH */ 990 case STMF_IOCTL_CREATE_TARGET_GROUP: 991 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 992 ret = EACCES; 993 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 994 break; 995 } 996 grpname = (stmf_group_name_t *)ibuf; 997 998 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP) 999 idtype = STMF_ID_TYPE_TARGET_GROUP; 1000 if ((ibuf == NULL) || 1001 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1002 ret = EINVAL; 1003 break; 1004 } 1005 if (grpname->name[0] == '*') { 1006 ret = EINVAL; 1007 break; /* not allowed */ 1008 } 1009 mutex_enter(&stmf_state.stmf_lock); 1010 ret = stmf_add_group(grpname->name, 1011 grpname->name_size, idtype, &iocd->stmf_error); 1012 mutex_exit(&stmf_state.stmf_lock); 1013 break; 1014 case STMF_IOCTL_REMOVE_HOST_GROUP: 1015 idtype = STMF_ID_TYPE_HOST_GROUP; 1016 /* FALLTHROUGH */ 1017 case STMF_IOCTL_REMOVE_TARGET_GROUP: 1018 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1019 ret = EACCES; 1020 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1021 break; 1022 } 1023 grpname = (stmf_group_name_t *)ibuf; 1024 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP) 1025 idtype = STMF_ID_TYPE_TARGET_GROUP; 1026 if ((ibuf == NULL) || 1027 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1028 ret = EINVAL; 1029 break; 1030 } 1031 if (grpname->name[0] == '*') { 1032 ret = EINVAL; 1033 break; /* not allowed */ 1034 } 1035 mutex_enter(&stmf_state.stmf_lock); 1036 ret = stmf_remove_group(grpname->name, 1037 grpname->name_size, idtype, &iocd->stmf_error); 1038 mutex_exit(&stmf_state.stmf_lock); 1039 break; 1040 case 
STMF_IOCTL_VALIDATE_VIEW: 1041 case STMF_IOCTL_ADD_VIEW_ENTRY: 1042 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1043 ret = EACCES; 1044 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1045 break; 1046 } 1047 ve = (stmf_view_op_entry_t *)ibuf; 1048 if ((ibuf == NULL) || 1049 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) { 1050 ret = EINVAL; 1051 break; 1052 } 1053 if (!ve->ve_lu_number_valid) 1054 ve->ve_lu_nbr[2] = 0xFF; 1055 if (ve->ve_all_hosts) { 1056 ve->ve_host_group.name[0] = '*'; 1057 ve->ve_host_group.name_size = 1; 1058 } 1059 if (ve->ve_all_targets) { 1060 ve->ve_target_group.name[0] = '*'; 1061 ve->ve_target_group.name_size = 1; 1062 } 1063 if (ve->ve_ndx_valid) 1064 veid = ve->ve_ndx; 1065 else 1066 veid = 0xffffffff; 1067 mutex_enter(&stmf_state.stmf_lock); 1068 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) { 1069 ret = stmf_add_ve(ve->ve_host_group.name, 1070 ve->ve_host_group.name_size, 1071 ve->ve_target_group.name, 1072 ve->ve_target_group.name_size, 1073 ve->ve_guid, 1074 &veid, 1075 ve->ve_lu_nbr, 1076 &iocd->stmf_error); 1077 } else { /* STMF_IOCTL_VALIDATE_VIEW */ 1078 ret = stmf_validate_lun_ve(ve->ve_host_group.name, 1079 ve->ve_host_group.name_size, 1080 ve->ve_target_group.name, 1081 ve->ve_target_group.name_size, 1082 ve->ve_lu_nbr, 1083 &iocd->stmf_error); 1084 } 1085 mutex_exit(&stmf_state.stmf_lock); 1086 if (ret == 0 && 1087 (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) && 1088 iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) { 1089 stmf_view_op_entry_t *ve_ret = 1090 (stmf_view_op_entry_t *)obuf; 1091 iocd->stmf_obuf_nentries = 1; 1092 iocd->stmf_obuf_max_nentries = 1; 1093 if (!ve->ve_ndx_valid) { 1094 ve_ret->ve_ndx = veid; 1095 ve_ret->ve_ndx_valid = 1; 1096 } 1097 if (!ve->ve_lu_number_valid) { 1098 ve_ret->ve_lu_number_valid = 1; 1099 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8); 1100 } 1101 } 1102 break; 1103 case STMF_IOCTL_REMOVE_VIEW_ENTRY: 1104 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 
1105 ret = EACCES; 1106 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1107 break; 1108 } 1109 ve = (stmf_view_op_entry_t *)ibuf; 1110 if ((ibuf == NULL) || 1111 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) { 1112 ret = EINVAL; 1113 break; 1114 } 1115 if (!ve->ve_ndx_valid) { 1116 ret = EINVAL; 1117 break; 1118 } 1119 mutex_enter(&stmf_state.stmf_lock); 1120 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx, 1121 &iocd->stmf_error); 1122 mutex_exit(&stmf_state.stmf_lock); 1123 break; 1124 case STMF_IOCTL_GET_HG_LIST: 1125 id_list = &stmf_state.stmf_hg_list; 1126 /* FALLTHROUGH */ 1127 case STMF_IOCTL_GET_TG_LIST: 1128 if (cmd == STMF_IOCTL_GET_TG_LIST) 1129 id_list = &stmf_state.stmf_tg_list; 1130 mutex_enter(&stmf_state.stmf_lock); 1131 iocd->stmf_obuf_max_nentries = id_list->id_count; 1132 n = min(id_list->id_count, 1133 (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t)); 1134 iocd->stmf_obuf_nentries = n; 1135 id_entry = id_list->idl_head; 1136 grpname = (stmf_group_name_t *)obuf; 1137 for (i = 0; i < n; i++) { 1138 if (id_entry->id_data[0] == '*') { 1139 if (iocd->stmf_obuf_nentries > 0) { 1140 iocd->stmf_obuf_nentries--; 1141 } 1142 id_entry = id_entry->id_next; 1143 continue; 1144 } 1145 grpname->name_size = id_entry->id_data_size; 1146 bcopy(id_entry->id_data, grpname->name, 1147 id_entry->id_data_size); 1148 grpname++; 1149 id_entry = id_entry->id_next; 1150 } 1151 mutex_exit(&stmf_state.stmf_lock); 1152 break; 1153 case STMF_IOCTL_GET_HG_ENTRIES: 1154 id_list = &stmf_state.stmf_hg_list; 1155 /* FALLTHROUGH */ 1156 case STMF_IOCTL_GET_TG_ENTRIES: 1157 grpname = (stmf_group_name_t *)ibuf; 1158 if ((ibuf == NULL) || 1159 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1160 ret = EINVAL; 1161 break; 1162 } 1163 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) { 1164 id_list = &stmf_state.stmf_tg_list; 1165 } 1166 mutex_enter(&stmf_state.stmf_lock); 1167 id_entry = stmf_lookup_id(id_list, grpname->name_size, 1168 grpname->name); 1169 if 
(!id_entry) 1170 ret = ENODEV; 1171 else { 1172 stmf_ge_ident_t *grp_entry; 1173 id_list = (stmf_id_list_t *)id_entry->id_impl_specific; 1174 iocd->stmf_obuf_max_nentries = id_list->id_count; 1175 n = min(id_list->id_count, 1176 iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t)); 1177 iocd->stmf_obuf_nentries = n; 1178 id_entry = id_list->idl_head; 1179 grp_entry = (stmf_ge_ident_t *)obuf; 1180 for (i = 0; i < n; i++) { 1181 bcopy(id_entry->id_data, grp_entry->ident, 1182 id_entry->id_data_size); 1183 grp_entry->ident_size = id_entry->id_data_size; 1184 id_entry = id_entry->id_next; 1185 grp_entry++; 1186 } 1187 } 1188 mutex_exit(&stmf_state.stmf_lock); 1189 break; 1190 1191 case STMF_IOCTL_GET_VE_LIST: 1192 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t); 1193 mutex_enter(&stmf_state.stmf_lock); 1194 ve = (stmf_view_op_entry_t *)obuf; 1195 for (id_entry = stmf_state.stmf_luid_list.idl_head; 1196 id_entry; id_entry = id_entry->id_next) { 1197 for (view_entry = (stmf_view_entry_t *) 1198 id_entry->id_impl_specific; view_entry; 1199 view_entry = view_entry->ve_next) { 1200 iocd->stmf_obuf_max_nentries++; 1201 if (iocd->stmf_obuf_nentries >= n) 1202 continue; 1203 ve->ve_ndx_valid = 1; 1204 ve->ve_ndx = view_entry->ve_id; 1205 ve->ve_lu_number_valid = 1; 1206 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8); 1207 bcopy(view_entry->ve_luid->id_data, ve->ve_guid, 1208 view_entry->ve_luid->id_data_size); 1209 if (view_entry->ve_hg->id_data[0] == '*') { 1210 ve->ve_all_hosts = 1; 1211 } else { 1212 bcopy(view_entry->ve_hg->id_data, 1213 ve->ve_host_group.name, 1214 view_entry->ve_hg->id_data_size); 1215 ve->ve_host_group.name_size = 1216 view_entry->ve_hg->id_data_size; 1217 } 1218 1219 if (view_entry->ve_tg->id_data[0] == '*') { 1220 ve->ve_all_targets = 1; 1221 } else { 1222 bcopy(view_entry->ve_tg->id_data, 1223 ve->ve_target_group.name, 1224 view_entry->ve_tg->id_data_size); 1225 ve->ve_target_group.name_size = 1226 view_entry->ve_tg->id_data_size; 1227 } 1228 ve++; 
1229 iocd->stmf_obuf_nentries++; 1230 } 1231 } 1232 mutex_exit(&stmf_state.stmf_lock); 1233 break; 1234 1235 case STMF_IOCTL_LU_VE_LIST: 1236 p_id = (uint8_t *)ibuf; 1237 if ((iocd->stmf_ibuf_size != 16) || 1238 (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) { 1239 ret = EINVAL; 1240 break; 1241 } 1242 1243 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t); 1244 mutex_enter(&stmf_state.stmf_lock); 1245 ve = (stmf_view_op_entry_t *)obuf; 1246 for (id_entry = stmf_state.stmf_luid_list.idl_head; 1247 id_entry; id_entry = id_entry->id_next) { 1248 if (bcmp(id_entry->id_data, p_id, 16) != 0) 1249 continue; 1250 for (view_entry = (stmf_view_entry_t *) 1251 id_entry->id_impl_specific; view_entry; 1252 view_entry = view_entry->ve_next) { 1253 iocd->stmf_obuf_max_nentries++; 1254 if (iocd->stmf_obuf_nentries >= n) 1255 continue; 1256 ve->ve_ndx_valid = 1; 1257 ve->ve_ndx = view_entry->ve_id; 1258 ve->ve_lu_number_valid = 1; 1259 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8); 1260 bcopy(view_entry->ve_luid->id_data, ve->ve_guid, 1261 view_entry->ve_luid->id_data_size); 1262 if (view_entry->ve_hg->id_data[0] == '*') { 1263 ve->ve_all_hosts = 1; 1264 } else { 1265 bcopy(view_entry->ve_hg->id_data, 1266 ve->ve_host_group.name, 1267 view_entry->ve_hg->id_data_size); 1268 ve->ve_host_group.name_size = 1269 view_entry->ve_hg->id_data_size; 1270 } 1271 1272 if (view_entry->ve_tg->id_data[0] == '*') { 1273 ve->ve_all_targets = 1; 1274 } else { 1275 bcopy(view_entry->ve_tg->id_data, 1276 ve->ve_target_group.name, 1277 view_entry->ve_tg->id_data_size); 1278 ve->ve_target_group.name_size = 1279 view_entry->ve_tg->id_data_size; 1280 } 1281 ve++; 1282 iocd->stmf_obuf_nentries++; 1283 } 1284 break; 1285 } 1286 mutex_exit(&stmf_state.stmf_lock); 1287 break; 1288 1289 case STMF_IOCTL_LOAD_PP_DATA: 1290 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1291 ret = EACCES; 1292 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1293 break; 1294 } 1295 ppi = 
(stmf_ppioctl_data_t *)ibuf; 1296 if ((ppi == NULL) || 1297 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1298 ret = EINVAL; 1299 break; 1300 } 1301 /* returned token */ 1302 ppi_token = (uint64_t *)obuf; 1303 if ((ppi_token == NULL) || 1304 (iocd->stmf_obuf_size < sizeof (uint64_t))) { 1305 ret = EINVAL; 1306 break; 1307 } 1308 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error); 1309 break; 1310 1311 case STMF_IOCTL_GET_PP_DATA: 1312 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1313 ret = EACCES; 1314 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1315 break; 1316 } 1317 ppi = (stmf_ppioctl_data_t *)ibuf; 1318 if (ppi == NULL || 1319 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1320 ret = EINVAL; 1321 break; 1322 } 1323 ppi_out = (stmf_ppioctl_data_t *)obuf; 1324 if ((ppi_out == NULL) || 1325 (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) { 1326 ret = EINVAL; 1327 break; 1328 } 1329 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error); 1330 break; 1331 1332 case STMF_IOCTL_CLEAR_PP_DATA: 1333 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1334 ret = EACCES; 1335 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1336 break; 1337 } 1338 ppi = (stmf_ppioctl_data_t *)ibuf; 1339 if ((ppi == NULL) || 1340 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1341 ret = EINVAL; 1342 break; 1343 } 1344 ret = stmf_delete_ppd_ioctl(ppi); 1345 break; 1346 1347 case STMF_IOCTL_CLEAR_TRACE: 1348 stmf_trace_clear(); 1349 break; 1350 1351 case STMF_IOCTL_ADD_TRACE: 1352 if (iocd->stmf_ibuf_size && ibuf) { 1353 ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = '\0'; 1354 stmf_trace("\nstradm", "%s\n", ibuf); 1355 } 1356 break; 1357 1358 case STMF_IOCTL_GET_TRACE_POSITION: 1359 if (obuf && (iocd->stmf_obuf_size > 3)) { 1360 mutex_enter(&trace_buf_lock); 1361 *((int *)obuf) = trace_buf_curndx; 1362 mutex_exit(&trace_buf_lock); 1363 } else { 1364 ret = EINVAL; 1365 } 1366 break; 1367 1368 case 
STMF_IOCTL_GET_TRACE:
		/* copy a window of the trace buffer out to the caller */
		if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
			ret = EINVAL;
			break;
		}
		/* ibuf holds the starting offset into the trace buffer */
		i = *((int *)ibuf);
		if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
		    trace_buf_size)) {
			ret = EINVAL;
			break;
		}
		mutex_enter(&trace_buf_lock);
		bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
		mutex_exit(&trace_buf_lock);
		break;

	default:
		ret = ENOTTY;
	}

	/*
	 * On success always copy the iocdata (and obuf) back out; on
	 * failure only do so when a specific stmf_error was recorded,
	 * so the caller can see it.
	 */
	if (ret == 0) {
		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
	} else if (iocd->stmf_error) {
		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
	}
	if (obuf) {
		kmem_free(obuf, iocd->stmf_obuf_size);
		obuf = NULL;
	}
	if (ibuf) {
		kmem_free(ibuf, iocd->stmf_ibuf_size);
		ibuf = NULL;
	}
	kmem_free(iocd, sizeof (stmf_iocdata_t));
	return (ret);
}

/*
 * Derive the overall STMF service state from the states of all
 * registered local ports and logical units.
 * Caller must hold stmf_state.stmf_lock.
 */
static int
stmf_get_service_state()
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	int online = 0;
	int offline = 0;
	int onlining = 0;
	int offlining = 0;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state == STMF_STATE_OFFLINE)
			offline++;
		else if (ilport->ilport_state == STMF_STATE_ONLINE)
			online++;
		else if (ilport->ilport_state == STMF_STATE_ONLINING)
			onlining++;
		else if (ilport->ilport_state == STMF_STATE_OFFLINING)
			offlining++;
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state == STMF_STATE_OFFLINE)
			offline++;
		else if (ilu->ilu_state == STMF_STATE_ONLINE)
			online++;
		else if (ilu->ilu_state == STMF_STATE_ONLINING)
			onlining++;
		else if (ilu->ilu_state == STMF_STATE_OFFLINING)
			offlining++;
	}

	/* service running: report ONLINING while any object still is */
	if (stmf_state.stmf_service_running) {
		if (onlining)
			return (STMF_STATE_ONLINING);
		else
			return (STMF_STATE_ONLINE);
	}

	if (offlining) {
		return (STMF_STATE_OFFLINING);
	}

	return (STMF_STATE_OFFLINE);
}

/*
 * Transition the STMF service between ONLINE and OFFLINE per the caller
 * supplied state descriptor, onlining/offlining every registered lport
 * and LU as needed.  Returns 0 or an errno.
 *
 * NOTE(review): stmf_lock is deliberately dropped around the stmf_ctl()
 * calls; stmf_inventory_locked is what keeps the lists stable meanwhile.
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* only the exclusive opener may change service state */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	/* refuse to act while a transition is already in flight */
	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		if (std->config_state == STMF_CONFIG_INIT) {
			/* (re)initialize config: wipe ppds and views */
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/* going ONLINE: bring up ports and LUs without the lock */
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (stmf_state.stmf_default_lport_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (stmf_state.stmf_default_lu_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* going OFFLINE: take down every online port and LU */
	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}

/* Report the current service state and config state to the caller. */
static int
stmf_get_stmf_state(stmf_state_desc_t *std)
{
	mutex_enter(&stmf_state.stmf_lock);
	std->state =
stmf_get_service_state();
	std->config_state = stmf_state.stmf_config_state;
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}

/*
 * handles registration message from pppt for a logical unit
 */
stmf_status_t
stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
{
	stmf_i_lu_provider_t *ilp;
	stmf_lu_provider_t *lp;
	mutex_enter(&stmf_state.stmf_lock);
	/* find the LU provider named in the message and forward to it */
	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
		if (strcmp(msg->icrl_lu_provider_name,
		    ilp->ilp_lp->lp_name) == 0) {
			lp = ilp->ilp_lp;
			/* drop the lock before calling into the provider */
			mutex_exit(&stmf_state.stmf_lock);
			lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
			    msg->icrl_cb_arg_len, type);
			return (STMF_SUCCESS);
		}
	}
	/* no matching provider: silently succeed */
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * handles de-registration message from pppt for a logical unit
 */
stmf_status_t
stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
{
	stmf_i_lu_provider_t *ilp;
	stmf_lu_provider_t *lp;
	mutex_enter(&stmf_state.stmf_lock);
	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
		if (strcmp(msg->icrl_lu_provider_name,
		    ilp->ilp_lp->lp_name) == 0) {
			lp = ilp->ilp_lp;
			mutex_exit(&stmf_state.stmf_lock);
			lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
			    STMF_MSG_LU_DEREGISTER);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * helper function to find a task that matches a task_msgid
 * Returns NULL when the LU or the task cannot be found (e.g. the task
 * was already aborted or returned to the free list).
 */
scsi_task_t *
find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
{
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;

	mutex_enter(&stmf_state.stmf_lock);
	/* locate the LU by its 16-byte identifier */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
			break;
		}
	}

	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}

	mutex_enter(&ilu->ilu_task_lock);
	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		/* skip tasks that are freed or being aborted */
		mutex_enter(&itask->itask_mutex);
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			mutex_exit(&itask->itask_mutex);
			continue;
		}
		mutex_exit(&itask->itask_mutex);
		if (itask->itask_proxy_msg_id == task_msgid) {
			break;
		}
	}
	mutex_exit(&ilu->ilu_task_lock);
	mutex_exit(&stmf_state.stmf_lock);

	if (itask != NULL) {
		return (itask->itask_task);
	} else {
		/* task not found. Likely already aborted. */
		return (NULL);
	}
}

/*
 * message received from pppt/ic
 */
stmf_status_t
stmf_msg_rx(stmf_ic_msg_t *msg)
{
	mutex_enter(&stmf_state.stmf_lock);
	/* proxy traffic is only valid while ALUA is enabled */
	if (stmf_state.stmf_alua_state != 1) {
		mutex_exit(&stmf_state.stmf_lock);
		cmn_err(CE_WARN, "stmf alua state is disabled");
		ic_msg_free(msg);
		return (STMF_FAILURE);
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* dispatch on the inter-connect message type */
	switch (msg->icm_msg_type) {
	case STMF_ICM_REGISTER_LUN:
		(void) stmf_ic_lu_reg(
		    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
		    STMF_MSG_LU_REGISTER);
		break;
	case STMF_ICM_LUN_ACTIVE:
		(void) stmf_ic_lu_reg(
		    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
		    STMF_MSG_LU_ACTIVE);
		break;
	case STMF_ICM_DEREGISTER_LUN:
		(void) stmf_ic_lu_dereg(
		    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
		break;
	case STMF_ICM_SCSI_DATA:
		(void) stmf_ic_rx_scsi_data(
		    (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
		break;
	case STMF_ICM_SCSI_STATUS:
		(void) stmf_ic_rx_scsi_status(
		    (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
		break;
	case STMF_ICM_STATUS:
		(void) stmf_ic_rx_status(
		    (stmf_ic_status_msg_t *)msg->icm_msg);
		break;
default:
		cmn_err(CE_WARN, "unknown message received %d",
		    msg->icm_msg_type);
		ic_msg_free(msg);
		return (STMF_FAILURE);
	}
	ic_msg_free(msg);
	return (STMF_SUCCESS);
}

/*
 * Handle a status message from pppt.  Only REGISTER_PROXY_PORT success
 * replies are acted upon: they mark the matching local port as
 * proxy-registered.  Everything else is acknowledged and ignored.
 */
stmf_status_t
stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
{
	stmf_i_local_port_t *ilport;

	if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
		/* for now, ignore other message status */
		return (STMF_SUCCESS);
	}

	if (msg->ics_status != STMF_SUCCESS) {
		return (STMF_SUCCESS);
	}

	mutex_enter(&stmf_state.stmf_lock);
	/* match the reply to the port that sent the registration */
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (msg->ics_msgid == ilport->ilport_reg_msgid) {
			ilport->ilport_proxy_registered = 1;
			break;
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * handles scsi status message from pppt
 */
stmf_status_t
stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
{
	scsi_task_t *task;

	/* is this a task management command */
	if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
		return (STMF_SUCCESS);
	}

	task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);

	if (task == NULL) {
		/* task already gone (e.g. aborted); nothing to complete */
		return (STMF_SUCCESS);
	}

	/* propagate the peer's status/sense and complete the task */
	task->task_scsi_status = msg->icss_status;
	task->task_sense_data = msg->icss_sense;
	task->task_sense_length = msg->icss_sense_len;
	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);

	return (STMF_SUCCESS);
}

/*
 * handles scsi data message from pppt
 */
stmf_status_t
stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
{
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	stmf_xfer_data_t *xd = NULL;
	stmf_data_buf_t *dbuf;
	uint32_t sz, minsz, xd_sz, asz;

	/* is this a task management command */
	if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
		return (STMF_SUCCESS);
	}

	task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
	if (task == NULL) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		static uint64_t data_msg_id;
		stmf_status_t ic_ret = STMF_FAILURE;
		mutex_enter(&stmf_state.stmf_lock);
		data_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/*
		 * send xfer done status to pppt
		 * for now, set the session id to 0 as we cannot
		 * ascertain it since we cannot find the task
		 */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit proxy msg");
			}
		}
		return (STMF_FAILURE);
	}

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	dbuf = itask->itask_proxy_dbuf;

	task->task_cmd_xfer_length += msg->icsd_data_len;

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}

	sz = min(task->task_expected_xfer_length,
	    task->task_cmd_xfer_length);

	/*
	 * Copy the message payload into a freshly allocated xfer-data
	 * buffer (asz accounts for the flexible trailing buf[] space).
	 */
	xd_sz = msg->icsd_data_len;
	asz = xd_sz + sizeof (*xd) - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);

	if (xd == NULL) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_FAILURE);
	}

	xd->alloc_size = asz;
	xd->size_left = xd_sz;
	bcopy(msg->icsd_data, xd->buf, xd_sz);

	sz = min(sz, xd->size_left);
	xd->size_left = sz;
	minsz = min(512, sz);

	if (dbuf == NULL)
		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
	if (dbuf == NULL) {
		kmem_free(xd, xd->alloc_size);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_FAILURE);
	}
1864 dbuf->db_lu_private = xd; 1865 dbuf->db_relative_offset = task->task_nbytes_transferred; 1866 stmf_xd_to_dbuf(dbuf, 0); 1867 1868 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 1869 (void) stmf_xfer_data(task, dbuf, 0); 1870 return (STMF_SUCCESS); 1871 } 1872 1873 stmf_status_t 1874 stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf) 1875 { 1876 stmf_i_scsi_task_t *itask = 1877 (stmf_i_scsi_task_t *)task->task_stmf_private; 1878 stmf_i_local_port_t *ilport = 1879 (stmf_i_local_port_t *)task->task_lport->lport_stmf_private; 1880 stmf_ic_msg_t *ic_cmd_msg; 1881 stmf_ic_msg_status_t ic_ret; 1882 stmf_status_t ret = STMF_FAILURE; 1883 1884 if (stmf_state.stmf_alua_state != 1) { 1885 cmn_err(CE_WARN, "stmf alua state is disabled"); 1886 return (STMF_FAILURE); 1887 } 1888 1889 if (ilport->ilport_proxy_registered == 0) { 1890 return (STMF_FAILURE); 1891 } 1892 1893 mutex_enter(&stmf_state.stmf_lock); 1894 itask->itask_proxy_msg_id = stmf_proxy_msg_id++; 1895 mutex_exit(&stmf_state.stmf_lock); 1896 itask->itask_proxy_dbuf = dbuf; 1897 1898 /* 1899 * stmf will now take over the task handling for this task 1900 * but it still needs to be treated differently from other 1901 * default handled tasks, hence the ITASK_PROXY_TASK. 1902 * If this is a task management function, we're really just 1903 * duping the command to the peer. Set the TM bit so that 1904 * we can recognize this on return since we won't be completing 1905 * the proxied task in that case. 
1906 */ 1907 mutex_enter(&itask->itask_mutex); 1908 if (task->task_mgmt_function) { 1909 itask->itask_proxy_msg_id |= MSG_ID_TM_BIT; 1910 } else { 1911 if (itask->itask_flags & ITASK_BEING_ABORTED) { 1912 mutex_exit(&itask->itask_mutex); 1913 return (STMF_FAILURE); 1914 } 1915 itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK; 1916 } 1917 if (dbuf) { 1918 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id, 1919 task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr, 1920 itask->itask_proxy_msg_id); 1921 } else { 1922 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id, 1923 task, 0, NULL, itask->itask_proxy_msg_id); 1924 } 1925 mutex_exit(&itask->itask_mutex); 1926 if (ic_cmd_msg) { 1927 ic_ret = ic_tx_msg(ic_cmd_msg); 1928 if (ic_ret == STMF_IC_MSG_SUCCESS) { 1929 ret = STMF_SUCCESS; 1930 } 1931 } 1932 return (ret); 1933 } 1934 1935 1936 stmf_status_t 1937 pppt_modload() 1938 { 1939 int error; 1940 1941 if (pppt_mod == NULL && ((pppt_mod = 1942 ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) { 1943 cmn_err(CE_WARN, "Unable to load pppt"); 1944 return (STMF_FAILURE); 1945 } 1946 1947 if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc = 1948 (stmf_ic_reg_port_msg_alloc_func_t) 1949 ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc", 1950 &error)) == NULL)) { 1951 cmn_err(CE_WARN, 1952 "Unable to find symbol - stmf_ic_reg_port_msg_alloc"); 1953 return (STMF_FAILURE); 1954 } 1955 1956 1957 if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc = 1958 (stmf_ic_dereg_port_msg_alloc_func_t) 1959 ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc", 1960 &error)) == NULL)) { 1961 cmn_err(CE_WARN, 1962 "Unable to find symbol - stmf_ic_dereg_port_msg_alloc"); 1963 return (STMF_FAILURE); 1964 } 1965 1966 if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc = 1967 (stmf_ic_reg_lun_msg_alloc_func_t) 1968 ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc", 1969 &error)) == NULL)) { 1970 cmn_err(CE_WARN, 1971 
"Unable to find symbol - stmf_ic_reg_lun_msg_alloc"); 1972 return (STMF_FAILURE); 1973 } 1974 1975 if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc = 1976 (stmf_ic_lun_active_msg_alloc_func_t) 1977 ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc", 1978 &error)) == NULL)) { 1979 cmn_err(CE_WARN, 1980 "Unable to find symbol - stmf_ic_lun_active_msg_alloc"); 1981 return (STMF_FAILURE); 1982 } 1983 1984 if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc = 1985 (stmf_ic_dereg_lun_msg_alloc_func_t) 1986 ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc", 1987 &error)) == NULL)) { 1988 cmn_err(CE_WARN, 1989 "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc"); 1990 return (STMF_FAILURE); 1991 } 1992 1993 if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc = 1994 (stmf_ic_scsi_cmd_msg_alloc_func_t) 1995 ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc", 1996 &error)) == NULL)) { 1997 cmn_err(CE_WARN, 1998 "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc"); 1999 return (STMF_FAILURE); 2000 } 2001 2002 if (ic_scsi_data_xfer_done_msg_alloc == NULL && 2003 ((ic_scsi_data_xfer_done_msg_alloc = 2004 (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t) 2005 ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc", 2006 &error)) == NULL)) { 2007 cmn_err(CE_WARN, 2008 "Unable to find symbol -" 2009 "stmf_ic_scsi_data_xfer_done_msg_alloc"); 2010 return (STMF_FAILURE); 2011 } 2012 2013 if (ic_session_reg_msg_alloc == NULL && 2014 ((ic_session_reg_msg_alloc = 2015 (stmf_ic_session_create_msg_alloc_func_t) 2016 ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc", 2017 &error)) == NULL)) { 2018 cmn_err(CE_WARN, 2019 "Unable to find symbol -" 2020 "stmf_ic_session_create_msg_alloc"); 2021 return (STMF_FAILURE); 2022 } 2023 2024 if (ic_session_dereg_msg_alloc == NULL && 2025 ((ic_session_dereg_msg_alloc = 2026 (stmf_ic_session_destroy_msg_alloc_func_t) 2027 ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc", 2028 &error)) == NULL)) { 
2029 cmn_err(CE_WARN, 2030 "Unable to find symbol -" 2031 "stmf_ic_session_destroy_msg_alloc"); 2032 return (STMF_FAILURE); 2033 } 2034 2035 if (ic_tx_msg == NULL && ((ic_tx_msg = 2036 (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg", 2037 &error)) == NULL)) { 2038 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg"); 2039 return (STMF_FAILURE); 2040 } 2041 2042 if (ic_msg_free == NULL && ((ic_msg_free = 2043 (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free", 2044 &error)) == NULL)) { 2045 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free"); 2046 return (STMF_FAILURE); 2047 } 2048 return (STMF_SUCCESS); 2049 } 2050 2051 static void 2052 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state) 2053 { 2054 mutex_enter(&stmf_state.stmf_lock); 2055 alua_state->alua_node = stmf_state.stmf_alua_node; 2056 alua_state->alua_state = stmf_state.stmf_alua_state; 2057 mutex_exit(&stmf_state.stmf_lock); 2058 } 2059 2060 2061 static int 2062 stmf_set_alua_state(stmf_alua_state_desc_t *alua_state) 2063 { 2064 stmf_i_local_port_t *ilport; 2065 stmf_i_lu_t *ilu; 2066 stmf_lu_t *lu; 2067 stmf_ic_msg_status_t ic_ret; 2068 stmf_ic_msg_t *ic_reg_lun, *ic_reg_port; 2069 stmf_local_port_t *lport; 2070 int ret = 0; 2071 2072 if (alua_state->alua_state > 1 || alua_state->alua_node > 1) { 2073 return (EINVAL); 2074 } 2075 2076 mutex_enter(&stmf_state.stmf_lock); 2077 if (alua_state->alua_state == 1) { 2078 if (pppt_modload() == STMF_FAILURE) { 2079 ret = EIO; 2080 goto err; 2081 } 2082 if (alua_state->alua_node != 0) { 2083 /* reset existing rtpids to new base */ 2084 stmf_rtpid_counter = 255; 2085 } 2086 stmf_state.stmf_alua_node = alua_state->alua_node; 2087 stmf_state.stmf_alua_state = 1; 2088 /* register existing local ports with ppp */ 2089 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2090 ilport = ilport->ilport_next) { 2091 /* skip standby ports and non-alua participants */ 2092 if (ilport->ilport_standby == 1 || 2093 
ilport->ilport_alua == 0) { 2094 continue; 2095 } 2096 if (alua_state->alua_node != 0) { 2097 ilport->ilport_rtpid = 2098 atomic_inc_16_nv(&stmf_rtpid_counter); 2099 } 2100 lport = ilport->ilport_lport; 2101 ic_reg_port = ic_reg_port_msg_alloc( 2102 lport->lport_id, ilport->ilport_rtpid, 2103 0, NULL, stmf_proxy_msg_id); 2104 if (ic_reg_port) { 2105 ic_ret = ic_tx_msg(ic_reg_port); 2106 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2107 ilport->ilport_reg_msgid = 2108 stmf_proxy_msg_id++; 2109 } else { 2110 cmn_err(CE_WARN, 2111 "error on port registration " 2112 "port - %s", 2113 ilport->ilport_kstat_tgt_name); 2114 } 2115 } 2116 } 2117 /* register existing logical units */ 2118 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; 2119 ilu = ilu->ilu_next) { 2120 if (ilu->ilu_access != STMF_LU_ACTIVE) { 2121 continue; 2122 } 2123 /* register with proxy module */ 2124 lu = ilu->ilu_lu; 2125 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 2126 lu->lu_lp->lp_alua_support) { 2127 ilu->ilu_alua = 1; 2128 /* allocate the register message */ 2129 ic_reg_lun = ic_reg_lun_msg_alloc( 2130 lu->lu_id->ident, lu->lu_lp->lp_name, 2131 lu->lu_proxy_reg_arg_len, 2132 (uint8_t *)lu->lu_proxy_reg_arg, 2133 stmf_proxy_msg_id); 2134 /* send the message */ 2135 if (ic_reg_lun) { 2136 ic_ret = ic_tx_msg(ic_reg_lun); 2137 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2138 stmf_proxy_msg_id++; 2139 } 2140 } 2141 } 2142 } 2143 } else { 2144 stmf_state.stmf_alua_state = 0; 2145 } 2146 2147 err: 2148 mutex_exit(&stmf_state.stmf_lock); 2149 return (ret); 2150 } 2151 2152 2153 typedef struct { 2154 void *bp; /* back pointer from internal struct to main struct */ 2155 int alloc_size; 2156 } __istmf_t; 2157 2158 typedef struct { 2159 __istmf_t *fp; /* Framework private */ 2160 void *cp; /* Caller private */ 2161 void *ss; /* struct specific */ 2162 } __stmf_t; 2163 2164 static struct { 2165 int shared; 2166 int fw_private; 2167 } stmf_sizes[] = { { 0, 0 }, 2168 { GET_STRUCT_SIZE(stmf_lu_provider_t), 2169 
	    GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
	    GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
	    GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
	    GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
	    GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
	    GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
	    GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
	    GET_STRUCT_SIZE(__istmf_t) }

};

/*
 * Allocate a framework object of the given struct id plus additional_size
 * bytes of caller-private space (rounded up to an 8-byte multiple).  The
 * single allocation is laid out as:
 *
 *	[ shared struct | caller private (additional_size) | fw private ]
 *
 * Uses KM_NOSLEEP when called from interrupt context or when the caller
 * passes AF_FORCE_NOSLEEP, so NULL is a possible return in those cases.
 * AF_DONTZERO skips zero-initialization of the allocation.
 */
void *
stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
{
	int stmf_size;
	int kmem_flag;
	__stmf_t *sh;

	if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
		return (NULL);

	if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
		kmem_flag = KM_NOSLEEP;
	} else {
		kmem_flag = KM_SLEEP;
	}

	/* round caller-private size up to an 8-byte boundary */
	additional_size = (additional_size + 7) & (~7);
	stmf_size = stmf_sizes[struct_id].shared +
	    stmf_sizes[struct_id].fw_private + additional_size;

	if (flags & AF_DONTZERO)
		sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
	else
		sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);

	if (sh == NULL)
		return (NULL);

	/*
	 * In principle, the implementation inside stmf_alloc should not
	 * be changed anyway. But the original order of framework private
	 * data and caller private data does not support sglist in the caller
	 * private data.
	 * To work around this, the memory segments of framework private
	 * data and caller private data are re-ordered here.
	 * A better solution is to provide a specific interface to allocate
	 * the sglist, then we will not need this workaround any more.
	 * But before the new interface is available, the memory segment
	 * ordering should be kept as is.
	 */
	sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
	sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
	    stmf_sizes[struct_id].shared + additional_size);

	sh->fp->bp = sh;
	/* Just store the total size instead of storing additional size */
	sh->fp->alloc_size = stmf_size;

	return (sh);
}

/*
 * Free an object previously returned by stmf_alloc(); the total size was
 * recorded in the framework-private trailer at allocation time.
 */
void
stmf_free(void *ptr)
{
	__stmf_t *sh = (__stmf_t *)ptr;

	/*
	 * So far we dont need any struct specific processing. If such
	 * a need ever arises, then store the struct id in the framework
	 * private section and get it here as sh->fp->struct_id.
	 */
	kmem_free(ptr, sh->fp->alloc_size);
}

/*
 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
 * framework and returns a pointer to framework private data for the lu.
 * Returns NULL if the lu was not found.
 * Caller must hold stmf_lock.
 */
stmf_i_lu_t *
stmf_lookup_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		if (ilu->ilu_lu == lu)
			return (ilu);
	}
	return (NULL);
}

/*
 * Given a pointer to stmf_local_port_t, verifies if this lport is registered
 * with the framework and returns a pointer to framework private data for
 * the lport.
 * Returns NULL if the lport was not found.
 * Caller must hold stmf_lock.
 */
stmf_i_local_port_t *
stmf_lookup_lport(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;
	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_lport == lport)
			return (ilport);
	}
	return (NULL);
}

/*
 * Register a logical unit provider with the framework.  Only LPIF_REV_1
 * and LPIF_REV_2 providers are accepted.  If persistent provider data
 * (ppd) already exists for this provider name, deliver it via the
 * provider's callback with STMF_PCB_PREG_COMPLETE.
 */
stmf_status_t
stmf_register_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_next = stmf_state.stmf_ilplist;
	stmf_state.stmf_ilplist = ilp;
	stmf_state.stmf_nlps++;

	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rlp_bail_out;
	}
	ilp->ilp_ppd = ppd;
	ppd->ppd_provider = ilp;
	if (lp->lp_cb == NULL)
		goto rlp_bail_out;
	/*
	 * Drop stmf_lock across the provider callback;
	 * ilp_cb_in_progress keeps the provider from being deregistered
	 * while the lock is released.
	 */
	ilp->ilp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_cb_in_progress = 0;

rlp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}

/*
 * Deregister a logical unit provider.  Fails with STMF_BUSY while the
 * provider still has LUs registered or a data callback is in flight.
 */
stmf_status_t
stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t **ppilp;
	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;

	mutex_enter(&stmf_state.stmf_lock);
	if (ilp->ilp_nlus ||
	    ilp->ilp_cb_in_progress) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
	    ppilp = &((*ppilp)->ilp_next)) {
		if (*ppilp == ilp) {
			/* unlink and detach any persistent provider data */
			*ppilp = ilp->ilp_next;
			stmf_state.stmf_nlps--;
			if (ilp->ilp_ppd) {
				ilp->ilp_ppd->ppd_provider = NULL;
				ilp->ilp_ppd = NULL;
			}
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_NOT_FOUND);
}

/*
 * Register a port provider with the framework.  Mirrors
 * stmf_register_lu_provider() for port providers; only PORTIF_REV_1 is
 * accepted.
 */
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	/* drop stmf_lock across the callback; guarded by ipp_cb_in_progress */
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}

/*
 * Deregister a port provider.  Fails with STMF_BUSY while the provider
 * still has ports registered or a data callback is in flight.
 */
stmf_status_t
stmf_deregister_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_i_port_provider_t **ppipp;

	mutex_enter(&stmf_state.stmf_lock);
	if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
	    ppipp = &((*ppipp)->ipp_next)) {
		if (*ppipp == ipp) {
			/* unlink and detach any persistent provider data */
			*ppipp = ipp->ipp_next;
			stmf_state.stmf_npps--;
			if (ipp->ipp_ppd) {
				ipp->ipp_ppd->ppd_provider = NULL;
				ipp->ipp_ppd = NULL;
			}
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_NOT_FOUND);
}

/*
 * Ioctl backend: load (set) persistent provider data for exactly one
 * provider class (ppi_lu_provider XOR ppi_port_provider).  ppi_data holds
 * an XDR-packed nvlist.  On success the new write token is returned via
 * ppi_token (if non-NULL).  If the caller supplied a token
 * (ppi_token_valid) and another update happened since the caller's get,
 * fails with EINVAL and *err_ret = STMF_IOCERR_PPD_UPDATED.  A registered
 * provider, if any, is notified via its data callback.
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t *ipp;
	stmf_i_lu_provider_t *ilp;
	stmf_pp_data_t *ppd;
	nvlist_t *nv;
	int s;
	int ret;

	*err_ret = 0;

	/* exactly one of lu/port provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * ppd_name is the trailing field of stmf_pp_data_t; the -7
		 * adjusts for the space already present in the struct so
		 * the name (plus NUL) fits.  NOTE(review): assumes an
		 * 8-byte ppd_name field — confirm against stmf_pp_data_t.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			/* drop stmf_lock across the provider callback */
			ilp->ilp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv,
			    cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			/* drop stmf_lock across the provider callback */
			ipp->ipp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}

/*
 * Unlink a ppd from the global list and free it (including its nvlist).
 * Clears the back-pointer from the owning provider, if any.
 * Caller must hold stmf_lock.
 */
void
stmf_delete_ppd(stmf_pp_data_t *ppd)
{
	stmf_pp_data_t **pppd;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	if (ppd->ppd_provider) {
		if (ppd->ppd_lu_provider) {
			((stmf_i_lu_provider_t *)
			    ppd->ppd_provider)->ilp_ppd = NULL;
		} else {
			((stmf_i_port_provider_t *)
			    ppd->ppd_provider)->ipp_ppd = NULL;
		}
		ppd->ppd_provider = NULL;
	}

	for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
	    pppd = &((*pppd)->ppd_next)) {
		if (*pppd == ppd)
			break;
	}

	/* not on the list; nothing to do */
	if (*pppd == NULL)
		return;

	*pppd = ppd->ppd_next;
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);

	kmem_free(ppd, ppd->ppd_alloc_size);
}

/*
 * Ioctl backend: delete the persistent provider data matching the given
 * provider class and name.  Returns 0 on success, ENOENT if not found.
 */
int
stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
{
	stmf_pp_data_t *ppd;
	int ret = ENOENT;

	/* exactly one of lu/port provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd) {
		ret = 0;
		stmf_delete_ppd(ppd);
	}

mutex_exit(&stmf_state.stmf_lock); 2637 2638 return (ret); 2639 } 2640 2641 int 2642 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out, 2643 uint32_t *err_ret) 2644 { 2645 stmf_pp_data_t *ppd; 2646 size_t req_size; 2647 int ret = ENOENT; 2648 char *bufp = (char *)ppi_out->ppi_data; 2649 2650 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) { 2651 return (EINVAL); 2652 } 2653 2654 mutex_enter(&stmf_state.stmf_lock); 2655 2656 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2657 if (ppi->ppi_lu_provider) { 2658 if (!ppd->ppd_lu_provider) 2659 continue; 2660 } else if (ppi->ppi_port_provider) { 2661 if (!ppd->ppd_port_provider) 2662 continue; 2663 } 2664 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0) 2665 break; 2666 } 2667 2668 if (ppd && ppd->ppd_nv) { 2669 ppi_out->ppi_token = ppd->ppd_token; 2670 if ((ret = nvlist_size(ppd->ppd_nv, &req_size, 2671 NV_ENCODE_XDR)) != 0) { 2672 goto done; 2673 } 2674 ppi_out->ppi_data_size = req_size; 2675 if (req_size > ppi->ppi_data_size) { 2676 *err_ret = STMF_IOCERR_INSUFFICIENT_BUF; 2677 ret = EINVAL; 2678 goto done; 2679 } 2680 2681 if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size, 2682 NV_ENCODE_XDR, 0)) != 0) { 2683 goto done; 2684 } 2685 ret = 0; 2686 } 2687 2688 done: 2689 mutex_exit(&stmf_state.stmf_lock); 2690 2691 return (ret); 2692 } 2693 2694 void 2695 stmf_delete_all_ppds() 2696 { 2697 stmf_pp_data_t *ppd, *nppd; 2698 2699 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2700 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) { 2701 nppd = ppd->ppd_next; 2702 stmf_delete_ppd(ppd); 2703 } 2704 } 2705 2706 /* 2707 * 16 is the max string length of a protocol_ident, increase 2708 * the size if needed. 
 */
#define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
#define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)
#define	STMF_KSTAT_RPORT_DATAMAX	(sizeof (stmf_kstat_rport_info_t) / \
					    sizeof (kstat_named_t))

/*
 * This array matches the Protocol Identifier in stmf_ioctl.h
 */
#define	MAX_PROTO_STR_LEN	32

char *protocol_ident[PROTOCOL_ANY] = {
	"Fibre Channel",
	"Parallel SCSI",
	"SSA",
	"IEEE_1394",
	"SRP",
	"iSCSI",
	"SAS",
	"ADT",
	"ATAPI",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
};

/*
 * Update the lun wait/run queue count.
 * func is a kstat queue-update routine (e.g. kstat_waitq_enter-style);
 * it is applied to the LU's io kstat.  dlun0 carries no kstat and is
 * skipped.
 */
static void
stmf_update_kstat_lu_q(scsi_task_t *task, void func())
{
	stmf_i_lu_t *ilu;
	kstat_io_t *kip;

	if (task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
		if (kip != NULL) {
			func(kip);
		}
	}
}

/*
 * Update the target(lport) wait/run queue count.
 * Unlike the LU variant, the update runs under the kstat's ks_lock.
 */
static void
stmf_update_kstat_lport_q(scsi_task_t *task, void func())
{
	stmf_i_local_port_t *ilp;
	kstat_io_t *kip;

	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilp->ilport_kstat_io->ks_lock);
			func(kip);
			mutex_exit(ilp->ilport_kstat_io->ks_lock);
		}
	}
}

/*
 * Account the data buffer's transfer against the local port's io kstat.
 */
static void
stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_local_port_t *ilp;
	kstat_io_t *kip;

	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilp->ilport_kstat_io->ks_lock);
			STMF_UPDATE_KSTAT_IO(kip, dbuf);
			mutex_exit(ilp->ilport_kstat_io->ks_lock);
		}
	}
}

/*
 * Account the data buffer's transfer against the remote port's io kstat.
 */
static void
stmf_update_kstat_rport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_remote_port_t *irport;
	kstat_io_t *kip;

	iss = task->task_session->ss_stmf_private;
	irport = iss->iss_irport;
	if (irport->irport_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(irport->irport_kstat_io);
		mutex_enter(irport->irport_kstat_io->ks_lock);
		STMF_UPDATE_KSTAT_IO(kip, dbuf);
		mutex_exit(irport->irport_kstat_io->ks_lock);
	}
}

/*
 * Update the remote port's extended statistics (task counts and
 * read/write latency) as a task completes.  The per-direction outstanding
 * task counter is decremented; when it hits zero the accumulated latency
 * window is folded into the kstat and the window timestamps are reset.
 */
static void
stmf_update_kstat_rport_estat(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_session_t *iss;
	stmf_i_remote_port_t *irport;
	stmf_kstat_rport_estat_t *ks_estat;
	hrtime_t lat = 0;
	uint32_t n = 0;

	itask = task->task_stmf_private;
	iss = task->task_session->ss_stmf_private;
	irport = iss->iss_irport;

	if (irport->irport_kstat_estat == NULL)
		return;

	ks_estat = (stmf_kstat_rport_estat_t *)KSTAT_NAMED_PTR(
	    irport->irport_kstat_estat);

	mutex_enter(irport->irport_kstat_estat->ks_lock);

	if (task->task_flags & TF_READ_DATA)
		n = atomic_dec_32_nv(&irport->irport_nread_tasks);
	else if (task->task_flags & TF_WRITE_DATA)
		n = atomic_dec_32_nv(&irport->irport_nwrite_tasks);

	if (itask->itask_read_xfer > 0) {
		ks_estat->i_nread_tasks.value.ui64++;
		lat = stmf_update_rport_timestamps(
		    &irport->irport_rdstart_timestamp,
		    &irport->irport_rddone_timestamp, itask);
		/* last outstanding read: commit the latency window */
		if (n == 0)
			ks_estat->i_rport_read_latency.value.ui64 += lat;
	} else if ((itask->itask_write_xfer > 0) ||
	    (task->task_flags & TF_INITIAL_BURST)) {
		ks_estat->i_nwrite_tasks.value.ui64++;
		lat = stmf_update_rport_timestamps(
		    &irport->irport_wrstart_timestamp,
		    &irport->irport_wrdone_timestamp, itask);
		/* last outstanding write: commit the latency window */
		if (n == 0)
			ks_estat->i_rport_write_latency.value.ui64 += lat;
	}

	if (n == 0) {
		/* reset the window for the next burst of tasks */
		if (task->task_flags & TF_READ_DATA) {
			irport->irport_rdstart_timestamp = LLONG_MAX;
			irport->irport_rddone_timestamp = 0;
		} else if (task->task_flags & TF_WRITE_DATA) {
			irport->irport_wrstart_timestamp = LLONG_MAX;
			irport->irport_wrdone_timestamp = 0;
		}
	}

	mutex_exit(irport->irport_kstat_estat->ks_lock);
}

/*
 * Widen the [start, done] timestamp window to cover this task and return
 * the window's current span.  A done timestamp of 0 with no xfer-done
 * time on the task collapses the window to zero length.
 */
static hrtime_t
stmf_update_rport_timestamps(hrtime_t *start_tstamp, hrtime_t *done_tstamp,
    stmf_i_scsi_task_t *itask)
{
	*start_tstamp = MIN(*start_tstamp, itask->itask_start_timestamp);
	if ((*done_tstamp == 0) &&
	    (itask->itask_xfer_done_timestamp == 0)) {
		*done_tstamp = *start_tstamp;
	} else {
		*done_tstamp = MAX(*done_tstamp,
		    itask->itask_xfer_done_timestamp);
	}

	return (*done_tstamp - *start_tstamp);
}

/*
 * Account the data buffer's transfer against the LU's io kstat.
 */
static void
stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_lu_t *ilu;
	kstat_io_t *kip;

	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilu->ilu_kstat_io->ks_lock);
			STMF_UPDATE_KSTAT_IO(kip, dbuf);
			mutex_exit(ilu->ilu_kstat_io->ks_lock);
		}
	}
}

/*
 * Create the "misc" (guid/alias) and "io" kstats for a logical unit.
 * Failures are logged and tolerated; the LU simply runs without stats.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t *ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if
((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0, 2914 ks_nm, "misc", KSTAT_TYPE_NAMED, 2915 sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t), 2916 KSTAT_FLAG_VIRTUAL)) == NULL) { 2917 kmem_free(ks_lu, STMF_KSTAT_LU_SZ); 2918 cmn_err(CE_WARN, "STMF: kstat_create lu failed"); 2919 return; 2920 } 2921 2922 ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ; 2923 ilu->ilu_kstat_info->ks_data = ks_lu; 2924 2925 kstat_named_init(&ks_lu->i_lun_guid, "lun-guid", 2926 KSTAT_DATA_STRING); 2927 kstat_named_init(&ks_lu->i_lun_alias, "lun-alias", 2928 KSTAT_DATA_STRING); 2929 2930 /* convert guid to hex string */ 2931 int i; 2932 uint8_t *p = ilu->ilu_lu->lu_id->ident; 2933 bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid)); 2934 for (i = 0; i < STMF_GUID_INPUT / 2; i++) { 2935 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]); 2936 } 2937 kstat_named_setstr(&ks_lu->i_lun_guid, 2938 (const char *)ilu->ilu_ascii_hex_guid); 2939 kstat_named_setstr(&ks_lu->i_lun_alias, 2940 (const char *)ilu->ilu_lu->lu_alias); 2941 kstat_install(ilu->ilu_kstat_info); 2942 2943 /* create kstat lun io */ 2944 bzero(ks_nm, sizeof (ks_nm)); 2945 (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu); 2946 if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 2947 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 2948 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed"); 2949 return; 2950 } 2951 mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0); 2952 ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock; 2953 kstat_install(ilu->ilu_kstat_io); 2954 } 2955 2956 static void 2957 stmf_create_kstat_lport(stmf_i_local_port_t *ilport) 2958 { 2959 char ks_nm[KSTAT_STRLEN]; 2960 stmf_kstat_tgt_info_t *ks_tgt; 2961 int id, len; 2962 2963 /* create kstat lport info */ 2964 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ, 2965 KM_NOSLEEP); 2966 if (ks_tgt == NULL) { 2967 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2968 return; 2969 } 2970 2971 
bzero(ks_nm, sizeof (ks_nm)); 2972 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport); 2973 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME, 2974 0, ks_nm, "misc", KSTAT_TYPE_NAMED, 2975 sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t), 2976 KSTAT_FLAG_VIRTUAL)) == NULL) { 2977 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ); 2978 cmn_err(CE_WARN, "STMF: kstat_create target failed"); 2979 return; 2980 } 2981 2982 ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ; 2983 ilport->ilport_kstat_info->ks_data = ks_tgt; 2984 2985 kstat_named_init(&ks_tgt->i_tgt_name, "target-name", 2986 KSTAT_DATA_STRING); 2987 kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias", 2988 KSTAT_DATA_STRING); 2989 kstat_named_init(&ks_tgt->i_protocol, "protocol", 2990 KSTAT_DATA_STRING); 2991 2992 /* ident might not be null terminated */ 2993 len = ilport->ilport_lport->lport_id->ident_length; 2994 bcopy(ilport->ilport_lport->lport_id->ident, 2995 ilport->ilport_kstat_tgt_name, len); 2996 ilport->ilport_kstat_tgt_name[len + 1] = '\0'; 2997 kstat_named_setstr(&ks_tgt->i_tgt_name, 2998 (const char *)ilport->ilport_kstat_tgt_name); 2999 kstat_named_setstr(&ks_tgt->i_tgt_alias, 3000 (const char *)ilport->ilport_lport->lport_alias); 3001 /* protocol */ 3002 if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) { 3003 cmn_err(CE_WARN, "STMF: protocol_id out of bound"); 3004 id = PROTOCOL_ANY; 3005 } 3006 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]); 3007 kstat_install(ilport->ilport_kstat_info); 3008 3009 /* create kstat lport io */ 3010 bzero(ks_nm, sizeof (ks_nm)); 3011 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport); 3012 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 3013 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 3014 cmn_err(CE_WARN, "STMF: kstat_create target_io failed"); 3015 return; 3016 } 3017 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0); 3018 
ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock; 3019 kstat_install(ilport->ilport_kstat_io); 3020 } 3021 3022 /* 3023 * set the asymmetric access state for a logical unit 3024 * caller is responsible for establishing SCSI unit attention on 3025 * state change 3026 */ 3027 stmf_status_t 3028 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state) 3029 { 3030 stmf_i_lu_t *ilu; 3031 uint8_t *p1, *p2; 3032 3033 if ((access_state != STMF_LU_STANDBY) && 3034 (access_state != STMF_LU_ACTIVE)) { 3035 return (STMF_INVALID_ARG); 3036 } 3037 3038 p1 = &lu->lu_id->ident[0]; 3039 mutex_enter(&stmf_state.stmf_lock); 3040 if (stmf_state.stmf_inventory_locked) { 3041 mutex_exit(&stmf_state.stmf_lock); 3042 return (STMF_BUSY); 3043 } 3044 3045 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 3046 p2 = &ilu->ilu_lu->lu_id->ident[0]; 3047 if (bcmp(p1, p2, 16) == 0) { 3048 break; 3049 } 3050 } 3051 3052 if (!ilu) { 3053 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3054 } else { 3055 /* 3056 * We're changing access state on an existing logical unit 3057 * Send the proxy registration message for this logical unit 3058 * if we're in alua mode. 3059 * If the requested state is STMF_LU_ACTIVE, we want to register 3060 * this logical unit. 3061 * If the requested state is STMF_LU_STANDBY, we're going to 3062 * abort all tasks for this logical unit. 
3063 */ 3064 if (stmf_state.stmf_alua_state == 1 && 3065 access_state == STMF_LU_ACTIVE) { 3066 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 3067 stmf_ic_msg_t *ic_reg_lun; 3068 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 3069 lu->lu_lp->lp_alua_support) { 3070 ilu->ilu_alua = 1; 3071 /* allocate the register message */ 3072 ic_reg_lun = ic_lun_active_msg_alloc(p1, 3073 lu->lu_lp->lp_name, 3074 lu->lu_proxy_reg_arg_len, 3075 (uint8_t *)lu->lu_proxy_reg_arg, 3076 stmf_proxy_msg_id); 3077 /* send the message */ 3078 if (ic_reg_lun) { 3079 ic_ret = ic_tx_msg(ic_reg_lun); 3080 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3081 stmf_proxy_msg_id++; 3082 } 3083 } 3084 } 3085 } else if (stmf_state.stmf_alua_state == 1 && 3086 access_state == STMF_LU_STANDBY) { 3087 /* abort all tasks for this lu */ 3088 stmf_task_lu_killall(lu, NULL, STMF_ABORTED); 3089 } 3090 } 3091 3092 ilu->ilu_access = access_state; 3093 3094 mutex_exit(&stmf_state.stmf_lock); 3095 return (STMF_SUCCESS); 3096 } 3097 3098 3099 stmf_status_t 3100 stmf_register_lu(stmf_lu_t *lu) 3101 { 3102 stmf_i_lu_t *ilu; 3103 uint8_t *p1, *p2; 3104 stmf_state_change_info_t ssci; 3105 stmf_id_data_t *luid; 3106 3107 if ((lu->lu_id->ident_type != ID_TYPE_NAA) || 3108 (lu->lu_id->ident_length != 16) || 3109 ((lu->lu_id->ident[0] & 0xf0) != 0x60)) { 3110 return (STMF_INVALID_ARG); 3111 } 3112 p1 = &lu->lu_id->ident[0]; 3113 mutex_enter(&stmf_state.stmf_lock); 3114 if (stmf_state.stmf_inventory_locked) { 3115 mutex_exit(&stmf_state.stmf_lock); 3116 return (STMF_BUSY); 3117 } 3118 3119 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 3120 p2 = &ilu->ilu_lu->lu_id->ident[0]; 3121 if (bcmp(p1, p2, 16) == 0) { 3122 mutex_exit(&stmf_state.stmf_lock); 3123 return (STMF_ALREADY); 3124 } 3125 } 3126 3127 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3128 luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 3129 lu->lu_id->ident_length, lu->lu_id->ident); 3130 if (luid) { 3131 luid->id_pt_to_object = 
(void *)ilu; 3132 ilu->ilu_luid = luid; 3133 } 3134 ilu->ilu_alias = NULL; 3135 3136 ilu->ilu_next = stmf_state.stmf_ilulist; 3137 ilu->ilu_prev = NULL; 3138 if (ilu->ilu_next) 3139 ilu->ilu_next->ilu_prev = ilu; 3140 stmf_state.stmf_ilulist = ilu; 3141 stmf_state.stmf_nlus++; 3142 if (lu->lu_lp) { 3143 ((stmf_i_lu_provider_t *) 3144 (lu->lu_lp->lp_stmf_private))->ilp_nlus++; 3145 } 3146 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 3147 STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl); 3148 cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL); 3149 stmf_create_kstat_lu(ilu); 3150 /* 3151 * register with proxy module if available and logical unit 3152 * is in active state 3153 */ 3154 if (stmf_state.stmf_alua_state == 1 && 3155 ilu->ilu_access == STMF_LU_ACTIVE) { 3156 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 3157 stmf_ic_msg_t *ic_reg_lun; 3158 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 3159 lu->lu_lp->lp_alua_support) { 3160 ilu->ilu_alua = 1; 3161 /* allocate the register message */ 3162 ic_reg_lun = ic_reg_lun_msg_alloc(p1, 3163 lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len, 3164 (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id); 3165 /* send the message */ 3166 if (ic_reg_lun) { 3167 ic_ret = ic_tx_msg(ic_reg_lun); 3168 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3169 stmf_proxy_msg_id++; 3170 } 3171 } 3172 } 3173 } 3174 mutex_exit(&stmf_state.stmf_lock); 3175 3176 /* check the default state for lu */ 3177 if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) { 3178 ilu->ilu_prev_state = STMF_STATE_OFFLINE; 3179 } else { 3180 ilu->ilu_prev_state = STMF_STATE_ONLINE; 3181 if (stmf_state.stmf_service_running) { 3182 ssci.st_rflags = 0; 3183 ssci.st_additional_info = NULL; 3184 (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci); 3185 } 3186 } 3187 3188 /* XXX: Generate event */ 3189 return (STMF_SUCCESS); 3190 } 3191 3192 stmf_status_t 3193 stmf_deregister_lu(stmf_lu_t *lu) 3194 { 3195 stmf_i_lu_t *ilu; 3196 3197 
mutex_enter(&stmf_state.stmf_lock); 3198 if (stmf_state.stmf_inventory_locked) { 3199 mutex_exit(&stmf_state.stmf_lock); 3200 return (STMF_BUSY); 3201 } 3202 ilu = stmf_lookup_lu(lu); 3203 if (ilu == NULL) { 3204 mutex_exit(&stmf_state.stmf_lock); 3205 return (STMF_INVALID_ARG); 3206 } 3207 if (ilu->ilu_state == STMF_STATE_OFFLINE) { 3208 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 3209 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) { 3210 cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock); 3211 } 3212 if (ilu->ilu_ntasks) { 3213 stmf_i_scsi_task_t *itask, *nitask; 3214 3215 nitask = ilu->ilu_tasks; 3216 do { 3217 itask = nitask; 3218 nitask = itask->itask_lu_next; 3219 lu->lu_task_free(itask->itask_task); 3220 stmf_free(itask->itask_task); 3221 } while (nitask != NULL); 3222 3223 ilu->ilu_tasks = ilu->ilu_free_tasks = NULL; 3224 ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0; 3225 } 3226 /* de-register with proxy if available */ 3227 if (ilu->ilu_access == STMF_LU_ACTIVE && 3228 stmf_state.stmf_alua_state == 1) { 3229 /* de-register with proxy module */ 3230 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 3231 stmf_ic_msg_t *ic_dereg_lun; 3232 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 3233 lu->lu_lp->lp_alua_support) { 3234 ilu->ilu_alua = 1; 3235 /* allocate the de-register message */ 3236 ic_dereg_lun = ic_dereg_lun_msg_alloc( 3237 lu->lu_id->ident, lu->lu_lp->lp_name, 0, 3238 NULL, stmf_proxy_msg_id); 3239 /* send the message */ 3240 if (ic_dereg_lun) { 3241 ic_ret = ic_tx_msg(ic_dereg_lun); 3242 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3243 stmf_proxy_msg_id++; 3244 } 3245 } 3246 } 3247 } 3248 3249 if (ilu->ilu_next) 3250 ilu->ilu_next->ilu_prev = ilu->ilu_prev; 3251 if (ilu->ilu_prev) 3252 ilu->ilu_prev->ilu_next = ilu->ilu_next; 3253 else 3254 stmf_state.stmf_ilulist = ilu->ilu_next; 3255 stmf_state.stmf_nlus--; 3256 3257 if (ilu == stmf_state.stmf_svc_ilu_draining) { 3258 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 3259 } 3260 if (ilu 
 == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		/* Drop the owning LU provider's LU count */
		if (lu->lu_lp) {
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		/* Break the GUID-entry back-pointer to this LU */
		if (ilu->ilu_luid) {
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		/* LU still online/transitioning; caller must offline it first */
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data, STMF_KSTAT_LU_SZ);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	cv_destroy(&ilu->ilu_offline_pending_cv);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * Mark a local port as a standby (proxy) port with the given relative
 * target port identifier.  Standby ports are not assigned an rtpid by
 * stmf_register_local_port(); the peer node supplies it here.
 */
void
stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;
	ilport->ilport_rtpid = rtpid;
	ilport->ilport_standby = 1;
}

/*
 * Mark a local port as an ALUA participant so it is registered with the
 * proxy module when the ALUA state is active.
 */
void
stmf_set_port_alua(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;
	ilport->ilport_alua = 1;
}

/*
 * Register a local (target) port with STMF: link it into the global port
 * list, assign an instance and relative target port id, create its kstats
 * and optionally announce it to the ALUA proxy peer.  Returns STMF_BUSY
 * while the inventory is locked, STMF_FAILURE if no instance id is
 * available, STMF_SUCCESS otherwise.
 */
stmf_status_t
stmf_register_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;
	stmf_state_change_info_t ssci;
	int start_workers = 0;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
	rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);

	ilport->ilport_instance =
	    id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
	if (ilport->ilport_instance == -1) {
		/*
		 * NOTE(review): ilport_lock initialized above is not
		 * rw_destroy()ed on this path — confirm whether that is
		 * intentional (e.g. re-registration reuses it).
		 */
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_FAILURE);
	}
	/* Link the port at the head of the global port list */
	ilport->ilport_next = stmf_state.stmf_ilportlist;
	ilport->ilport_prev = NULL;
	if (ilport->ilport_next)
		ilport->ilport_next->ilport_prev = ilport;
	stmf_state.stmf_ilportlist = ilport;
	stmf_state.stmf_nlports++;
	if (lport->lport_pp) {
		((stmf_i_port_provider_t *)
		    (lport->lport_pp->pp_stmf_private))->ipp_npps++;
	}
	/* Bind the port to its target group, if one is configured */
	ilport->ilport_tg =
	    stmf_lookup_group_for_target(lport->lport_id->ident,
	    lport->lport_id->ident_length);

	/*
	 * rtpid will/must be set if this is a standby port
	 * only register ports that are not standby (proxy) ports
	 * and ports that are alua participants (ilport_alua == 1)
	 */
	if (ilport->ilport_standby == 0) {
		ilport->ilport_rtpid = atomic_inc_16_nv(&stmf_rtpid_counter);
	}

	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_reg_port;
		stmf_ic_msg_status_t ic_ret;
		stmf_local_port_t *lport;
		lport = ilport->ilport_lport;
		ic_reg_port = ic_reg_port_msg_alloc(
		    lport->lport_id, ilport->ilport_rtpid,
		    0, NULL, stmf_proxy_msg_id);
		if (ic_reg_port) {
			ic_ret = ic_tx_msg(ic_reg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* remember the msgid used for registration */
				ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
			} else {
				cmn_err(CE_WARN, "error on port registration "
				"port - %s", ilport->ilport_kstat_tgt_name);
			}
		}
	}
	STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
	stmf_create_kstat_lport(ilport);
	/* First port registration kicks off the worker threads */
	if (stmf_workers_state == STMF_WORKERS_DISABLED) {
		stmf_workers_state = STMF_WORKERS_ENABLING;
		start_workers = 1;
	}
	mutex_exit(&stmf_state.stmf_lock);

	if (start_workers)
		stmf_worker_init();

	/* the default state of LPORT */

	if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
		ilport->ilport_prev_state = STMF_STATE_OFFLINE;
	} else {
		ilport->ilport_prev_state = STMF_STATE_ONLINE;
		if (stmf_state.stmf_service_running) {
			ssci.st_rflags = 0;
			ssci.st_additional_info = NULL;
			/* bring the port online right away */
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
		}
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}

/*
 * Deregister a local port.  Fails with STMF_BUSY while the inventory is
 * locked or while sessions still exist on the port.  On success the port
 * is unlinked from the global list, its instance id and lock released and
 * its kstats deleted.  ALUA participant ports are also deregistered with
 * the proxy peer.
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* dequeue all object requests from active queue */
	stmf_svc_kill_obj_requests(lport);

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				stmf_proxy_msg_id++;
			}
		}
	}

	if (ilport->ilport_nsessions == 0) {
		/* Unlink from the global port list */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		id_free(stmf_state.stmf_ilport_inst_space,
		    ilport->ilport_instance);
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		/*
		 * NOTE(review): the proxy dereg message above was already
		 * sent even though we bail out busy here — confirm the peer
		 * tolerates a dereg for a port that remains registered.
		 */
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	if
 (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    STMF_KSTAT_TGT_SZ);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * Rport id/instance mappings remain valid until STMF is unloaded
 */

/*
 * AVL comparator for the remote port tree (stmf_state.stmf_irportlist).
 * Orders by devid code set, then ident length, then the ident bytes,
 * returning -1/0/1 as required by avl_create().
 */
static int
stmf_irport_compare(const void *void_irport1, const void *void_irport2)
{
	const stmf_i_remote_port_t *irport1 = void_irport1;
	const stmf_i_remote_port_t *irport2 = void_irport2;
	int result;

	/* Sort by code set then ident */
	if (irport1->irport_id->code_set <
	    irport2->irport_id->code_set) {
		return (-1);
	} else if (irport1->irport_id->code_set >
	    irport2->irport_id->code_set) {
		return (1);
	}

	/* Next by ident length */
	if (irport1->irport_id->ident_length <
	    irport2->irport_id->ident_length) {
		return (-1);
	} else if (irport1->irport_id->ident_length >
	    irport2->irport_id->ident_length) {
		return (1);
	}

	/* Code set and ident length both match, now compare idents */
	result = memcmp(irport1->irport_id->ident,
	    irport2->irport_id->ident,
	    irport1->irport_id->ident_length);

	if (result < 0) {
		return (-1);
	} else if (result > 0) {
		return (1);
	}

	return (0);
}

/*
 * Allocate and initialize a remote port context for the given devid.
 * The devid is copied into storage trailing the structure itself.
 * Returns NULL on allocation or instance-id exhaustion (KM_NOSLEEP).
 */
static stmf_i_remote_port_t *
stmf_irport_create(scsi_devid_desc_t *rport_devid)
{
	int alloc_len;
	stmf_i_remote_port_t *irport;

	/*
	 * Lookup will bump the refcnt if there's an existing rport
	 * context for this identifier.
	 */
	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	/*
	 * scsi_devid_desc_t already contains one ident byte, hence the -1
	 * when sizing the variable-length copy.
	 */
	alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
	    rport_devid->ident_length - 1;
	irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
	if (irport == NULL) {
		return (NULL);
	}

	irport->irport_instance =
	    id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
	if (irport->irport_instance == -1) {
		kmem_free(irport, alloc_len);
		return (NULL);
	}

	/* devid copy lives immediately after the structure */
	irport->irport_id =
	    (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
	bcopy(rport_devid, irport->irport_id,
	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
	irport->irport_refcnt = 1;
	mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
	/* LLONG_MAX == "no I/O in flight" sentinel for the timestamps */
	irport->irport_rdstart_timestamp = LLONG_MAX;
	irport->irport_wrstart_timestamp = LLONG_MAX;

	return (irport);
}

/*
 * Tear down a remote port context created by stmf_irport_create():
 * delete its kstats, release its instance id and free the structure.
 */
static void
stmf_irport_destroy(stmf_i_remote_port_t *irport)
{
	stmf_destroy_kstat_rport(irport);
	id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
	mutex_destroy(&irport->irport_mutex);
	kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
	    irport->irport_id->ident_length - 1);
}

/*
 * Create the three kstats for a remote port: the "misc" name/protocol
 * info kstat, the "io" kstat and the extended-stat ("st") kstat.
 * Allocation failures are reported via cmn_err at err_out but are
 * otherwise non-fatal — the port simply has no (or partial) kstats.
 */
static void
stmf_create_kstat_rport(stmf_i_remote_port_t *irport)
{
	scsi_devid_desc_t *id = irport->irport_id;
	char ks_nm[KSTAT_STRLEN];
	stmf_kstat_rport_info_t *ks_info;
	stmf_kstat_rport_estat_t *ks_estat;
	char *ident = NULL;

	ks_info = kmem_zalloc(sizeof (*ks_info), KM_NOSLEEP);
	if (ks_info == NULL)
		goto err_out;

	(void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_%"PRIxPTR"",
	    (uintptr_t)irport);
	irport->irport_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    STMF_KSTAT_RPORT_DATAMAX - STMF_RPORT_INFO_LIMIT,
	    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE);
	if (irport->irport_kstat_info == NULL) {
kmem_free(ks_info, sizeof (*ks_info)); 3578 goto err_out; 3579 } 3580 3581 irport->irport_kstat_info->ks_data = ks_info; 3582 irport->irport_kstat_info->ks_private = irport; 3583 irport->irport_kstat_info->ks_update = stmf_kstat_rport_update; 3584 ident = kmem_alloc(id->ident_length + 1, KM_NOSLEEP); 3585 if (ident == NULL) { 3586 kstat_delete(irport->irport_kstat_info); 3587 irport->irport_kstat_info = NULL; 3588 kmem_free(ks_info, sizeof (*ks_info)); 3589 goto err_out; 3590 } 3591 3592 (void) memcpy(ident, id->ident, id->ident_length); 3593 ident[id->ident_length] = '\0'; 3594 kstat_named_init(&ks_info->i_rport_name, "name", KSTAT_DATA_STRING); 3595 kstat_named_init(&ks_info->i_protocol, "protocol", 3596 KSTAT_DATA_STRING); 3597 3598 kstat_named_setstr(&ks_info->i_rport_name, ident); 3599 kstat_named_setstr(&ks_info->i_protocol, 3600 protocol_ident[irport->irport_id->protocol_id]); 3601 irport->irport_kstat_info->ks_lock = &irport->irport_mutex; 3602 irport->irport_info_dirty = B_TRUE; 3603 kstat_install(irport->irport_kstat_info); 3604 3605 (void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_io_%"PRIxPTR"", 3606 (uintptr_t)irport); 3607 irport->irport_kstat_io = kstat_create(STMF_MODULE_NAME, 0, ks_nm, 3608 "io", KSTAT_TYPE_IO, 1, 0); 3609 if (irport->irport_kstat_io == NULL) 3610 goto err_out; 3611 3612 irport->irport_kstat_io->ks_lock = &irport->irport_mutex; 3613 kstat_install(irport->irport_kstat_io); 3614 3615 (void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_st_%"PRIxPTR"", 3616 (uintptr_t)irport); 3617 irport->irport_kstat_estat = kstat_create(STMF_MODULE_NAME, 0, ks_nm, 3618 "misc", KSTAT_TYPE_NAMED, 3619 sizeof (*ks_estat) / sizeof (kstat_named_t), 0); 3620 if (irport->irport_kstat_estat == NULL) 3621 goto err_out; 3622 3623 ks_estat = (stmf_kstat_rport_estat_t *)KSTAT_NAMED_PTR( 3624 irport->irport_kstat_estat); 3625 kstat_named_init(&ks_estat->i_rport_read_latency, 3626 "rlatency", KSTAT_DATA_UINT64); 3627 
kstat_named_init(&ks_estat->i_rport_write_latency, 3628 "wlatency", KSTAT_DATA_UINT64); 3629 kstat_named_init(&ks_estat->i_nread_tasks, "rntasks", 3630 KSTAT_DATA_UINT64); 3631 kstat_named_init(&ks_estat->i_nwrite_tasks, "wntasks", 3632 KSTAT_DATA_UINT64); 3633 irport->irport_kstat_estat->ks_lock = &irport->irport_mutex; 3634 kstat_install(irport->irport_kstat_estat); 3635 3636 return; 3637 3638 err_out: 3639 (void) memcpy(ks_nm, id->ident, MAX(KSTAT_STRLEN - 1, 3640 id->ident_length)); 3641 ks_nm[id->ident_length] = '\0'; 3642 cmn_err(CE_WARN, "STMF: remote port kstat creation failed: %s", ks_nm); 3643 } 3644 3645 static void 3646 stmf_destroy_kstat_rport(stmf_i_remote_port_t *irport) 3647 { 3648 if (irport->irport_kstat_io != NULL) { 3649 kstat_delete(irport->irport_kstat_io); 3650 } 3651 if (irport->irport_kstat_estat != NULL) { 3652 kstat_delete(irport->irport_kstat_estat); 3653 } 3654 if (irport->irport_kstat_info != NULL) { 3655 stmf_kstat_rport_info_t *ks_info; 3656 kstat_named_t *knp; 3657 void *ptr; 3658 int i; 3659 3660 ks_info = (stmf_kstat_rport_info_t *)KSTAT_NAMED_PTR( 3661 irport->irport_kstat_info); 3662 kstat_delete(irport->irport_kstat_info); 3663 ptr = KSTAT_NAMED_STR_PTR(&ks_info->i_rport_name); 3664 kmem_free(ptr, KSTAT_NAMED_STR_BUFLEN(&ks_info->i_rport_name)); 3665 3666 for (i = 0, knp = ks_info->i_rport_uinfo; 3667 i < STMF_RPORT_INFO_LIMIT; i++, knp++) { 3668 ptr = KSTAT_NAMED_STR_PTR(knp); 3669 if (ptr != NULL) 3670 kmem_free(ptr, KSTAT_NAMED_STR_BUFLEN(knp)); 3671 } 3672 kmem_free(ks_info, sizeof (*ks_info)); 3673 } 3674 } 3675 3676 static stmf_i_remote_port_t * 3677 stmf_irport_register(scsi_devid_desc_t *rport_devid) 3678 { 3679 stmf_i_remote_port_t *irport; 3680 3681 mutex_enter(&stmf_state.stmf_lock); 3682 3683 /* 3684 * Lookup will bump the refcnt if there's an existing rport 3685 * context for this identifier. 
	 */
	if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (irport);
	}

	irport = stmf_irport_create(rport_devid);
	if (irport == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}

	stmf_create_kstat_rport(irport);
	avl_add(&stmf_state.stmf_irportlist, irport);
	mutex_exit(&stmf_state.stmf_lock);

	return (irport);
}

/*
 * Look up a remote port context by devid in the AVL tree and, if found,
 * take a reference on it.  Caller must hold stmf_state.stmf_lock.
 */
static stmf_i_remote_port_t *
stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
{
	stmf_i_remote_port_t *irport;
	stmf_i_remote_port_t tmp_irport;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	/* stack key: comparator only dereferences irport_id */
	tmp_irport.irport_id = rport_devid;
	irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
	if (irport != NULL) {
		mutex_enter(&irport->irport_mutex);
		irport->irport_refcnt++;
		mutex_exit(&irport->irport_mutex);
	}

	return (irport);
}

/*
 * Drop a reference on a remote port context.  The context itself is
 * deliberately kept around even when the count reaches zero.
 */
static void
stmf_irport_deregister(stmf_i_remote_port_t *irport)
{
	/*
	 * If we were actually going to remove unreferenced remote ports
	 * we would want to acquire stmf_state.stmf_lock before getting
	 * the irport mutex.
	 *
	 * Instead we're just going to leave it there even if unreferenced.
	 */
	mutex_enter(&irport->irport_mutex);
	irport->irport_refcnt--;
	mutex_exit(&irport->irport_mutex);
}

/*
 * Port provider has to make sure that register/deregister session and
 * port are serialized calls.
 */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	uint8_t lun[8];

	/*
	 * Port state has to be online to register a scsi session.
 It is
	 * possible that we started an offline operation and a new SCSI
	 * session started at the same time (in that case also we are going
	 * to fail the registeration). But any other state is simply
	 * a bad port provider implementation.
	 */
	if (ilport->ilport_state != STMF_STATE_ONLINE) {
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			stmf_trace(lport->lport_alias, "Port is trying to "
			    "register a session while the state is neither "
			    "online nor offlining");
		}
		return (STMF_FAILURE);
	}
	bzero(lun, 8);
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
		stmf_trace(lport->lport_alias, "Could not register "
		    "remote port during session registration");
		return (STMF_FAILURE);
	}

	iss->iss_flags |= ISS_BEING_CREATED;

	if (ss->ss_rport == NULL) {
		/* Synthesize a transport id from the devid */
		iss->iss_flags |= ISS_NULL_TPTID;
		ss->ss_rport = stmf_scsilib_devid_to_remote_port(
		    ss->ss_rport_id);
		if (ss->ss_rport == NULL) {
			/*
			 * NOTE(review): the irport reference taken above is
			 * not dropped on this failure path (nor on the
			 * validation failure below) — confirm whether
			 * stmf_irport_deregister() is needed here.
			 */
			iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
			stmf_trace(lport->lport_alias, "Device id to "
			    "remote port conversion failed");
			return (STMF_FAILURE);
		}
	} else {
		if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
		    ss->ss_rport->rport_tptid_sz, NULL)) {
			iss->iss_flags &= ~ISS_BEING_CREATED;
			stmf_trace(lport->lport_alias, "Remote port "
			    "transport id validation failed");
			return (STMF_FAILURE);
		}
	}

	/* sessions use the ilport_lock. No separate lock is required */
	iss->iss_lockp = &ilport->ilport_lock;

	if (iss->iss_sm != NULL)
		cmn_err(CE_PANIC, "create lun map called with non NULL map");
	iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
	    KM_SLEEP);

	/* Build the session's LUN view and link it onto the port */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	(void) stmf_session_create_lun_map(ilport, iss);
	ilport->ilport_nsessions++;
	iss->iss_next = ilport->ilport_ss_list;
	ilport->ilport_ss_list = iss;
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	iss->iss_creation_time = ddi_get_time();
	ss->ss_session_id = atomic_inc_64_nv(&stmf_session_counter);
	iss->iss_flags &= ~ISS_BEING_CREATED;
	/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
	iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
	DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);
	return (STMF_SUCCESS);
}

/*
 * Add a name/value string property to the session's remote port info
 * kstat.  Returns STMF_FAILURE when all STMF_KSTAT_RPORT_DATAMAX slots
 * are already in use.  Adding a duplicate property name is a caller bug
 * (ASSERTed on debug kernels).
 */
stmf_status_t
stmf_add_rport_info(stmf_scsi_session_t *ss,
    const char *prop_name, const char *prop_value)
{
	stmf_i_scsi_session_t *iss = ss->ss_stmf_private;
	stmf_i_remote_port_t *irport = iss->iss_irport;
	kstat_named_t *knp;
	char *s;
	int i;

	s = strdup(prop_value);

	mutex_enter(irport->irport_kstat_info->ks_lock);
	/* Make sure the caller doesn't try to add already existing property */
	knp = KSTAT_NAMED_PTR(irport->irport_kstat_info);
	for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
		/* first slot with no string is the free slot */
		if (KSTAT_NAMED_STR_PTR(knp) == NULL)
			break;

		ASSERT(strcmp(knp->name, prop_name) != 0);
	}

	if (i == STMF_KSTAT_RPORT_DATAMAX) {
		mutex_exit(irport->irport_kstat_info->ks_lock);
		kmem_free(s, strlen(s) + 1);
		return (STMF_FAILURE);
	}

	irport->irport_info_dirty = B_TRUE;
	kstat_named_init(knp, prop_name, KSTAT_DATA_STRING);
	kstat_named_setstr(knp, s);
	mutex_exit(irport->irport_kstat_info->ks_lock);

	return (STMF_SUCCESS);
}

/*
 * Remove a named string property from the session's remote port info
 * kstat.  Remaining properties are shifted down to keep the used slots
 * contiguous; the removed value's storage is freed after the kstat lock
 * is dropped.  Silently returns if the property is not present.
 */
void
stmf_remove_rport_info(stmf_scsi_session_t *ss,
    const char *prop_name)
{
	stmf_i_scsi_session_t *iss = ss->ss_stmf_private;
	stmf_i_remote_port_t *irport = iss->iss_irport;
	kstat_named_t *knp;
	char *s;
	int i;
	uint32_t len;

	mutex_enter(irport->irport_kstat_info->ks_lock);
	knp = KSTAT_NAMED_PTR(irport->irport_kstat_info);
	for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
		if ((knp->name != NULL) && (strcmp(knp->name, prop_name) == 0))
			break;
	}

	if (i == STMF_KSTAT_RPORT_DATAMAX) {
		mutex_exit(irport->irport_kstat_info->ks_lock);
		return;
	}

	/* remember the victim's storage so it can be freed after unlock */
	s = KSTAT_NAMED_STR_PTR(knp);
	len = KSTAT_NAMED_STR_BUFLEN(knp);

	/* compact: shift every following slot down by one */
	for (; i < STMF_KSTAT_RPORT_DATAMAX - 1; i++, knp++) {
		kstat_named_init(knp, knp[1].name, KSTAT_DATA_STRING);
		kstat_named_setstr(knp, KSTAT_NAMED_STR_PTR(&knp[1]));
	}
	kstat_named_init(knp, "", KSTAT_DATA_STRING);

	irport->irport_info_dirty = B_TRUE;
	mutex_exit(irport->irport_kstat_info->ks_lock);
	kmem_free(s, len);
}

/*
 * ks_update callback for the remote port info kstat.  Recomputes
 * ks_ndata/ks_data_size to cover only the occupied slots; skipped
 * entirely when nothing changed since the last snapshot.
 */
static int
stmf_kstat_rport_update(kstat_t *ksp, int rw)
{
	stmf_i_remote_port_t *irport = ksp->ks_private;
	kstat_named_t *knp;
	uint_t ndata = 0;
	size_t dsize = 0;
	int i;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	if (!irport->irport_info_dirty)
		return (0);

	knp = KSTAT_NAMED_PTR(ksp);
	for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
		if (KSTAT_NAMED_STR_PTR(knp) == NULL)
			break;
		ndata++;
		dsize += KSTAT_NAMED_STR_BUFLEN(knp);
	}

	ksp->ks_ndata = ndata;
	ksp->ks_data_size = sizeof (kstat_named_t) * ndata + dsize;
	irport->irport_info_dirty = B_FALSE;

	return (0);
}

void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	stmf_status_t ic_ret = STMF_FAILURE;
	stmf_lun_map_t *sm;
	stmf_i_lu_t *ilu;
	uint16_t n;
	stmf_lun_map_ent_t *ent;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	/* back off and retry while an event handler still references us */
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				stmf_proxy_msg_id++;
			}
		}
	}

	/* Unlink the session from the port's session list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;

	stmf_irport_deregister(iss->iss_irport);
	/*
	 * to avoid conflict with updating session's map,
	 * which only grab stmf_lock
	 */
	sm = iss->iss_sm;
	iss->iss_sm = NULL;
	iss->iss_hg = NULL;

	rw_exit(&ilport->ilport_lock);

	/*
	 * Tear down the detached LUN map: deregister ITL handles, drop LU
	 * refcounts and free every map entry.
	 */
	if (sm->lm_nentries) {
		for (n = 0; n < sm->lm_nentries; n++) {
			if ((ent = (stmf_lun_map_ent_t *)sm->lm_plus[n])
			    != NULL) {
				if (ent->ent_itl_datap) {
					stmf_do_itl_dereg(ent->ent_lu,
					    ent->ent_itl_datap,
					    STMF_ITL_REASON_IT_NEXUS_LOSS);
				}
				ilu = (stmf_i_lu_t *)
				    ent->ent_lu->lu_stmf_private;
				atomic_dec_32(&ilu->ilu_ref_cnt);
				kmem_free(sm->lm_plus[n],
				    sizeof (stmf_lun_map_ent_t));
			}
		}
		kmem_free(sm->lm_plus,
		    sizeof (stmf_lun_map_ent_t *) * sm->lm_nentries);
	}
	kmem_free(sm, sizeof (*sm));

	/* free the transport id we synthesized at registration time */
	if (iss->iss_flags & ISS_NULL_TPTID) {
		stmf_remote_port_free(ss->ss_rport);
	}

	mutex_exit(&stmf_state.stmf_lock);
}



/*
 * Map a session id back to its internal session structure by walking
 * every port's session list.  When stay_locked is set, the owning
 * port's ilport_lock is returned held (writer) so the caller can use
 * the session safely; the caller must then drop it via iss_lockp.
 * Returns NULL if no session has the given id.
 */
stmf_i_scsi_session_t *
stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
{
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			if (iss->iss_ss->ss_session_id == session_id) {
				if (!stay_locked)
					rw_exit(&ilport->ilport_lock);
				mutex_exit(&stmf_state.stmf_lock);
				return (iss);
			}
		}
		rw_exit(&ilport->ilport_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (NULL);
}

/*
 * Final teardown of an ITL handle: unlink it from the LU's ITL list,
 * notify the LU provider via lu_abort(STMF_LU_ITL_HANDLE_REMOVED) and
 * free the structure.  Only called once the handle is marked
 * STMF_ITL_BEING_TERMINATED and its refcount has dropped to zero.
 */
void
stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
{
	stmf_itl_data_t **itlpp;
	stmf_i_lu_t *ilu;

	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	mutex_enter(&ilu->ilu_task_lock);
	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
	    itlpp = &(*itlpp)->itl_next) {
		if ((*itlpp) == itl)
			break;
	}
	ASSERT((*itlpp) != NULL);
	*itlpp = itl->itl_next;
	mutex_exit(&ilu->ilu_task_lock);
	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
	    (uint32_t)itl->itl_hdlrm_reason);

	kmem_free(itl, sizeof (*itl));
}

/*
 * Attach an LU-provider handle to the (initiator, target, LUN) nexus
 * identified by lun within the given session (looked up by session_id
 * when ss is NULL).  Returns STMF_NOT_FOUND if the session or LUN map
 * entry doesn't exist, STMF_ALREADY if a handle is already attached and
 * STMF_ALLOC_FAILURE on memory exhaustion.
 */
stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_itl_data_t *itl;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_ent_t *lun_map_ent;
	stmf_i_lu_t *ilu;
	uint16_t n;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	if (ss == NULL) {
		/* stay_locked=1: iss_lockp is held on return */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	}

	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(iss->iss_lockp, RW_WRITER);
	/* decode a SAM flat-addressing LUN (14-bit number) */
	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	lun_map_ent = (stmf_lun_map_ent_t *)
	    stmf_get_ent_from_map(iss->iss_sm, n);
	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_NOT_FOUND);
	}
	if (lun_map_ent->ent_itl_datap != NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALREADY);
	}

	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
	if (itl == NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	itl->itl_ilu = ilu;
	itl->itl_session = iss;
	itl->itl_counter = 1;
	itl->itl_lun = n;
	itl->itl_handle = itl_handle;

	/* publish on the LU's ITL list, then in the LUN map entry */
	mutex_enter(&ilu->ilu_task_lock);
	itl->itl_next = ilu->ilu_itl_list;
	ilu->ilu_itl_list = itl;
	mutex_exit(&ilu->ilu_task_lock);
	lun_map_ent->ent_itl_datap = itl;
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}

/*
 * Begin teardown of an ITL handle.  The CAS loop ensures that exactly one
 * caller wins the right to set STMF_ITL_BEING_TERMINATED; later callers
 * return immediately.  The winner records the removal reason and drops the
 * registration reference; the last reference holder (which may be an
 * in-flight task, see stmf_task_free()) calls stmf_release_itl_handle().
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	if (atomic_dec_32_nv(&itl->itl_counter))
		return;

	stmf_release_itl_handle(lu, itl);
}

/*
 * Deregister every ITL handle associated with 'lu' across all ports and
 * sessions.  The ITL pointers are collected under stmf_state.stmf_lock and
 * the per-port locks, then the actual deregistration (which may call back
 * into the LU provider) is done after the locks are dropped.
 *
 * ilu_ref_cnt is sampled before the stmf_lock is taken so the array can be
 * allocated with KM_SLEEP; if the count changed in the meantime the whole
 * scan restarts.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
		return (STMF_NOT_FOUND);
	itl_list = (stmf_itl_data_t **)kmem_zalloc(
	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
	mutex_enter(&stmf_state.stmf_lock);
	if (nmaps != ilu->ilu_ref_cnt) {
		/* Something changed, start all over */
		mutex_exit(&stmf_state.stmf_lock);
		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
		goto dereg_itl_start;
	}
	nu = 0;
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			lm = iss->iss_sm;
			if (!lm)
				continue;
			for (i = 0; i < lm->lm_nentries; i++) {
				if (lm->lm_plus[i] == NULL)
					continue;
				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
				if ((ent->ent_lu == lu) &&
				    (ent->ent_itl_datap)) {
					itl_list[nu++] = ent->ent_itl_datap;
					ent->ent_itl_datap = NULL;
					if (nu == nmaps) {
						rw_exit(&ilport->ilport_lock);
						goto dai_scan_done;
					}
				}
			} /* lun table for a session */
		} /* sessions */
		rw_exit(&ilport->ilport_lock);
	} /* ports */

dai_scan_done:
	mutex_exit(&stmf_state.stmf_lock);

	/* Now deregister outside all the scan locks. */
	for (i = 0; i < nu; i++) {
		stmf_do_itl_dereg(lu, itl_list[i],
		    STMF_ITL_REASON_DEREG_REQUEST);
	}
	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

	return (STMF_SUCCESS);
}

/*
 * Allocate a data buffer for 'task' from the local port's data store.
 * A task can hold at most 4 buffers; the free slot is found via the
 * stmf_first_zero lookup table over the 4-bit allocation map.  Returns
 * NULL if no slot is free or the port allocator fails.
 */
stmf_data_buf_t *
stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;
	stmf_data_buf_t *dbuf;
	uint8_t ndx;

	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
	if (ndx == 0xff)
		return (NULL);
	dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
	    task, size, pminsize, flags);
	if (dbuf) {
		task->task_cur_nbufs++;
		itask->itask_allocated_buf_map |= (1 << ndx);
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
		dbuf->db_handle = ndx;
		return (dbuf);
	}

	return (NULL);
}

/*
 * Register an LU-provided data buffer (DB_LU_DATA_BUF) with the port's
 * data store and assign it a handle slot on the task.  The ASSERTs and the
 * runtime checks intentionally overlap: the ASSERTs catch misuse in DEBUG
 * kernels, the runtime checks keep non-DEBUG kernels safe.
 */
stmf_status_t
stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;
	uint8_t ndx;
	stmf_status_t ret;

	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
	ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
		return (STMF_FAILURE);
	if (lport->lport_ds->ds_setup_dbuf == NULL)
		return (STMF_FAILURE);

	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
	if (ndx == 0xff)
		return (STMF_FAILURE);
	ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
	if (ret == STMF_FAILURE)
		return (STMF_FAILURE);
	itask->itask_dbufs[ndx] = dbuf;
	task->task_cur_nbufs++;
	itask->itask_allocated_buf_map |= (1 << ndx);
	dbuf->db_handle = ndx;

	return (STMF_SUCCESS);
}

/*
 * Undo stmf_setup_dbuf(): release the handle slot and let the port's data
 * store tear down its association with the LU-owned buffer.
 */
void
stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;

	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
	ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
	task->task_cur_nbufs--;
	lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
}

/*
 * Return a port-allocated data buffer (counterpart of stmf_alloc_dbuf()).
 */
void
stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;

	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
	task->task_cur_nbufs--;
	lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
}

/*
 * Map a buffer handle (0..3) back to the task's data buffer pointer.
 * NULL for out-of-range handles.
 */
stmf_data_buf_t *
stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
{
	stmf_i_scsi_task_t *itask;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	if (h > 3)
		return (NULL);
	return (itask->itask_dbufs[h]);
}

/*
 * Allocate (or recycle from the LU's free list) a scsi_task_t for a new
 * command arriving on session 'ss' for the LUN in 'lun'.  Unmapped LUNs
 * are routed to dlun0 so STMF itself can answer inquiry-style commands;
 * the same applies while an LU is offlining/offline.  Returns NULL when a
 * reset is active on the LU or allocation fails.
 */
/* ARGSUSED */
struct scsi_task *
stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
    uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
{
	stmf_lu_t *lu;
	stmf_i_scsi_session_t *iss;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint8_t *l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}

	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}

	/*
	 * If the LUN is being offlined or is offline then only commands
	 * that are to query the LUN are allowed. These are handled in
	 * stmf via the dlun0 vector. It is possible that a race condition
	 * will cause other commands to arrive while the lun is in the
	 * process of being offlined. Check for those and just let the
	 * protocol stack handle the error.
	 */
	if ((ilu->ilu_state == STMF_STATE_OFFLINING) ||
	    (ilu->ilu_state == STMF_STATE_OFFLINE)) {
		if (lu != dlun0) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
	}

	/*
	 * Try to recycle a free task whose CDB buffer is large enough;
	 * fall back to a fresh allocation otherwise.
	 */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free =
				    ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		/*
		 * Save the task_cdb pointer and zero per cmd fields.
		 * We know the task_cdb_length is large enough by task
		 * selection process above.
		 */
		uint8_t *save_cdb;
		uintptr_t t_start, t_end;

		task = itask->itask_task;
		save_cdb = task->task_cdb;	/* save */
		t_start = (uintptr_t)&task->task_flags;
		t_end = (uintptr_t)&task->task_extended_cmd;
		bzero((void *)t_start, (size_t)(t_end - t_start));
		task->task_cdb = save_cdb;	/* restore */
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		/* Round the CDB pointer up to an 8-byte boundary. */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
		mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&itask->itask_mutex, NULL, MUTEX_DRIVER, NULL);
	}

	/*
	 * Since a LUN can be mapped as different LUN ids to different initiator
	 * groups, we need to set LUN id for a new task and reset LUN id for
	 * a reused task.
	 */
	l = task->task_lun_no;
	l[0] = lun[0];
	l[1] = lun[1];
	l[2] = lun[2];
	l[3] = lun[3];
	l[4] = lun[4];
	l[5] = lun[5];
	l[6] = lun[6];
	l[7] = lun[7];

	mutex_enter(&itask->itask_mutex);
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;
	itask->itask_waitq_time = 0;
	itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
	itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
	itask->itask_read_xfer = itask->itask_write_xfer = 0;
	itask->itask_audit_index = 0;
	bzero(&itask->itask_audit_records[0],
	    sizeof (stmf_task_audit_rec_t) * ITASK_TASK_AUDIT_DEPTH);
	mutex_exit(&itask->itask_mutex);

	if (new_task) {
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		mutex_enter(&ilu->ilu_task_lock);
		/* Re-check reset under the task lock before publishing. */
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_inc_32(itask->itask_ilu_task_cntr);
	itask->itask_start_time = ddi_get_lbolt();

	/* Take a hold on the ITL handle for the lifetime of this task. */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_inc_32(&itask->itask_itl_datap->itl_counter);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}

/*
 * Return a task to its LU's free list.  Caller holds the itask_mutex
 * (dropped here) and at least a reader hold on iss_lockp.  Wakes up any
 * offline waiter once every task of the LU is back on the free list.
 */
/* ARGSUSED */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
	ASSERT((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0);
	ASSERT((itask->itask_flags & ITASK_IN_TRANSITION) == 0);
	ASSERT((itask->itask_flags & ITASK_KNOWN_TO_LU) == 0);
	ASSERT(mutex_owned(&itask->itask_mutex));

	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_ncmds = 0;
	itask->itask_proxy_msg_id = 0;
	atomic_dec_32(itask->itask_ilu_task_cntr);
	itask->itask_worker_next = NULL;
	mutex_exit(&itask->itask_mutex);

	mutex_enter(&ilu->ilu_task_lock);
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
		cv_signal(&ilu->ilu_offline_pending_cv);
	mutex_exit(&ilu->ilu_task_lock);
}

/*
 * Trim the LU's free-task cache: release half of the low-water mark's
 * worth of cached tasks back to the LU provider and to stmf_free().
 * The task lock is dropped around the provider callback and re-taken for
 * the list unlink, so the list is never held across provider code.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t	num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t	*lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Let the LU provider release its per-task state first. */
		lu->lu_task_free(itask->itask_task);
		mutex_enter(&ilu->ilu_task_lock);
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}

/*
 * Called with stmf_lock held.  Walks the draining list of LUs, trimming
 * each LU's free-task cache for at most ~10ms per invocation.  The LU
 * being worked on is marked ILU_STALL_DEREGISTER so it cannot deregister
 * while stmf_lock is temporarily dropped.
 */
void
stmf_check_freetask()
{
	stmf_i_lu_t *ilu;
	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		if (!ilu->ilu_ntasks_min_free) {
			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
			continue;
		}
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_task_lu_check_freelist(ilu);
		/*
		 * we do not care about the accuracy of
		 * ilu_ntasks_min_free, so we don't lock here
		 */
		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}

/*
 * Since this method is looking to find tasks that are stuck, lost, or senile
 * it should be more willing to give up scanning during this time period. This
 * is why mutex_tryenter is now used instead of the standard mutex_enter.
 * There has been at least one case where the following occurred.
 *
 * 1) The iscsit_deferred() method is trying to register a session and
 *    needs the global lock which is held.
 * 2) Another thread which holds the global lock is trying to deregister a
 *    session and needs the session lock.
 * 3) A third thread is allocating a stmf task that has grabbed the session
 *    lock and is trying to grab the lun task lock.
 * 4) There's a timeout thread that has the lun task lock and is trying to grab
 *    a specific task lock.
 * 5) The thread that has the task lock is waiting for the ref count to go to
 *    zero.
 * 6) There's a task that would drop the count to zero, but it's in the task
 *    queue waiting to run and is stuck because #1 is currently blocked.
 *
 * This method is number 4 in the above chain of events. Had this code
 * originally used mutex_tryenter the chain would have been broken and the
 * system wouldn't have hung. So, now this method uses mutex_tryenter and
 * you know why it does so.
 */
/* ---- Only one thread calls stmf_do_ilu_timeouts so no lock required ---- */
typedef struct stmf_bailout_cnt {
	int	no_ilu_lock;	/* times ilu_task_lock couldn't be taken */
	int	no_task_lock;	/* times a task's itask_mutex was busy */
	int	tasks_checked;	/* tasks actually examined */
} stmf_bailout_cnt_t;

stmf_bailout_cnt_t stmf_bailout;

/*
 * Scan every task of 'ilu' and queue an abort for any task that has been
 * running longer than its timeout (task_timeout seconds, or the global
 * stmf_default_task_timeout when unset).  Uses mutex_tryenter throughout --
 * see the deadlock chain described above.
 */
static void
stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
{
	clock_t l = ddi_get_lbolt();
	clock_t ps = drv_usectohz(1000000);	/* ticks per second */
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	uint32_t to;

	if (mutex_tryenter(&ilu->ilu_task_lock) == 0) {
		stmf_bailout.no_ilu_lock++;
		return;
	}

	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (mutex_tryenter(&itask->itask_mutex) == 0) {
			stmf_bailout.no_task_lock++;
			continue;
		}
		stmf_bailout.tasks_checked++;
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			mutex_exit(&itask->itask_mutex);
			continue;
		}
		task = itask->itask_task;
		if (task->task_timeout == 0)
			to = stmf_default_task_timeout;
		else
			to = task->task_timeout;

		if ((itask->itask_start_time + (to * ps)) > l) {
			/* Not expired yet. */
			mutex_exit(&itask->itask_mutex);
			continue;
		}
		mutex_exit(&itask->itask_mutex);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_TIMEOUT, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}

/*
 * Called with stmf_lock held.  Flips each LU's active task counter between
 * ilu_task_cntr1/ilu_task_cntr2; if the inactive counter has not drained
 * to zero since the last pass, tasks may be stuck, so run the timeout scan.
 * Bounded to ~10ms per invocation.
 */
void
stmf_check_ilu_timing()
{
	stmf_i_lu_t *ilu;
	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
			if (ilu->ilu_task_cntr2 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
				continue;
			}
		} else {
			if (ilu->ilu_task_cntr1 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
				continue;
			}
		}
		/*
		 * If we are here then it means that there is some slowdown
		 * in tasks on this lu. We need to check.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_do_ilu_timeouts(ilu);
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}

/*
 * Kills all tasks on a lu except tm_task
 */
void
stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	stmf_i_scsi_task_t *itask;

	mutex_enter(&ilu->ilu_task_lock);
	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		mutex_enter(&itask->itask_mutex);
		if (itask->itask_flags & ITASK_IN_FREE_LIST) {
			mutex_exit(&itask->itask_mutex);
			continue;
		}
		mutex_exit(&itask->itask_mutex);
		if (itask->itask_task == tm_task)
			continue;
		stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}

/*
 * Release every data buffer still attached to a task.  Port-allocated
 * buffers go back to the port's data store; LU-owned buffers (DB_LU_DATA_BUF)
 * are handed back to the LU, whose lu_dbuf_free callback must clear the
 * corresponding bit in itask_allocated_buf_map (asserted below).
 */
void
stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
{
	int i;
	uint8_t map;

	if ((map = itask->itask_allocated_buf_map) == 0)
		return;
	for (i = 0; i < 4; i++) {
		if (map & 1) {
			stmf_data_buf_t *dbuf;

			dbuf = itask->itask_dbufs[i];
			if (dbuf->db_xfer_start_timestamp) {
				stmf_lport_xfer_done(itask, dbuf);
			}
			if (dbuf->db_flags & DB_LU_DATA_BUF) {
				/*
				 * LU needs to clean up buffer.
				 * LU is required to free the buffer
				 * in the xfer_done handler.
				 */
				scsi_task_t *task = itask->itask_task;
				stmf_lu_t *lu = task->task_lu;

				lu->lu_dbuf_free(task, dbuf);
				ASSERT(((itask->itask_allocated_buf_map>>i)
				    & 1) == 0); /* must be gone */
			} else {
				ASSERT(dbuf->db_lu_private == NULL);
				dbuf->db_lu_private = NULL;
				lport->lport_ds->ds_free_data_buf(
				    lport->lport_ds, dbuf);
			}
		}
		map >>= 1;
	}
	itask->itask_allocated_buf_map = 0;
}

/*
 * Final teardown of a task: notify LU and port providers, release buffers
 * and the ITL hold, then put the task back on its LU's free list.
 * Entered with itask_mutex held; the mutex is released by
 * stmf_task_lu_free().
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lu_t *lu = task->task_lu;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);
	ASSERT(mutex_owned(&itask->itask_mutex));
	if ((lu != NULL) && (lu->lu_task_done != NULL))
		lu->lu_task_done(task);
	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop this task's hold on the ITL handle; last one releases it. */
	if (itask->itask_itl_datap) {
		if (atomic_dec_32_nv(&itask->itask_itl_datap->itl_counter) ==
		    0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	/*
	 * To prevent a deadlock condition must release the itask_mutex,
	 * grab a reader lock on iss_lockp and then reacquire the itask_mutex.
	 */
	mutex_exit(&itask->itask_mutex);
	rw_enter(iss->iss_lockp, RW_READER);
	mutex_enter(&itask->itask_mutex);

	lport->lport_task_free(task);
	if (itask->itask_worker) {
		atomic_dec_32(&stmf_cur_ntasks);
		atomic_dec_32(&itask->itask_worker->worker_ref_count);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}

/*
 * Hand a freshly allocated task (from stmf_task_alloc()) to a worker
 * thread for execution.  Picks a worker round-robin (skipping one busy
 * worker), marks the task known to the target port, applies default
 * handling for target resets and REPORT LUNS, and queues ITASK_CMD_NEW_TASK.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t new;
	uint32_t ct;
	stmf_worker_t *w;
	uint8_t tm;

	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_inc_32_nv(&stmf_cur_ntasks);

	/* Select the next worker using round robin */
	mutex_enter(&stmf_worker_sel_mx);
	stmf_worker_sel_counter++;
	if (stmf_worker_sel_counter >= stmf_nworkers)
		stmf_worker_sel_counter = 0;
	nv = stmf_worker_sel_counter;

	/* if the selected worker is not idle then bump to the next worker */
	if (stmf_workers[nv].worker_queue_depth > 0) {
		stmf_worker_sel_counter++;
		if (stmf_worker_sel_counter >= stmf_nworkers)
			stmf_worker_sel_counter = 0;
		nv = stmf_worker_sel_counter;
	}
	mutex_exit(&stmf_worker_sel_mx);

	w = &stmf_workers[nv];

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);

	itask->itask_worker = w;

	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	new = itask->itask_flags;
	new |= ITASK_KNOWN_TO_TGT_PORT;
	if (task->task_mgmt_function) {
		tm = task->task_mgmt_function;
		if ((tm == TM_TARGET_RESET) ||
		    (tm == TM_TARGET_COLD_RESET) ||
		    (tm == TM_TARGET_WARM_RESET)) {
			new |= ITASK_DEFAULT_HANDLING;
		}
	} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
		new |= ITASK_DEFAULT_HANDLING;
	}
	new &= ~ITASK_IN_TRANSITION;
	itask->itask_flags = new;

	stmf_itl_task_start(itask);

	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;

	if ((task->task_flags & TF_INITIAL_BURST) &&
	    !(curthread->t_flag & T_INTR_THREAD)) {
		stmf_update_kstat_lu_io(task, dbuf);
		stmf_update_kstat_lport_io(task, dbuf);
		stmf_update_kstat_rport_io(task, dbuf);
	}

	stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	STMF_ENQUEUE_ITASK(w, itask);

	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}

/*
 * Record an event in the task's circular audit trail (depth
 * ITASK_TASK_AUDIT_DEPTH, a power of two -- the index wraps via masking).
 */
static void
stmf_task_audit(stmf_i_scsi_task_t *itask,
    task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
{
	stmf_task_audit_rec_t *ar;

	mutex_enter(&itask->itask_audit_mutex);
	ar = &itask->itask_audit_records[itask->itask_audit_index++];
	itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
	ar->ta_event = te;
	ar->ta_cmd_or_iof = cmd_or_iof;
	ar->ta_itask_flags = itask->itask_flags;
	ar->ta_dbuf = dbuf;
	gethrestime(&ar->ta_timestamp);
	mutex_exit(&itask->itask_audit_mutex);
}


/*
 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already
 * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot
 * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course
 * the LU will make this call only if we call the LU's abort entry point.
 * we will only call that entry point if ITASK_KNOWN_TO_LU was set.
 *
 * Same logic applies for the port.
 *
 * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU
 * and KNOWN_TO_TGT_PORT are reset.
4968 * 4969 * +++++++++++++++++++++++++++++++++++++++++++++++ 4970 */ 4971 4972 stmf_status_t 4973 stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags) 4974 { 4975 stmf_status_t ret = STMF_SUCCESS; 4976 4977 stmf_i_scsi_task_t *itask = 4978 (stmf_i_scsi_task_t *)task->task_stmf_private; 4979 4980 stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf); 4981 4982 mutex_enter(&itask->itask_mutex); 4983 if (ioflags & STMF_IOF_LU_DONE) { 4984 if (itask->itask_flags & ITASK_BEING_ABORTED) { 4985 mutex_exit(&itask->itask_mutex); 4986 return (STMF_ABORTED); 4987 } 4988 itask->itask_flags &= ~ITASK_KNOWN_TO_LU; 4989 } 4990 if ((itask->itask_flags & ITASK_BEING_ABORTED) != 0) { 4991 mutex_exit(&itask->itask_mutex); 4992 return (STMF_ABORTED); 4993 } 4994 mutex_exit(&itask->itask_mutex); 4995 4996 #ifdef DEBUG 4997 if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) { 4998 if (atomic_dec_32_nv((uint32_t *)&stmf_drop_buf_counter) == 1) 4999 return (STMF_SUCCESS); 5000 } 5001 #endif 5002 5003 stmf_update_kstat_lu_io(task, dbuf); 5004 stmf_update_kstat_lport_io(task, dbuf); 5005 stmf_update_kstat_rport_io(task, dbuf); 5006 stmf_lport_xfer_start(itask, dbuf); 5007 if (ioflags & STMF_IOF_STATS_ONLY) { 5008 stmf_lport_xfer_done(itask, dbuf); 5009 return (STMF_SUCCESS); 5010 } 5011 5012 dbuf->db_flags |= DB_LPORT_XFER_ACTIVE; 5013 ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags); 5014 5015 /* 5016 * Port provider may have already called the buffer callback in 5017 * which case dbuf->db_xfer_start_timestamp will be 0. 
5018 */ 5019 if (ret != STMF_SUCCESS) { 5020 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE; 5021 if (dbuf->db_xfer_start_timestamp != 0) 5022 stmf_lport_xfer_done(itask, dbuf); 5023 } 5024 5025 return (ret); 5026 } 5027 5028 void 5029 stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof) 5030 { 5031 stmf_i_scsi_task_t *itask = 5032 (stmf_i_scsi_task_t *)task->task_stmf_private; 5033 stmf_i_local_port_t *ilport; 5034 stmf_worker_t *w = itask->itask_worker; 5035 uint32_t new; 5036 uint8_t update_queue_flags, free_it, queue_it; 5037 5038 stmf_lport_xfer_done(itask, dbuf); 5039 5040 stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf); 5041 5042 /* Guard against unexpected completions from the lport */ 5043 if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) { 5044 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE; 5045 } else { 5046 /* 5047 * This should never happen. 5048 */ 5049 ilport = task->task_lport->lport_stmf_private; 5050 ilport->ilport_unexpected_comp++; 5051 cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p", 5052 (void *)task, (void *)dbuf); 5053 return; 5054 } 5055 5056 mutex_enter(&itask->itask_mutex); 5057 mutex_enter(&w->worker_lock); 5058 new = itask->itask_flags; 5059 if (itask->itask_flags & ITASK_BEING_ABORTED) { 5060 mutex_exit(&w->worker_lock); 5061 mutex_exit(&itask->itask_mutex); 5062 return; 5063 } 5064 free_it = 0; 5065 if (iof & STMF_IOF_LPORT_DONE) { 5066 new &= ~ITASK_KNOWN_TO_TGT_PORT; 5067 task->task_completion_status = dbuf->db_xfer_status; 5068 free_it = 1; 5069 } 5070 /* 5071 * If the task is known to LU then queue it. But if 5072 * it is already queued (multiple completions) then 5073 * just update the buffer information by grabbing the 5074 * worker lock. If the task is not known to LU, 5075 * completed/aborted, then see if we need to 5076 * free this task. 
 */
	if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		free_it = 0;
		update_queue_flags = 1;
		/* Already on the worker queue (multiple completions)? */
		if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
			queue_it = 0;
		} else {
			queue_it = 1;
		}
	} else {
		update_queue_flags = 0;
		queue_it = 0;
	}
	itask->itask_flags = new;

	if (update_queue_flags) {
		/* Fold the dbuf handle into the queued worker command byte. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);

		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			STMF_ENQUEUE_ITASK(w, itask);
		}
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}

	mutex_exit(&w->worker_lock);
	if (free_it) {
		/* Free only once no one (LU/lport/worker/abort) holds it. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
			return;
		}
	}
	mutex_exit(&itask->itask_mutex);
}

/*
 * Send SCSI status for a task to the initiator through the local port.
 * If STMF_IOF_LU_DONE is set, the LU is finished with the task and
 * ITASK_KNOWN_TO_LU is cleared.  Computes the over-/under-run residual
 * (task_status_ctrl / task_resid) from the CDB transfer length, the
 * expected transfer length and the bytes actually moved, then delegates
 * to lport_send_status().
 *
 * Returns STMF_ABORTED if the task is being aborted, STMF_SUCCESS if the
 * target port no longer knows the task, otherwise the lport's status.
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);

	mutex_enter(&itask->itask_mutex);
	if (ioflags & STMF_IOF_LU_DONE) {
		if (itask->itask_flags & ITASK_BEING_ABORTED) {
			mutex_exit(&itask->itask_mutex);
			return (STMF_ABORTED);
		}
		itask->itask_flags &= ~ITASK_KNOWN_TO_LU;
	}

	/* Nothing to send if the target port has already let go. */
	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		mutex_exit(&itask->itask_mutex);
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&itask->itask_mutex);
		return (STMF_ABORTED);
	}
	mutex_exit(&itask->itask_mutex);

	/* Work out residual/overrun-underrun reporting for the response. */
	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}

/*
 * Completion callback from the local port after status has been sent.
 * Records the completion status, and either queues the task back to its
 * worker (if the LU still knows it) or frees it once nothing references
 * it anymore.  Panics on inconsistent states (double completion, LU done
 * but lport not done).
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new;
	uint8_t free_it, queue_it;

	stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);
	new = itask->itask_flags;
	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}
	free_it = 0;
	if (iof & STMF_IOF_LPORT_DONE) {
		new &= ~ITASK_KNOWN_TO_TGT_PORT;
		free_it = 1;
	}
	/*
	 * If the task is known to LU then queue it. But if
	 * it is already queued (multiple completions) then
	 * just update the buffer information by grabbing the
	 * worker lock. If the task is not known to LU,
	 * completed/aborted, then see if we need to
	 * free this task.
	 */
	if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		free_it = 0;
		queue_it = 1;
		if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
			cmn_err(CE_PANIC, "status completion received"
			    " when task is already in worker queue "
			    " task = %p", (void *)task);
		}
	} else {
		queue_it = 0;
	}
	itask->itask_flags = new;
	task->task_completion_status = s;

	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;

		STMF_ENQUEUE_ITASK(w, itask);
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}

	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
			return;
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
	mutex_exit(&itask->itask_mutex);
}

/*
 * Called by an LU to indicate it is completely done with a task.  This
 * must be the final stage of the task's life: the function clears
 * ITASK_KNOWN_TO_LU and expects every other ownership flag to be clear
 * already, panicking otherwise.
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);
	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		cmn_err(CE_PANIC, "task_lu_done received"
		    " when task is in worker queue "
		    " task = %p", (void *)task);
	}
	itask->itask_flags &= ~ITASK_KNOWN_TO_LU;

	mutex_exit(&w->worker_lock);
	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
		return;
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
	/*
	 * NOTE(review): this mutex_exit appears unreachable -- the branch
	 * above returns and the else branch panics; confirm before relying
	 * on it for lock balance.
	 */
	mutex_exit(&itask->itask_mutex);
}

/*
 * Mark a task as being aborted (recording the abort status in
 * task_completion_status) and, when a worker owns it and it is not in
 * transition or already queued, enqueue it so the worker can drive the
 * abort.  A task unknown to both the LU and the target port, or one
 * already being aborted, is left untouched.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;

	stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);

	mutex_enter(&itask->itask_mutex);
	if ((itask->itask_flags & ITASK_BEING_ABORTED) ||
	    ((itask->itask_flags & (ITASK_KNOWN_TO_TGT_PORT |
	    ITASK_KNOWN_TO_LU)) == 0)) {
		mutex_exit(&itask->itask_mutex);
		return;
	}
	itask->itask_flags |= ITASK_BEING_ABORTED;
	task->task_completion_status = s;

	/* No worker yet, or task in transition: abort is deferred. */
	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		mutex_exit(&itask->itask_mutex);
		return;
	}

	/* Queue it and get out */
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		mutex_exit(&itask->itask_mutex);
		return;
	}
	mutex_enter(&w->worker_lock);
	STMF_ENQUEUE_ITASK(w, itask);
	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);
}

/*
 * Abort dispatcher.  STMF_QUEUE_ABORT_LU kills all tasks on an LU,
 * STMF_QUEUE_TASK_ABORT queues a single task for abort, and the two
 * REQUEUE variants clear the corresponding *_ABORT_CALLED flag so that
 * stmf_do_task_abort() will retry the LU/lport abort call, provided the
 * task is still known to that side and being aborted.
 */
void
stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
{
	stmf_i_scsi_task_t *itask = NULL;
	uint32_t f, rf;

	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
	    stmf_status_t, s);

	switch (abort_cmd) {
	case STMF_QUEUE_ABORT_LU:
		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
		return;
	case STMF_QUEUE_TASK_ABORT:
		stmf_queue_task_for_abort(task, s);
		return;
	case STMF_REQUEUE_TASK_ABORT_LPORT:
		rf = ITASK_TGT_PORT_ABORT_CALLED;
		f = ITASK_KNOWN_TO_TGT_PORT;
		break;
	case STMF_REQUEUE_TASK_ABORT_LU:
		rf = ITASK_LU_ABORT_CALLED;
		f = ITASK_KNOWN_TO_LU;
		break;
	default:
		return;
	}

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	mutex_enter(&itask->itask_mutex);
	f |= ITASK_BEING_ABORTED | rf;

	/* Only requeue when all of the required flags are still set. */
	if ((itask->itask_flags & f) != f) {
		mutex_exit(&itask->itask_mutex);
		return;
	}
	itask->itask_flags &= ~rf;
	mutex_exit(&itask->itask_mutex);

}

/*
 * NOTE: stmf_abort_task_offline will release and then reacquire the
 * itask_mutex. This is required to prevent a lock order violation.
 */
void
stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	unsigned long long st;

	stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);
	ASSERT(mutex_owned(&itask->itask_mutex));
	st = s;	/* gcc fix */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but LU is not finished, task ="
		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LU abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
		return;
	}

	/* Abort did not complete cleanly; escalate to offline handling. */
	stmf_abort_task_offline(task, 1, info);
}

/*
 * NOTE: stmf_abort_task_offline will release and then reacquire the
 * itask_mutex. This is required to prevent a lock order violation.
 */
void
stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	unsigned long long st;

	ASSERT(mutex_owned(&itask->itask_mutex));
	stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);
	st = s;
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, tgt port failed to abort ret=%llx", (void *)task,
		    st);
	} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but tgt port is not finished, "
		    "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LPORT abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_TGT_PORT);
		return;
	}

	/* Abort did not complete cleanly; escalate to offline handling. */
	stmf_abort_task_offline(task, 0, info);
}

/*
 * Convenience wrapper for callers that do not already hold the
 * itask_mutex required by stmf_task_lport_aborted().
 */
void
stmf_task_lport_aborted_unlocked(scsi_task_t *task, stmf_status_t s,
    uint32_t iof)
{
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);

	mutex_enter(&itask->itask_mutex);
	stmf_task_lport_aborted(task, s, iof);
	mutex_exit(&itask->itask_mutex);
}

/*
 * Schedule an ITASK_CMD_POLL_LU command so the worker re-polls the LU
 * for this task after the given timeout.  timeout appears to be in
 * milliseconds (converted via timeout * 1000 to microseconds for
 * drv_usectohz) -- ITASK_DEFAULT_POLL_TIMEOUT means "next tick".
 * Returns STMF_BUSY if the command stack is full and STMF_SUCCESS if the
 * poll is (or already was) scheduled.
 */
stmf_status_t
stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	mutex_enter(&itask->itask_mutex);
	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return (STMF_BUSY);
	}
	/* A poll already pending is good enough; don't stack another. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
			mutex_exit(&w->worker_lock);
			mutex_exit(&itask->itask_mutex);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		STMF_ENQUEUE_ITASK(w, itask);
	}
	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);
	return (STMF_SUCCESS);
}

/*
 * Same as stmf_task_poll_lu() but schedules ITASK_CMD_POLL_LPORT so the
 * worker re-polls the local (target) port instead of the LU.
 */
stmf_status_t
stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	mutex_enter(&itask->itask_mutex);
	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return (STMF_BUSY);
	}
	/* A poll already pending is good enough; don't stack another. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
			mutex_exit(&w->worker_lock);
			mutex_exit(&itask->itask_mutex);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		STMF_ENQUEUE_ITASK(w, itask);
	}
	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);
	return (STMF_SUCCESS);
}

/*
 * Drive the abort of a task: call the LU's and the target port's abort
 * entry points exactly once each (tracked by the *_ABORT_CALLED flags),
 * retry on STMF_BUSY, and take the task offline if an abort fails or
 * exceeds its timeout.
 */
void
stmf_do_task_abort(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	stmf_lu_t *lu;
	stmf_local_port_t *lport;
	unsigned long long ret;
	uint32_t new = 0;
	uint8_t call_lu_abort, call_port_abort;
	char info[STMF_CHANGE_INFO_LEN];

	lu = task->task_lu;
	lport = task->task_lport;
	mutex_enter(&itask->itask_mutex);
	new = itask->itask_flags;
	/* Call the LU abort only if it hasn't been called already. */
	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_LU_ABORT_CALLED)) == ITASK_KNOWN_TO_LU) {
		new |= ITASK_LU_ABORT_CALLED;
		call_lu_abort = 1;
	} else {
		call_lu_abort = 0;
	}
	itask->itask_flags = new;

	if (call_lu_abort) {
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
			ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		} else {
			/* Default-handled tasks are aborted via dlun0. */
			ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		}
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
		} else if (ret == STMF_BUSY) {
			/* LU busy: clear the flag so the abort is retried. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_LU_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by LU %p, ret %llx", (void *)lu, ret);
			stmf_abort_task_offline(task, 1, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		/* Abort already issued; enforce the LU abort timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lu->lu_abort_timeout?
		    lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lu abort timed out");
			stmf_abort_task_offline(itask->itask_task, 1, info);
		}
	}

	/*
	 * NOTE: After the call to either stmf_abort_task_offline() or
	 * stmf_task_lu_abort() the itask_mutex was dropped and reacquired
	 * to avoid a deadlock situation with stmf_state.stmf_lock.
	 */

	new = itask->itask_flags;
	/* Same dance for the target port side of the abort. */
	if ((itask->itask_flags & (ITASK_KNOWN_TO_TGT_PORT |
	    ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
		new |= ITASK_TGT_PORT_ABORT_CALLED;
		call_port_abort = 1;
	} else {
		call_port_abort = 0;
	}
	itask->itask_flags = new;

	if (call_port_abort) {
		ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
		} else if (ret == STMF_BUSY) {
			/* Port busy: clear the flag so the abort retries. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_TGT_PORT_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by tgt port %p ret %llx",
			    (void *)lport, ret);
			stmf_abort_task_offline(task, 0, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
		/* Abort already issued; enforce the lport abort timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lport->lport_abort_timeout?
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lport abort timed out");
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
	mutex_exit(&itask->itask_mutex);
}

/*
 * State machine for LU and local-port online/offline transitions.
 * cmd selects the object type (STMF_CMD_LU_OP / STMF_CMD_LPORT_OP) and
 * the transition; obj is the stmf_lu_t or stmf_local_port_t; arg carries
 * a stmf_state_change_info_t (or stmf_change_status_t for *_COMPLETE).
 * Validates the current state, moves to the transitional state and hands
 * the actual work to the service thread via stmf_svc_queue().
 */
stmf_status_t
stmf_ctl(int cmd, void *obj, void *arg)
{
	stmf_status_t ret;
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;

	mutex_enter(&stmf_state.stmf_lock);
	ret = STMF_INVALID_ARG;
	if (cmd & STMF_CMD_LU_OP) {
		ilu = stmf_lookup_lu((stmf_lu_t *)obj);
		if (ilu == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lu__state__change,
		    stmf_lu_t *, ilu->ilu_lu,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else if (cmd & STMF_CMD_LPORT_OP) {
		ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
		if (ilport == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lport__state__change,
		    stmf_local_port_t *, ilport->ilport_lport,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else {
		goto stmf_ctl_lock_exit;
	}

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		switch (ilu->ilu_state) {
		case STMF_STATE_OFFLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_ONLINE:
		case STMF_STATE_ONLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_OFFLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilu->ilu_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_ONLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_ONLINE;
			/* Ack outside stmf_lock; lu_ctl may block. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		} else {
			/* XXX: should log a message and record more data */
			ilu->ilu_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LU_OFFLINE:
		switch (ilu->ilu_state) {
		case STMF_STATE_ONLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_OFFLINE:
		case STMF_STATE_OFFLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_ONLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;
		ilu->ilu_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_OFFLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed; the LU stays online. */
			ilu->ilu_state = STMF_STATE_ONLINE;
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	/*
	 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
	 * It's related with hardware disable/enable.
	 */
	case STMF_CMD_LPORT_ONLINE:
		switch (ilport->ilport_state) {
		case STMF_STATE_OFFLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_ONLINE:
		case STMF_STATE_ONLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_OFFLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		/*
		 * Only user request can recover the port from the
		 * FORCED_OFFLINE state
		 */
		if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
			if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
				ret = STMF_FAILURE;
				goto stmf_ctl_lock_exit;
			}
		}

		/*
		 * Avoid too frequent request to online
		 */
		if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
			ilport->ilport_online_times = 0;
			ilport->ilport_avg_interval = 0;
		}
		if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
		    (ilport->ilport_online_times >= 4)) {
			ret = STMF_FAILURE;
			ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
			stmf_trace(NULL, "stmf_ctl: too frequent request to "
			    "online the port");
			cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
			    "online the port, set FORCED_OFFLINE now");
			goto stmf_ctl_lock_exit;
		}
		/* Running average of the interval between online requests. */
		if (ilport->ilport_online_times > 0) {
			if (ilport->ilport_online_times == 1) {
				ilport->ilport_avg_interval = ddi_get_lbolt() -
				    ilport->ilport_last_online_clock;
			} else {
				ilport->ilport_avg_interval =
				    (ilport->ilport_avg_interval +
				    ddi_get_lbolt() -
				    ilport->ilport_last_online_clock) >> 1;
			}
		}
		ilport->ilport_last_online_clock = ddi_get_lbolt();
		ilport->ilport_online_times++;

		/*
		 * Submit online service request
		 */
		ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
		ilport->ilport_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd,
		    obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_ONLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_ONLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LPORT_OFFLINE:
		switch (ilport->ilport_state) {
		case STMF_STATE_ONLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_OFFLINE:
		case STMF_STATE_OFFLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_ONLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilport->ilport_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_OFFLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed; the port stays online. */
			ilport->ilport_state = STMF_STATE_ONLINE;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	default:
		cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
		ret = STMF_INVALID_ARG;
		goto stmf_ctl_lock_exit;
	}

	return (STMF_SUCCESS);

stmf_ctl_lock_exit:;
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}

/*
 * Handler for SI_STMF-class info requests; nothing is implemented yet.
 */
/* ARGSUSED */
stmf_status_t
stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}

/*
 * Dispatch an info request to the right handler based on the class
 * encoded in cmd: STMF itself, a local port, or an LU.
 */
/* ARGSUSED */
stmf_status_t
stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
    uint32_t *bufsizep)
{
	uint32_t cl = SI_GET_CLASS(cmd);

	if (cl == SI_STMF) {
		return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
	}
	if (cl == SI_LPORT) {
		return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
		    arg2, buf, bufsizep));
	} else if (cl == SI_LU) {
		return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
		    bufsizep));
	}

	return (STMF_NOT_SUPPORTED);
}

/*
 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
 * stmf to register local ports. The ident should have 20 bytes in buffer
 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
 */
void
stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
    uint8_t protocol_id)
{
	char wwn_str[20+1];

	sdid->protocol_id = protocol_id;
	sdid->piv = 1;
	sdid->code_set = CODE_SET_ASCII;
	sdid->association = ID_IS_TARGET_PORT;
	sdid->ident_length = 20;
	/* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
	(void) snprintf(wwn_str, sizeof (wwn_str),
	    "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
	bcopy(wwn_str, (char *)sdid->ident, 20);
}


/*
 * Build the REPORT TARGET PORT GROUPS payload in a freshly allocated
 * stmf_xfer_data_t.  The active ports go into the first group; when
 * ilu_alua is set and standby ports exist, a second (standby) group is
 * appended.  Returns NULL on allocation failure; the caller owns the
 * returned buffer (alloc_size bytes, freed with kmem_free).
 */
stmf_xfer_data_t *
stmf_prepare_tpgs_data(uint8_t ilu_alua)
{
	stmf_xfer_data_t *xd;
	stmf_i_local_port_t *ilport;
	uint8_t *p;
	uint32_t sz, asz, nports = 0, nports_standby = 0;

	mutex_enter(&stmf_state.stmf_lock);
	/* check if any ports are standby and create second group */
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			nports_standby++;
		} else {
			nports++;
		}
	}

	/*
	 * Section 6.25 REPORT TARGET PORT GROUPS
	 * The reply can contain many group replies. Each group is limited
	 * to 255 port identifiers so we'll need to limit the amount of
	 * data returned. For FC ports there's a physical limitation in
	 * machines that make reaching 255 ports very, very unlikely. For
	 * iSCSI on the other hand recent changes mean the port count could
	 * be as high as 4096 (current limit). Limiting the data returned
	 * for iSCSI isn't as bad as it sounds. This information is only
	 * important for ALUA, which isn't supported for iSCSI. iSCSI uses
	 * virtual IP addresses to deal with node fail over in a cluster.
	 */
	nports = min(nports, 255);
	nports_standby = min(nports_standby, 255);

	/*
	 * The first 4 bytes of the returned data is the length. The
	 * size of the Target Port Group header is 8 bytes. So, that's where
	 * the 12 comes from. Each port entry is 4 bytes in size.
	 */
	sz = (nports * 4) + 12;
	if (nports_standby != 0 && ilu_alua != 0) {
		/* --- Only add 8 bytes since it's just the Group header ---- */
		sz += (nports_standby * 4) + 8;
	}

	/*
	 * The stmf_xfer_data structure contains 4 bytes that will be
	 * part of the data buffer. So, subtract the 4 bytes from the space
	 * needed.
	 */
	asz = sizeof (*xd) + sz - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
	if (xd == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}
	xd->alloc_size = asz;
	xd->size_left = sz;

	p = xd->buf;

	/* ---- length values never include the field that holds the size --- */
	*((uint32_t *)p) = BE_32(sz - 4);
	p += 4;

	/* ---- Now fill out the first Target Group header ---- */
	p[0] = 0x80;	/* PREF */
	p[1] = 5;	/* AO_SUP, S_SUP */
	if (stmf_state.stmf_alua_node == 1) {
		p[3] = 1;	/* Group 1 */
	} else {
		p[3] = 0;	/* Group 0 */
	}
	p[7] = nports & 0xff;
	p += 8;
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL && nports != 0;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			continue;
		}
		/* Relative target port id lands in bytes 2-3 of the entry. */
		((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
		p += 4;
		nports--;
	}
	if (nports_standby != 0 && ilu_alua != 0) {
		p[0] = 0x02;	/* Non PREF, Standby */
		p[1] = 5;	/* AO_SUP, S_SUP */
		if (stmf_state.stmf_alua_node == 1) {
			p[3] = 0;	/* Group 0 */
		} else {
			p[3] = 1;	/* Group 1 */
		}
		p[7] = nports_standby & 0xff;
		p += 8;
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL &&
		    nports_standby != 0; ilport = ilport->ilport_next) {
			if (ilport->ilport_standby == 0) {
				continue;
			}
			((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
			p += 4;
			nports_standby--;
		}
	}

	mutex_exit(&stmf_state.stmf_lock);

	return (xd);
}

/*
 * Look up the local port with the given relative target port id and
 * return a kmem_zalloc'd copy of its devid descriptor (caller frees),
 * or NULL if not found or allocation fails.
 */
struct scsi_devid_desc *
stmf_scsilib_get_devid_desc(uint16_t rtpid)
{
	scsi_devid_desc_t *devid = NULL;
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);

	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_rtpid == rtpid) {
			scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
			uint32_t id_sz = sizeof (scsi_devid_desc_t) +
			    id->ident_length;
			devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
			    KM_NOSLEEP);
			if (devid != NULL) {
				bcopy(id, devid, id_sz);
			}
			break;
		}
	}

	mutex_exit(&stmf_state.stmf_lock);
	return (devid);
}

/*
 * Reverse lookup: find the relative target port id for a devid
 * descriptor by comparing identifiers.  Returns 0 when no port matches.
 */
uint16_t
stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
{
	stmf_i_local_port_t *ilport;
	scsi_devid_desc_t *id;
	uint16_t rtpid = 0;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		id = ilport->ilport_lport->lport_id;
		if ((devid->ident_length == id->ident_length) &&
		    (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
			rtpid = ilport->ilport_rtpid;
			break;
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (rtpid);
}

/* Monotonic generation counter mixed into generated LU ids. */
static uint16_t stmf_lu_id_gen_number = 0;

stmf_status_t
stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
{
	return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
}

/*
 * Generate a unique 16-byte LU identifier from the company id, a host
 * id (or the local ethernet address when host_id is 0), a timestamp and
 * a generation counter.  lu_id->ident_length must be 0x10.
 */
stmf_status_t
stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
    scsi_devid_desc_t *lu_id)
{
	uint8_t *p;
	struct timeval32 timestamp32;
	uint32_t *t = (uint32_t *)&timestamp32;
	struct ether_addr mac;
	uint8_t *e = (uint8_t *)&mac;
	int
hid = (int)host_id;
	uint16_t gen_number;

	if (company_id == COMPANY_ID_NONE)
		company_id = COMPANY_ID_SUN;

	if (lu_id->ident_length != 0x10)
		return (STMF_INVALID_ARG);

	p = (uint8_t *)lu_id;

	gen_number = atomic_inc_16_nv(&stmf_lu_id_gen_number);

	/* Header bytes, then the 24-bit company id packed into p[4..7]. */
	p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
	p[4] = ((company_id >> 20) & 0xf) | 0x60;
	p[5] = (company_id >> 12) & 0xff;
	p[6] = (company_id >> 4) & 0xff;
	p[7] = (company_id << 4) & 0xf0;
	/* No host id and no MAC address: fall back to the zone hostid. */
	if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
		hid = BE_32((int)zone_get_hostid(NULL));
	}
	if (hid != 0) {
		e[0] = (hid >> 24) & 0xff;
		e[1] = (hid >> 16) & 0xff;
		e[2] = (hid >> 8) & 0xff;
		e[3] = hid & 0xff;
		e[4] = e[5] = 0;
	}
	bcopy(e, p+8, 6);
	uniqtime32(&timestamp32);
	*t = BE_32(*t);
	bcopy(t, p+14, 4);
	p[18] = (gen_number >> 8) & 0xff;
	p[19] = gen_number & 0xff;

	return (STMF_SUCCESS);
}

/*
 * saa is sense key, ASC, ASCQ
 */
void
stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
{
	/*
	 * Fixed-format sense buffer (response code 0x70) for CHECK
	 * CONDITION (st == 2).
	 * NOTE(review): sd is a stack buffer; task_sense_data points at it
	 * only for the duration of the stmf_send_scsi_status() call below --
	 * confirm nothing retains the pointer after return.
	 */
	uint8_t sd[18];
	task->task_scsi_status = st;
	if (st == 2) {
		bzero(sd, 18);
		sd[0] = 0x70;
		sd[2] = (saa >> 16) & 0xf;
		sd[7] = 10;
		sd[12] = (saa >> 8) & 0xff;
		sd[13] = saa & 0xff;
		task->task_sense_data = sd;
		task->task_sense_length = 18;
	} else {
		task->task_sense_data = NULL;
		task->task_sense_length = 0;
	}
	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
}

/*
 * Build an INQUIRY VPD page 0x83 (device identification) into page[],
 * appending the descriptors selected by vpd_mask (LU id, target id,
 * target port group, relative target port id).  byte0 seeds page[0];
 * returns the number of bytes written (n), while the page length field
 * records the full descriptor length (m) even if truncated by page_len.
 */
uint32_t
stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
    uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
{
	uint8_t *p = NULL;
	uint8_t small_buf[32];
	uint32_t sz = 0;
	uint32_t n = 4;
	uint32_t m = 0;
	uint32_t last_bit = 0;

	if (page_len < 4)
		return (0);
	if (page_len > 65535)
		page_len = 65535;

	page[0] = byte0;
	page[1] = 0x83;

	/* CONSTCOND */
	while (1) {
		/*
		 * Flush the descriptor prepared in the previous iteration
		 * (p/sz), clipped to the remaining page space, then pick
		 * the next requested descriptor from vpd_mask.
		 */
		m += sz;
		if (sz && (page_len > n)) {
			uint32_t copysz;
			copysz = page_len > (n + sz) ? sz : page_len - n;
			bcopy(p, page + n, copysz);
			n += copysz;
		}
		vpd_mask &= ~last_bit;
		if (vpd_mask == 0)
			break;

		if (vpd_mask & STMF_VPD_LU_ID) {
			last_bit = STMF_VPD_LU_ID;
			sz = task->task_lu->lu_id->ident_length + 4;
			p = (uint8_t *)task->task_lu->lu_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TARGET_ID) {
			last_bit = STMF_VPD_TARGET_ID;
			sz = task->task_lport->lport_id->ident_length + 4;
			p = (uint8_t *)task->task_lport->lport_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TP_GROUP) {
			stmf_i_local_port_t *ilport;
			last_bit = STMF_VPD_TP_GROUP;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x15;
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			/*
			 * If we're in alua mode, group 1 contains all alua
			 * participating ports and all standby ports
			 * > 255. Otherwise, if we're in alua mode, any local
			 * ports (non standby/pppt) are also in group 1 if the
			 * alua node is 1. Otherwise the group is 0.
			 */
			if ((stmf_state.stmf_alua_state &&
			    (ilport->ilport_alua || ilport->ilport_standby) &&
			    ilport->ilport_rtpid > 255) ||
			    (stmf_state.stmf_alua_node == 1 &&
			    ilport->ilport_standby != 1)) {
				p[7] = 1;	/* Group 1 */
			}
			sz = 8;
			continue;
		} else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
			stmf_i_local_port_t *ilport;

			last_bit = STMF_VPD_RELATIVE_TP_ID;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x14;
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
			p[7] = ilport->ilport_rtpid & 0xff;
			sz = 8;
			continue;
		} else {
			cmn_err(CE_WARN, "Invalid vpd_mask");
			break;
		}
	}

	/* Page length field covers all descriptors, even if truncated. */
	page[2] = (m >> 8) & 0xff;
	page[3] = m & 0xff;

	return (n);
}

/*
 * Default handling of REPORT TARGET PORT GROUPS: decode the allocation
 * length from the CDB, build the TPGS payload and start the data
 * transfer to the remote port.  Aborts the task on allocation failure.
 */
void
stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu =
	    (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	stmf_xfer_data_t *xd;
	uint32_t sz, minsz;

	mutex_enter(&itask->itask_mutex);
	itask->itask_flags |= ITASK_DEFAULT_HANDLING;

	/* Allocation length from CDB bytes 6-9 (big-endian). */
	task->task_cmd_xfer_length =
	    ((((uint32_t)task->task_cdb[6]) << 24) |
	    (((uint32_t)task->task_cdb[7]) << 16) |
	    (((uint32_t)task->task_cdb[8]) << 8) |
	    ((uint32_t)task->task_cdb[9]));

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}
	mutex_exit(&itask->itask_mutex);

	if (task->task_cmd_xfer_length == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (task->task_cmd_xfer_length < 4) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	sz =
min(task->task_expected_xfer_length, 6290 task->task_cmd_xfer_length); 6291 6292 xd = stmf_prepare_tpgs_data(ilu->ilu_alua); 6293 6294 if (xd == NULL) { 6295 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6296 STMF_ALLOC_FAILURE, NULL); 6297 return; 6298 } 6299 6300 sz = min(sz, xd->size_left); 6301 xd->size_left = sz; 6302 minsz = min(512, sz); 6303 6304 if (dbuf == NULL) 6305 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 6306 if (dbuf == NULL) { 6307 kmem_free(xd, xd->alloc_size); 6308 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6309 STMF_ALLOC_FAILURE, NULL); 6310 return; 6311 } 6312 dbuf->db_lu_private = xd; 6313 stmf_xd_to_dbuf(dbuf, 1); 6314 6315 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 6316 (void) stmf_xfer_data(task, dbuf, 0); 6317 6318 } 6319 6320 void 6321 stmf_scsilib_handle_task_mgmt(scsi_task_t *task) 6322 { 6323 6324 switch (task->task_mgmt_function) { 6325 /* 6326 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET 6327 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state 6328 * in these cases. This needs to be changed to abort only the required 6329 * set. 
6330 */ 6331 case TM_ABORT_TASK: 6332 case TM_ABORT_TASK_SET: 6333 case TM_CLEAR_TASK_SET: 6334 case TM_LUN_RESET: 6335 stmf_handle_lun_reset(task); 6336 /* issue the reset to the proxy node as well */ 6337 if (stmf_state.stmf_alua_state == 1) { 6338 (void) stmf_proxy_scsi_cmd(task, NULL); 6339 } 6340 return; 6341 case TM_TARGET_RESET: 6342 case TM_TARGET_COLD_RESET: 6343 case TM_TARGET_WARM_RESET: 6344 stmf_handle_target_reset(task); 6345 return; 6346 default: 6347 /* We dont support this task mgmt function */ 6348 stmf_scsilib_send_status(task, STATUS_CHECK, 6349 STMF_SAA_INVALID_FIELD_IN_CMD_IU); 6350 return; 6351 } 6352 } 6353 6354 void 6355 stmf_handle_lun_reset(scsi_task_t *task) 6356 { 6357 stmf_i_scsi_task_t *itask; 6358 stmf_i_lu_t *ilu; 6359 6360 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 6361 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 6362 6363 /* 6364 * To sync with target reset, grab this lock. The LU is not going 6365 * anywhere as there is atleast one task pending (this task). 6366 */ 6367 mutex_enter(&stmf_state.stmf_lock); 6368 6369 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 6370 mutex_exit(&stmf_state.stmf_lock); 6371 stmf_scsilib_send_status(task, STATUS_CHECK, 6372 STMF_SAA_OPERATION_IN_PROGRESS); 6373 return; 6374 } 6375 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 6376 mutex_exit(&stmf_state.stmf_lock); 6377 6378 /* 6379 * Mark this task as the one causing LU reset so that we know who 6380 * was responsible for setting the ILU_RESET_ACTIVE. In case this 6381 * task itself gets aborted, we will clear ILU_RESET_ACTIVE. 
6382 */ 6383 mutex_enter(&itask->itask_mutex); 6384 itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET; 6385 mutex_exit(&itask->itask_mutex); 6386 6387 /* Initiatiate abort on all commands on this LU except this one */ 6388 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu); 6389 6390 /* Start polling on this task */ 6391 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 6392 != STMF_SUCCESS) { 6393 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 6394 NULL); 6395 return; 6396 } 6397 } 6398 6399 void 6400 stmf_handle_target_reset(scsi_task_t *task) 6401 { 6402 stmf_i_scsi_task_t *itask; 6403 stmf_i_lu_t *ilu; 6404 stmf_i_scsi_session_t *iss; 6405 stmf_lun_map_t *lm; 6406 stmf_lun_map_ent_t *lm_ent; 6407 int i, lf; 6408 6409 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 6410 iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private; 6411 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 6412 6413 /* 6414 * To sync with LUN reset, grab this lock. The session is not going 6415 * anywhere as there is atleast one task pending (this task). 6416 */ 6417 mutex_enter(&stmf_state.stmf_lock); 6418 6419 /* Grab the session lock as a writer to prevent any changes in it */ 6420 rw_enter(iss->iss_lockp, RW_WRITER); 6421 6422 if (iss->iss_flags & ISS_RESET_ACTIVE) { 6423 rw_exit(iss->iss_lockp); 6424 mutex_exit(&stmf_state.stmf_lock); 6425 stmf_scsilib_send_status(task, STATUS_CHECK, 6426 STMF_SAA_OPERATION_IN_PROGRESS); 6427 return; 6428 } 6429 atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE); 6430 6431 /* 6432 * Now go through each LUN in this session and make sure all of them 6433 * can be reset. 
6434 */ 6435 lm = iss->iss_sm; 6436 for (i = 0, lf = 0; i < lm->lm_nentries; i++) { 6437 if (lm->lm_plus[i] == NULL) 6438 continue; 6439 lf++; 6440 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6441 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 6442 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 6443 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6444 rw_exit(iss->iss_lockp); 6445 mutex_exit(&stmf_state.stmf_lock); 6446 stmf_scsilib_send_status(task, STATUS_CHECK, 6447 STMF_SAA_OPERATION_IN_PROGRESS); 6448 return; 6449 } 6450 } 6451 if (lf == 0) { 6452 /* No luns in this session */ 6453 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6454 rw_exit(iss->iss_lockp); 6455 mutex_exit(&stmf_state.stmf_lock); 6456 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6457 return; 6458 } 6459 6460 /* ok, start the damage */ 6461 mutex_enter(&itask->itask_mutex); 6462 itask->itask_flags |= ITASK_DEFAULT_HANDLING | 6463 ITASK_CAUSING_TARGET_RESET; 6464 mutex_exit(&itask->itask_mutex); 6465 for (i = 0; i < lm->lm_nentries; i++) { 6466 if (lm->lm_plus[i] == NULL) 6467 continue; 6468 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6469 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 6470 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 6471 } 6472 6473 for (i = 0; i < lm->lm_nentries; i++) { 6474 if (lm->lm_plus[i] == NULL) 6475 continue; 6476 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6477 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, 6478 lm_ent->ent_lu); 6479 } 6480 6481 rw_exit(iss->iss_lockp); 6482 mutex_exit(&stmf_state.stmf_lock); 6483 6484 /* Start polling on this task */ 6485 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 6486 != STMF_SUCCESS) { 6487 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 6488 NULL); 6489 return; 6490 } 6491 } 6492 6493 int 6494 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask) 6495 { 6496 scsi_task_t *task = itask->itask_task; 6497 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 6498 
task->task_session->ss_stmf_private; 6499 6500 rw_enter(iss->iss_lockp, RW_WRITER); 6501 if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) || 6502 (task->task_cdb[0] == SCMD_INQUIRY)) { 6503 rw_exit(iss->iss_lockp); 6504 return (0); 6505 } 6506 atomic_and_32(&iss->iss_flags, 6507 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 6508 rw_exit(iss->iss_lockp); 6509 6510 if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 6511 return (0); 6512 } 6513 stmf_scsilib_send_status(task, STATUS_CHECK, 6514 STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED); 6515 return (1); 6516 } 6517 6518 void 6519 stmf_worker_init() 6520 { 6521 uint32_t i; 6522 stmf_worker_t *w; 6523 6524 /* Make local copy of global tunables */ 6525 6526 /* 6527 * Allow workers to be scaled down to a very low number for cases 6528 * where the load is light. If the number of threads gets below 6529 * 4 assume it is a mistake and force the threads back to a 6530 * reasonable number. The low limit of 4 is simply legacy and 6531 * may be too low. 
6532 */ 6533 ASSERT(stmf_workers == NULL); 6534 if (stmf_nworkers < 4) { 6535 stmf_nworkers = 64; 6536 } 6537 6538 stmf_workers = (stmf_worker_t *)kmem_zalloc( 6539 sizeof (stmf_worker_t) * stmf_nworkers, KM_SLEEP); 6540 for (i = 0; i < stmf_nworkers; i++) { 6541 stmf_worker_t *w = &stmf_workers[i]; 6542 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL); 6543 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL); 6544 } 6545 stmf_workers_state = STMF_WORKERS_ENABLED; 6546 6547 /* Check if we are starting */ 6548 if (stmf_nworkers_cur < stmf_nworkers - 1) { 6549 for (i = stmf_nworkers_cur; i < stmf_nworkers; i++) { 6550 w = &stmf_workers[i]; 6551 w->worker_tid = thread_create(NULL, 0, stmf_worker_task, 6552 (void *)&stmf_workers[i], 0, &p0, TS_RUN, 6553 minclsyspri); 6554 stmf_nworkers_accepting_cmds++; 6555 } 6556 return; 6557 } 6558 6559 /* Lets wait for atleast one worker to start */ 6560 while (stmf_nworkers_cur == 0) 6561 delay(drv_usectohz(20 * 1000)); 6562 } 6563 6564 stmf_status_t 6565 stmf_worker_fini() 6566 { 6567 int i; 6568 clock_t sb; 6569 6570 if (stmf_workers_state == STMF_WORKERS_DISABLED) 6571 return (STMF_SUCCESS); 6572 ASSERT(stmf_workers); 6573 stmf_workers_state = STMF_WORKERS_DISABLED; 6574 cv_signal(&stmf_state.stmf_cv); 6575 6576 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000); 6577 /* Wait for all the threads to die */ 6578 while (stmf_nworkers_cur != 0) { 6579 if (ddi_get_lbolt() > sb) { 6580 stmf_workers_state = STMF_WORKERS_ENABLED; 6581 return (STMF_BUSY); 6582 } 6583 delay(drv_usectohz(100 * 1000)); 6584 } 6585 for (i = 0; i < stmf_nworkers; i++) { 6586 stmf_worker_t *w = &stmf_workers[i]; 6587 mutex_destroy(&w->worker_lock); 6588 cv_destroy(&w->worker_cv); 6589 } 6590 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_nworkers); 6591 stmf_workers = NULL; 6592 6593 return (STMF_SUCCESS); 6594 } 6595 6596 void 6597 stmf_worker_task(void *arg) 6598 { 6599 stmf_worker_t *w; 6600 stmf_i_scsi_session_t *iss; 6601 scsi_task_t *task; 6602 
stmf_i_scsi_task_t *itask; 6603 stmf_data_buf_t *dbuf; 6604 stmf_lu_t *lu; 6605 clock_t wait_timer = 0; 6606 clock_t wait_ticks, wait_delta = 0; 6607 uint8_t curcmd; 6608 uint8_t abort_free; 6609 uint8_t wait_queue; 6610 uint8_t dec_qdepth; 6611 6612 w = (stmf_worker_t *)arg; 6613 wait_ticks = drv_usectohz(10000); 6614 6615 DTRACE_PROBE1(worker__create, stmf_worker_t, w); 6616 mutex_enter(&w->worker_lock); 6617 w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE; 6618 atomic_inc_32(&stmf_nworkers_cur); 6619 6620 stmf_worker_loop: 6621 if ((w->worker_ref_count == 0) && 6622 (w->worker_flags & STMF_WORKER_TERMINATE)) { 6623 w->worker_flags &= ~(STMF_WORKER_STARTED | 6624 STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE); 6625 w->worker_tid = NULL; 6626 mutex_exit(&w->worker_lock); 6627 DTRACE_PROBE1(worker__destroy, stmf_worker_t, w); 6628 atomic_dec_32(&stmf_nworkers_cur); 6629 thread_exit(); 6630 } 6631 6632 /* CONSTCOND */ 6633 while (1) { 6634 /* worker lock is held at this point */ 6635 dec_qdepth = 0; 6636 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) { 6637 wait_timer = 0; 6638 wait_delta = 0; 6639 if (w->worker_wait_head) { 6640 ASSERT(w->worker_wait_tail); 6641 if (w->worker_task_head == NULL) 6642 w->worker_task_head = 6643 w->worker_wait_head; 6644 else 6645 w->worker_task_tail->itask_worker_next = 6646 w->worker_wait_head; 6647 w->worker_task_tail = w->worker_wait_tail; 6648 w->worker_wait_head = w->worker_wait_tail = 6649 NULL; 6650 } 6651 } 6652 6653 STMF_DEQUEUE_ITASK(w, itask); 6654 if (itask == NULL) 6655 break; 6656 6657 ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0); 6658 task = itask->itask_task; 6659 DTRACE_PROBE2(worker__active, stmf_worker_t, w, 6660 scsi_task_t *, task); 6661 wait_queue = 0; 6662 abort_free = 0; 6663 mutex_exit(&w->worker_lock); 6664 mutex_enter(&itask->itask_mutex); 6665 mutex_enter(&w->worker_lock); 6666 6667 if (itask->itask_ncmds > 0) { 6668 curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1]; 6669 } 
else { 6670 ASSERT(itask->itask_flags & ITASK_BEING_ABORTED); 6671 } 6672 if (itask->itask_flags & ITASK_BEING_ABORTED) { 6673 itask->itask_ncmds = 1; 6674 curcmd = itask->itask_cmd_stack[0] = 6675 ITASK_CMD_ABORT; 6676 goto out_itask_flag_loop; 6677 } else if ((curcmd & ITASK_CMD_MASK) == ITASK_CMD_NEW_TASK) { 6678 /* 6679 * set ITASK_KSTAT_IN_RUNQ, this flag 6680 * will not reset until task completed 6681 */ 6682 itask->itask_flags |= ITASK_KNOWN_TO_LU | 6683 ITASK_KSTAT_IN_RUNQ; 6684 } else { 6685 goto out_itask_flag_loop; 6686 } 6687 6688 out_itask_flag_loop: 6689 6690 /* 6691 * Decide if this task needs to go to a queue and/or if 6692 * we can decrement the itask_cmd_stack. 6693 */ 6694 if (curcmd == ITASK_CMD_ABORT) { 6695 if (itask->itask_flags & (ITASK_KNOWN_TO_LU | 6696 ITASK_KNOWN_TO_TGT_PORT)) { 6697 wait_queue = 1; 6698 } else { 6699 abort_free = 1; 6700 } 6701 } else if ((curcmd & ITASK_CMD_POLL) && 6702 (itask->itask_poll_timeout > ddi_get_lbolt())) { 6703 wait_queue = 1; 6704 } 6705 6706 if (wait_queue) { 6707 itask->itask_worker_next = NULL; 6708 if (w->worker_wait_tail) { 6709 w->worker_wait_tail->itask_worker_next = itask; 6710 } else { 6711 w->worker_wait_head = itask; 6712 } 6713 w->worker_wait_tail = itask; 6714 if (wait_timer == 0) { 6715 wait_timer = ddi_get_lbolt() + wait_ticks; 6716 wait_delta = wait_ticks; 6717 } 6718 } else if ((--(itask->itask_ncmds)) != 0) { 6719 itask->itask_worker_next = NULL; 6720 if (w->worker_task_tail) { 6721 w->worker_task_tail->itask_worker_next = itask; 6722 } else { 6723 w->worker_task_head = itask; 6724 } 6725 w->worker_task_tail = itask; 6726 } else { 6727 atomic_and_32(&itask->itask_flags, 6728 ~ITASK_IN_WORKER_QUEUE); 6729 /* 6730 * This is where the queue depth should go down by 6731 * one but we delay that on purpose to account for 6732 * the call into the provider. The actual decrement 6733 * happens after the worker has done its job. 
6734 */ 6735 dec_qdepth = 1; 6736 itask->itask_waitq_time += 6737 gethrtime() - itask->itask_waitq_enter_timestamp; 6738 } 6739 6740 /* We made it here means we are going to call LU */ 6741 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) 6742 lu = task->task_lu; 6743 else 6744 lu = dlun0; 6745 6746 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)]; 6747 mutex_exit(&w->worker_lock); 6748 curcmd &= ITASK_CMD_MASK; 6749 stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf); 6750 mutex_exit(&itask->itask_mutex); 6751 6752 switch (curcmd) { 6753 case ITASK_CMD_NEW_TASK: 6754 iss = (stmf_i_scsi_session_t *) 6755 task->task_session->ss_stmf_private; 6756 stmf_itl_lu_new_task(itask); 6757 if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) { 6758 if (stmf_handle_cmd_during_ic(itask)) { 6759 break; 6760 } 6761 } 6762 #ifdef DEBUG 6763 if (stmf_drop_task_counter > 0) { 6764 if (atomic_dec_32_nv( 6765 (uint32_t *)&stmf_drop_task_counter) == 1) { 6766 break; 6767 } 6768 } 6769 #endif 6770 DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task); 6771 lu->lu_new_task(task, dbuf); 6772 break; 6773 case ITASK_CMD_DATA_XFER_DONE: 6774 lu->lu_dbuf_xfer_done(task, dbuf); 6775 break; 6776 case ITASK_CMD_STATUS_DONE: 6777 lu->lu_send_status_done(task); 6778 break; 6779 case ITASK_CMD_ABORT: 6780 if (abort_free) { 6781 mutex_enter(&itask->itask_mutex); 6782 stmf_task_free(task); 6783 } else { 6784 stmf_do_task_abort(task); 6785 } 6786 break; 6787 case ITASK_CMD_POLL_LU: 6788 if (!wait_queue) { 6789 lu->lu_task_poll(task); 6790 } 6791 break; 6792 case ITASK_CMD_POLL_LPORT: 6793 if (!wait_queue) 6794 task->task_lport->lport_task_poll(task); 6795 break; 6796 case ITASK_CMD_SEND_STATUS: 6797 /* case ITASK_CMD_XFER_DATA: */ 6798 break; 6799 } 6800 6801 mutex_enter(&w->worker_lock); 6802 if (dec_qdepth) { 6803 w->worker_queue_depth--; 6804 } 6805 } 6806 if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) { 6807 if (w->worker_ref_count == 0) 6808 goto stmf_worker_loop; 6809 
else { 6810 wait_timer = ddi_get_lbolt() + 1; 6811 wait_delta = 1; 6812 } 6813 } 6814 w->worker_flags &= ~STMF_WORKER_ACTIVE; 6815 if (wait_timer) { 6816 DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w); 6817 (void) cv_reltimedwait(&w->worker_cv, &w->worker_lock, 6818 wait_delta, TR_CLOCK_TICK); 6819 } else { 6820 DTRACE_PROBE1(worker__sleep, stmf_worker_t, w); 6821 cv_wait(&w->worker_cv, &w->worker_lock); 6822 } 6823 DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w); 6824 w->worker_flags |= STMF_WORKER_ACTIVE; 6825 goto stmf_worker_loop; 6826 } 6827 6828 /* 6829 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private). 6830 * If all the data has been filled out, frees the xd and makes 6831 * db_lu_private NULL. 6832 */ 6833 void 6834 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off) 6835 { 6836 stmf_xfer_data_t *xd; 6837 uint8_t *p; 6838 int i; 6839 uint32_t s; 6840 6841 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 6842 dbuf->db_data_size = 0; 6843 if (set_rel_off) 6844 dbuf->db_relative_offset = xd->size_done; 6845 for (i = 0; i < dbuf->db_sglist_length; i++) { 6846 s = min(xd->size_left, dbuf->db_sglist[i].seg_length); 6847 p = &xd->buf[xd->size_done]; 6848 bcopy(p, dbuf->db_sglist[i].seg_addr, s); 6849 xd->size_left -= s; 6850 xd->size_done += s; 6851 dbuf->db_data_size += s; 6852 if (xd->size_left == 0) { 6853 kmem_free(xd, xd->alloc_size); 6854 dbuf->db_lu_private = NULL; 6855 return; 6856 } 6857 } 6858 } 6859 6860 /* ARGSUSED */ 6861 stmf_status_t 6862 stmf_dlun0_task_alloc(scsi_task_t *task) 6863 { 6864 return (STMF_SUCCESS); 6865 } 6866 6867 void 6868 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 6869 { 6870 uint8_t *cdbp = (uint8_t *)&task->task_cdb[0]; 6871 stmf_i_scsi_session_t *iss; 6872 uint32_t sz, minsz; 6873 uint8_t *p; 6874 stmf_xfer_data_t *xd; 6875 uint8_t inq_page_length = 31; 6876 6877 if (task->task_mgmt_function) { 6878 stmf_scsilib_handle_task_mgmt(task); 6879 return; 6880 } 6881 6882 switch 
(cdbp[0]) { 6883 case SCMD_INQUIRY: 6884 /* 6885 * Basic protocol checks. In addition, only reply to 6886 * standard inquiry. Otherwise, the LU provider needs 6887 * to respond. 6888 */ 6889 6890 if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) { 6891 stmf_scsilib_send_status(task, STATUS_CHECK, 6892 STMF_SAA_INVALID_FIELD_IN_CDB); 6893 return; 6894 } 6895 6896 task->task_cmd_xfer_length = 6897 (((uint32_t)cdbp[3]) << 8) | cdbp[4]; 6898 6899 if (task->task_additional_flags & 6900 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 6901 task->task_expected_xfer_length = 6902 task->task_cmd_xfer_length; 6903 } 6904 6905 sz = min(task->task_expected_xfer_length, 6906 min(36, task->task_cmd_xfer_length)); 6907 minsz = 36; 6908 6909 if (sz == 0) { 6910 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6911 return; 6912 } 6913 6914 if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) { 6915 /* 6916 * Ignore any preallocated dbuf if the size is less 6917 * than 36. It will be freed during the task_free. 6918 */ 6919 dbuf = NULL; 6920 } 6921 if (dbuf == NULL) 6922 dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0); 6923 if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) { 6924 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6925 STMF_ALLOC_FAILURE, NULL); 6926 return; 6927 } 6928 dbuf->db_lu_private = NULL; 6929 6930 p = dbuf->db_sglist[0].seg_addr; 6931 6932 /* 6933 * Standard inquiry handling only. 
6934 */ 6935 6936 bzero(p, inq_page_length + 5); 6937 6938 p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN; 6939 p[2] = 5; 6940 p[3] = 0x12; 6941 p[4] = inq_page_length; 6942 p[6] = 0x80; 6943 6944 (void) strncpy((char *)p+8, "SUN ", 8); 6945 (void) strncpy((char *)p+16, "COMSTAR ", 16); 6946 (void) strncpy((char *)p+32, "1.0 ", 4); 6947 6948 dbuf->db_data_size = sz; 6949 dbuf->db_relative_offset = 0; 6950 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 6951 (void) stmf_xfer_data(task, dbuf, 0); 6952 6953 return; 6954 6955 case SCMD_REPORT_LUNS: 6956 task->task_cmd_xfer_length = 6957 ((((uint32_t)task->task_cdb[6]) << 24) | 6958 (((uint32_t)task->task_cdb[7]) << 16) | 6959 (((uint32_t)task->task_cdb[8]) << 8) | 6960 ((uint32_t)task->task_cdb[9])); 6961 6962 if (task->task_additional_flags & 6963 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 6964 task->task_expected_xfer_length = 6965 task->task_cmd_xfer_length; 6966 } 6967 6968 sz = min(task->task_expected_xfer_length, 6969 task->task_cmd_xfer_length); 6970 6971 if (sz < 16) { 6972 stmf_scsilib_send_status(task, STATUS_CHECK, 6973 STMF_SAA_INVALID_FIELD_IN_CDB); 6974 return; 6975 } 6976 6977 iss = (stmf_i_scsi_session_t *) 6978 task->task_session->ss_stmf_private; 6979 rw_enter(iss->iss_lockp, RW_WRITER); 6980 xd = stmf_session_prepare_report_lun_data(iss->iss_sm); 6981 rw_exit(iss->iss_lockp); 6982 6983 if (xd == NULL) { 6984 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6985 STMF_ALLOC_FAILURE, NULL); 6986 return; 6987 } 6988 6989 sz = min(sz, xd->size_left); 6990 xd->size_left = sz; 6991 minsz = min(512, sz); 6992 6993 if (dbuf == NULL) 6994 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 6995 if (dbuf == NULL) { 6996 kmem_free(xd, xd->alloc_size); 6997 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6998 STMF_ALLOC_FAILURE, NULL); 6999 return; 7000 } 7001 dbuf->db_lu_private = xd; 7002 stmf_xd_to_dbuf(dbuf, 1); 7003 7004 atomic_and_32(&iss->iss_flags, 7005 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 7006 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 
7007 (void) stmf_xfer_data(task, dbuf, 0); 7008 return; 7009 } 7010 7011 stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE); 7012 } 7013 7014 void 7015 stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf) 7016 { 7017 stmf_i_scsi_task_t *itask = 7018 (stmf_i_scsi_task_t *)task->task_stmf_private; 7019 7020 if (dbuf->db_xfer_status != STMF_SUCCESS) { 7021 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 7022 dbuf->db_xfer_status, NULL); 7023 return; 7024 } 7025 task->task_nbytes_transferred += dbuf->db_data_size; 7026 if (dbuf->db_lu_private) { 7027 /* There is more */ 7028 stmf_xd_to_dbuf(dbuf, 1); 7029 (void) stmf_xfer_data(task, dbuf, 0); 7030 return; 7031 } 7032 7033 stmf_free_dbuf(task, dbuf); 7034 /* 7035 * If this is a proxy task, it will need to be completed from the 7036 * proxy port provider. This message lets pppt know that the xfer 7037 * is complete. When we receive the status from pppt, we will 7038 * then relay that status back to the lport. 7039 */ 7040 if (itask->itask_flags & ITASK_PROXY_TASK) { 7041 stmf_ic_msg_t *ic_xfer_done_msg = NULL; 7042 stmf_status_t ic_ret = STMF_FAILURE; 7043 uint64_t session_msg_id; 7044 mutex_enter(&stmf_state.stmf_lock); 7045 session_msg_id = stmf_proxy_msg_id++; 7046 mutex_exit(&stmf_state.stmf_lock); 7047 /* send xfer done status to pppt */ 7048 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc( 7049 itask->itask_proxy_msg_id, 7050 task->task_session->ss_session_id, 7051 STMF_SUCCESS, session_msg_id); 7052 if (ic_xfer_done_msg) { 7053 ic_ret = ic_tx_msg(ic_xfer_done_msg); 7054 if (ic_ret != STMF_IC_MSG_SUCCESS) { 7055 cmn_err(CE_WARN, "unable to xmit session msg"); 7056 } 7057 } 7058 /* task will be completed from pppt */ 7059 return; 7060 } 7061 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 7062 } 7063 7064 /* ARGSUSED */ 7065 void 7066 stmf_dlun0_status_done(scsi_task_t *task) 7067 { 7068 } 7069 7070 /* ARGSUSED */ 7071 void 7072 stmf_dlun0_task_free(scsi_task_t *task) 7073 { 7074 } 7075 7076 
/* ARGSUSED */ 7077 stmf_status_t 7078 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags) 7079 { 7080 scsi_task_t *task = (scsi_task_t *)arg; 7081 stmf_i_scsi_task_t *itask = 7082 (stmf_i_scsi_task_t *)task->task_stmf_private; 7083 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 7084 int i; 7085 uint8_t map; 7086 7087 if ((task->task_mgmt_function) && (itask->itask_flags & 7088 (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) { 7089 switch (task->task_mgmt_function) { 7090 case TM_ABORT_TASK: 7091 case TM_ABORT_TASK_SET: 7092 case TM_CLEAR_TASK_SET: 7093 case TM_LUN_RESET: 7094 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 7095 break; 7096 case TM_TARGET_RESET: 7097 case TM_TARGET_COLD_RESET: 7098 case TM_TARGET_WARM_RESET: 7099 stmf_abort_target_reset(task); 7100 break; 7101 } 7102 return (STMF_ABORT_SUCCESS); 7103 } 7104 7105 /* 7106 * OK so its not a task mgmt. Make sure we free any xd sitting 7107 * inside any dbuf. 7108 */ 7109 if ((map = itask->itask_allocated_buf_map) != 0) { 7110 for (i = 0; i < 4; i++) { 7111 if ((map & 1) && 7112 ((itask->itask_dbufs[i])->db_lu_private)) { 7113 stmf_xfer_data_t *xd; 7114 stmf_data_buf_t *dbuf; 7115 7116 dbuf = itask->itask_dbufs[i]; 7117 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 7118 dbuf->db_lu_private = NULL; 7119 kmem_free(xd, xd->alloc_size); 7120 } 7121 map >>= 1; 7122 } 7123 } 7124 return (STMF_ABORT_SUCCESS); 7125 } 7126 7127 void 7128 stmf_dlun0_task_poll(struct scsi_task *task) 7129 { 7130 /* Right now we only do this for handling task management functions */ 7131 ASSERT(task->task_mgmt_function); 7132 7133 switch (task->task_mgmt_function) { 7134 case TM_ABORT_TASK: 7135 case TM_ABORT_TASK_SET: 7136 case TM_CLEAR_TASK_SET: 7137 case TM_LUN_RESET: 7138 (void) stmf_lun_reset_poll(task->task_lu, task, 0); 7139 return; 7140 case TM_TARGET_RESET: 7141 case TM_TARGET_COLD_RESET: 7142 case TM_TARGET_WARM_RESET: 7143 stmf_target_reset_poll(task); 7144 return; 
7145 } 7146 } 7147 7148 /* ARGSUSED */ 7149 void 7150 stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg) 7151 { 7152 /* This function will never be called */ 7153 cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd); 7154 } 7155 7156 /* ARGSUSED */ 7157 void 7158 stmf_dlun0_task_done(struct scsi_task *task) 7159 { 7160 } 7161 7162 void 7163 stmf_dlun_init() 7164 { 7165 stmf_i_lu_t *ilu; 7166 7167 dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0); 7168 dlun0->lu_task_alloc = stmf_dlun0_task_alloc; 7169 dlun0->lu_new_task = stmf_dlun0_new_task; 7170 dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done; 7171 dlun0->lu_send_status_done = stmf_dlun0_status_done; 7172 dlun0->lu_task_free = stmf_dlun0_task_free; 7173 dlun0->lu_abort = stmf_dlun0_abort; 7174 dlun0->lu_task_poll = stmf_dlun0_task_poll; 7175 dlun0->lu_ctl = stmf_dlun0_ctl; 7176 dlun0->lu_task_done = stmf_dlun0_task_done; 7177 7178 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 7179 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 7180 } 7181 7182 stmf_status_t 7183 stmf_dlun_fini() 7184 { 7185 stmf_i_lu_t *ilu; 7186 7187 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 7188 7189 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 7190 if (ilu->ilu_ntasks) { 7191 stmf_i_scsi_task_t *itask, *nitask; 7192 7193 nitask = ilu->ilu_tasks; 7194 do { 7195 itask = nitask; 7196 nitask = itask->itask_lu_next; 7197 dlun0->lu_task_free(itask->itask_task); 7198 stmf_free(itask->itask_task); 7199 } while (nitask != NULL); 7200 7201 } 7202 stmf_free(dlun0); 7203 return (STMF_SUCCESS); 7204 } 7205 7206 void 7207 stmf_abort_target_reset(scsi_task_t *task) 7208 { 7209 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 7210 task->task_session->ss_stmf_private; 7211 stmf_lun_map_t *lm; 7212 stmf_lun_map_ent_t *lm_ent; 7213 stmf_i_lu_t *ilu; 7214 int i; 7215 7216 rw_enter(iss->iss_lockp, RW_READER); 7217 lm = iss->iss_sm; 7218 for (i = 0; i < lm->lm_nentries; i++) { 7219 if (lm->lm_plus[i] == NULL) 7220 continue; 7221 lm_ent = 
(stmf_lun_map_ent_t *)lm->lm_plus[i]; 7222 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 7223 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 7224 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 7225 } 7226 } 7227 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 7228 rw_exit(iss->iss_lockp); 7229 } 7230 7231 /* 7232 * The return value is only used by function managing target reset. 7233 */ 7234 stmf_status_t 7235 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset) 7236 { 7237 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 7238 int ntasks_pending; 7239 7240 ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free; 7241 /* 7242 * This function is also used during Target reset. The idea is that 7243 * once all the commands are aborted, call the LU's reset entry 7244 * point (abort entry point with a reset flag). But if this Task 7245 * mgmt is running on this LU then all the tasks cannot be aborted. 7246 * one task (this task) will still be running which is OK. 
7247 */ 7248 if ((ntasks_pending == 0) || ((task->task_lu == lu) && 7249 (ntasks_pending == 1))) { 7250 stmf_status_t ret; 7251 7252 if ((task->task_mgmt_function == TM_LUN_RESET) || 7253 (task->task_mgmt_function == TM_TARGET_RESET) || 7254 (task->task_mgmt_function == TM_TARGET_WARM_RESET) || 7255 (task->task_mgmt_function == TM_TARGET_COLD_RESET)) { 7256 ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0); 7257 } else { 7258 ret = STMF_SUCCESS; 7259 } 7260 if (ret == STMF_SUCCESS) { 7261 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 7262 } 7263 if (target_reset) { 7264 return (ret); 7265 } 7266 if (ret == STMF_SUCCESS) { 7267 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 7268 return (ret); 7269 } 7270 if (ret != STMF_BUSY) { 7271 stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL); 7272 return (ret); 7273 } 7274 } 7275 7276 if (target_reset) { 7277 /* Tell target reset polling code that we are not done */ 7278 return (STMF_BUSY); 7279 } 7280 7281 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 7282 != STMF_SUCCESS) { 7283 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 7284 STMF_ALLOC_FAILURE, NULL); 7285 return (STMF_SUCCESS); 7286 } 7287 7288 return (STMF_SUCCESS); 7289 } 7290 7291 void 7292 stmf_target_reset_poll(struct scsi_task *task) 7293 { 7294 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 7295 task->task_session->ss_stmf_private; 7296 stmf_lun_map_t *lm; 7297 stmf_lun_map_ent_t *lm_ent; 7298 stmf_i_lu_t *ilu; 7299 stmf_status_t ret; 7300 int i; 7301 int not_done = 0; 7302 7303 ASSERT(iss->iss_flags & ISS_RESET_ACTIVE); 7304 7305 rw_enter(iss->iss_lockp, RW_READER); 7306 lm = iss->iss_sm; 7307 for (i = 0; i < lm->lm_nentries; i++) { 7308 if (lm->lm_plus[i] == NULL) 7309 continue; 7310 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 7311 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 7312 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 7313 rw_exit(iss->iss_lockp); 7314 ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1); 7315 
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			if (ret != STMF_BUSY) {
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	if (not_done) {
		/*
		 * At least one poll returned STMF_BUSY; schedule another
		 * poll of the LU.  If that fails, abort the task.
		 */
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	/* Everything finished; clear the reset flag and report success. */
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}

/*
 * Enable delivery of event 'eventid' to the given LU's event handler.
 * Returns STMF_INVALID_ARG if the event id is out of range.
 */
stmf_status_t
stmf_lu_add_event(stmf_lu_t *lu, int eventid)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Disable delivery of event 'eventid' (or all events, if eventid is
 * STMF_EVENT_ALL) to the given LU's event handler.
 */
stmf_status_t
stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

	if (eventid == STMF_EVENT_ALL) {
		STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
		return (STMF_SUCCESS);
	}

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Enable delivery of event 'eventid' to the given local port's event
 * handler.  Returns STMF_INVALID_ARG if the event id is out of range.
 */
stmf_status_t
stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Disable delivery of event 'eventid' (or all events, if eventid is
 * STMF_EVENT_ALL) to the given local port's event handler.
 */
stmf_status_t
stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;

	if (eventid == STMF_EVENT_ALL) {
		STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
		return (STMF_SUCCESS);
	}

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Deliver an LU event to the LU provider, but only if the event is
 * enabled in the event handle and the provider registered a handler.
 */
void
stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
{
	if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
	    (ilu->ilu_lu->lu_event_handler != NULL)) {
		ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
	}
}

/*
 * Deliver a local-port event to the port provider, but only if the event
 * is enabled in the event handle and the provider registered a handler.
 */
void
stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
    uint32_t flags)
{
	if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
	    (ilport->ilport_lport->lport_event_handler != NULL)) {
		ilport->ilport_lport->lport_event_handler(
		    ilport->ilport_lport, eventid, arg, flags);
	}
}

/*
 * With the possibility of having multiple itl sessions pointing to the
 * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
 * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
 * statistics.
 */

/*
 * Record the start of a task: timestamp it, move it onto the LU and lport
 * wait queues, and bump the remote port's read/write task counters.
 * Tasks for dlun0 (or with no itl data) are not tracked.
 */
void
stmf_itl_task_start(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss =
	    itask->itask_task->task_session->ss_stmf_private;
	stmf_i_remote_port_t *irport = iss->iss_irport;

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	itask->itask_start_timestamp = gethrtime();
	itask->itask_xfer_done_timestamp = 0;
	if (ilu->ilu_kstat_io != NULL) {
		mutex_enter(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
	}

	if (irport->irport_kstat_estat != NULL) {
		if (task->task_flags & TF_READ_DATA)
			atomic_inc_32(&irport->irport_nread_tasks);
		else if (task->task_flags & TF_WRITE_DATA)
			atomic_inc_32(&irport->irport_nwrite_tasks);
	}

	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
}

/*
 * A task has been handed to the LU: move it from the wait queue to the
 * run queue in both the LU and lport kstats.
 */
void
stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	if (ilu->ilu_kstat_io != NULL) {
		mutex_enter(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
	}

	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
}

/*
 * Record task completion: timestamp it, update the remote-port error
 * statistics, and exit the task from whichever kstat queue it is on
 * (run queue if it made it past stmf_itl_lu_new_task(), wait queue
 * otherwise, as indicated by ITASK_KSTAT_IN_RUNQ).
 */
void
stmf_itl_task_done(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;

	itask->itask_done_timestamp = gethrtime();

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	if (ilu->ilu_kstat_io == NULL)
		return;

	stmf_update_kstat_rport_estat(task);

	mutex_enter(ilu->ilu_kstat_io->ks_lock);

	if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	} else {
		stmf_update_kstat_lu_q(task, kstat_waitq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_waitq_exit);
	}
}

/*
 * Accumulate the elapsed LU transfer time for a task into its per-task
 * read or write counter.  dlun0 tasks are not tracked.
 */
void
stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, hrtime_t elapsed_time)
{
	stmf_i_scsi_task_t *itask = task->task_stmf_private;

	if (task->task_lu == dlun0)
		return;

	if (read) {
		atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
		    elapsed_time);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
		    elapsed_time);
	}
}

/*
 * Timestamp the start of a local-port data transfer and fire the
 * scsi-xfer-start DTrace probe.
 */
static void
stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;

	if (itl == NULL)
		return;

	DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf);

	dbuf->db_xfer_start_timestamp = gethrtime();
}

/*
 * Record the end of a local-port data transfer: accumulate elapsed time
 * and bytes moved (only successful transfers count bytes) into the task's
 * read or write counters, based on the transfer direction, then fire the
 * scsi-xfer-end DTrace probe.
 */
static void
stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	hrtime_t elapsed_time;
	uint64_t xfer_size;

	if (itl == NULL)
		return;

	xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
	    dbuf->db_data_size : 0;

	itask->itask_xfer_done_timestamp = gethrtime();
	elapsed_time = itask->itask_xfer_done_timestamp -
	    dbuf->db_xfer_start_timestamp;
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_read_xfer,
		    xfer_size);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_write_xfer,
		    xfer_size);
	}

	DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);

	dbuf->db_xfer_start_timestamp = 0;
}

/*
 * Create the svc request list and the single-threaded taskq that runs
 * stmf_svc(), then dispatch the service thread.  Idempotent: does
 * nothing if the service was already started.
 */
void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	list_create(&stmf_state.stmf_svc_list, sizeof (stmf_svc_req_t),
	    offsetof(stmf_svc_req_t, svc_list_entry));
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}

/*
 * Ask the service thread to terminate and wait (up to ~5 seconds, in
 * 10ms polls) for it to clear STMF_SVC_STARTED.  Returns STMF_BUSY if
 * the thread did not stop in time; otherwise tears down the request
 * list and taskq and returns STMF_SUCCESS.
 */
stmf_status_t
stmf_svc_fini()
{
	uint32_t i;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
		stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* Wait for 5 seconds */
	for (i = 0; i < 500; i++) {
		if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
			delay(drv_usectohz(10000));
		else
			break;
	}
	if (i == 500)
		return (STMF_BUSY);

	list_destroy(&stmf_state.stmf_svc_list);
	ddi_taskq_destroy(stmf_state.stmf_svc_taskq);

	return (STMF_SUCCESS);
}

/*
 * Per-invocation bookkeeping for the svc thread's periodic scans
 * (lbolt deadlines for the ILU timing and free-task drain rounds).
 */
struct stmf_svc_clocks {
	clock_t drain_start, drain_next;
	clock_t timing_start, timing_next;
	clock_t worker_delay;
};
7626 7627 /* ARGSUSED */ 7628 void 7629 stmf_svc(void *arg) 7630 { 7631 stmf_svc_req_t *req; 7632 stmf_lu_t *lu; 7633 stmf_i_lu_t *ilu; 7634 stmf_local_port_t *lport; 7635 struct stmf_svc_clocks clks = { 0 }; 7636 7637 mutex_enter(&stmf_state.stmf_lock); 7638 stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE; 7639 7640 while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) { 7641 if (list_is_empty(&stmf_state.stmf_svc_list)) { 7642 stmf_svc_timeout(&clks); 7643 continue; 7644 } 7645 7646 /* 7647 * Pop the front request from the active list. After this, 7648 * the request will no longer be referenced by global state, 7649 * so it should be safe to access it without holding the 7650 * stmf state lock. 7651 */ 7652 req = list_remove_head(&stmf_state.stmf_svc_list); 7653 if (req == NULL) 7654 continue; 7655 7656 switch (req->svc_cmd) { 7657 case STMF_CMD_LPORT_ONLINE: 7658 /* Fallthrough */ 7659 case STMF_CMD_LPORT_OFFLINE: 7660 mutex_exit(&stmf_state.stmf_lock); 7661 lport = (stmf_local_port_t *)req->svc_obj; 7662 lport->lport_ctl(lport, req->svc_cmd, &req->svc_info); 7663 break; 7664 case STMF_CMD_LU_ONLINE: 7665 mutex_exit(&stmf_state.stmf_lock); 7666 lu = (stmf_lu_t *)req->svc_obj; 7667 lu->lu_ctl(lu, req->svc_cmd, &req->svc_info); 7668 break; 7669 case STMF_CMD_LU_OFFLINE: 7670 /* Remove all mappings of this LU */ 7671 stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj); 7672 /* Kill all the pending I/Os for this LU */ 7673 mutex_exit(&stmf_state.stmf_lock); 7674 stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL, 7675 STMF_ABORTED); 7676 lu = (stmf_lu_t *)req->svc_obj; 7677 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 7678 stmf_wait_ilu_tasks_finish(ilu); 7679 lu->lu_ctl(lu, req->svc_cmd, &req->svc_info); 7680 break; 7681 default: 7682 cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d", 7683 req->svc_cmd); 7684 } 7685 7686 kmem_free(req, req->svc_req_alloc_size); 7687 mutex_enter(&stmf_state.stmf_lock); 7688 } 7689 7690 stmf_state.stmf_svc_flags &= 
~(STMF_SVC_STARTED | STMF_SVC_ACTIVE); 7691 mutex_exit(&stmf_state.stmf_lock); 7692 } 7693 7694 static void 7695 stmf_svc_timeout(struct stmf_svc_clocks *clks) 7696 { 7697 clock_t td; 7698 stmf_i_local_port_t *ilport, *next_ilport; 7699 stmf_i_scsi_session_t *iss; 7700 7701 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 7702 7703 td = drv_usectohz(20000); 7704 7705 /* Do timeouts */ 7706 if (stmf_state.stmf_nlus && 7707 ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) { 7708 if (!stmf_state.stmf_svc_ilu_timing) { 7709 /* we are starting a new round */ 7710 stmf_state.stmf_svc_ilu_timing = 7711 stmf_state.stmf_ilulist; 7712 clks->timing_start = ddi_get_lbolt(); 7713 } 7714 7715 stmf_check_ilu_timing(); 7716 if (!stmf_state.stmf_svc_ilu_timing) { 7717 /* we finished a complete round */ 7718 clks->timing_next = 7719 clks->timing_start + drv_usectohz(5*1000*1000); 7720 } else { 7721 /* we still have some ilu items to check */ 7722 clks->timing_next = 7723 ddi_get_lbolt() + drv_usectohz(1*1000*1000); 7724 } 7725 7726 if (!list_is_empty(&stmf_state.stmf_svc_list)) 7727 return; 7728 } 7729 7730 /* Check if there are free tasks to clear */ 7731 if (stmf_state.stmf_nlus && 7732 ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) { 7733 if (!stmf_state.stmf_svc_ilu_draining) { 7734 /* we are starting a new round */ 7735 stmf_state.stmf_svc_ilu_draining = 7736 stmf_state.stmf_ilulist; 7737 clks->drain_start = ddi_get_lbolt(); 7738 } 7739 7740 stmf_check_freetask(); 7741 if (!stmf_state.stmf_svc_ilu_draining) { 7742 /* we finished a complete round */ 7743 clks->drain_next = 7744 clks->drain_start + drv_usectohz(10*1000*1000); 7745 } else { 7746 /* we still have some ilu items to check */ 7747 clks->drain_next = 7748 ddi_get_lbolt() + drv_usectohz(1*1000*1000); 7749 } 7750 7751 if (!list_is_empty(&stmf_state.stmf_svc_list)) 7752 return; 7753 } 7754 7755 /* Check if any active session got its 1st LUN */ 7756 if (stmf_state.stmf_process_initial_luns) { 
7757 int stmf_level = 0; 7758 int port_level; 7759 7760 for (ilport = stmf_state.stmf_ilportlist; ilport; 7761 ilport = next_ilport) { 7762 int ilport_lock_held; 7763 next_ilport = ilport->ilport_next; 7764 7765 if ((ilport->ilport_flags & 7766 ILPORT_SS_GOT_INITIAL_LUNS) == 0) 7767 continue; 7768 7769 port_level = 0; 7770 rw_enter(&ilport->ilport_lock, RW_READER); 7771 ilport_lock_held = 1; 7772 7773 for (iss = ilport->ilport_ss_list; iss; 7774 iss = iss->iss_next) { 7775 if ((iss->iss_flags & 7776 ISS_GOT_INITIAL_LUNS) == 0) 7777 continue; 7778 7779 port_level++; 7780 stmf_level++; 7781 atomic_and_32(&iss->iss_flags, 7782 ~ISS_GOT_INITIAL_LUNS); 7783 atomic_or_32(&iss->iss_flags, 7784 ISS_EVENT_ACTIVE); 7785 rw_exit(&ilport->ilport_lock); 7786 ilport_lock_held = 0; 7787 mutex_exit(&stmf_state.stmf_lock); 7788 stmf_generate_lport_event(ilport, 7789 LPORT_EVENT_INITIAL_LUN_MAPPED, 7790 iss->iss_ss, 0); 7791 atomic_and_32(&iss->iss_flags, 7792 ~ISS_EVENT_ACTIVE); 7793 mutex_enter(&stmf_state.stmf_lock); 7794 /* 7795 * scan all the ilports again as the 7796 * ilport list might have changed. 7797 */ 7798 next_ilport = stmf_state.stmf_ilportlist; 7799 break; 7800 } 7801 7802 if (port_level == 0) 7803 atomic_and_32(&ilport->ilport_flags, 7804 ~ILPORT_SS_GOT_INITIAL_LUNS); 7805 /* drop the lock if we are holding it. */ 7806 if (ilport_lock_held == 1) 7807 rw_exit(&ilport->ilport_lock); 7808 7809 /* Max 4 session at a time */ 7810 if (stmf_level >= 4) 7811 break; 7812 } 7813 7814 if (stmf_level == 0) 7815 stmf_state.stmf_process_initial_luns = 0; 7816 } 7817 7818 stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE; 7819 (void) cv_reltimedwait(&stmf_state.stmf_cv, 7820 &stmf_state.stmf_lock, td, TR_CLOCK_TICK); 7821 stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE; 7822 } 7823 7824 /* 7825 * Waits for ongoing I/O tasks to finish on an LU in preparation for 7826 * the LU's offlining. The LU should already be in an Offlining state 7827 * (otherwise I/O to the LU might never end). 
There is an additional 7828 * enforcement of this via a deadman timer check. 7829 */ 7830 static void 7831 stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu) 7832 { 7833 clock_t start, now, deadline; 7834 7835 start = now = ddi_get_lbolt(); 7836 deadline = start + drv_usectohz(stmf_io_deadman * 1000000llu); 7837 mutex_enter(&ilu->ilu_task_lock); 7838 while (ilu->ilu_ntasks != ilu->ilu_ntasks_free) { 7839 (void) cv_timedwait(&ilu->ilu_offline_pending_cv, 7840 &ilu->ilu_task_lock, deadline); 7841 now = ddi_get_lbolt(); 7842 if (now > deadline) { 7843 if (stmf_io_deadman_enabled) { 7844 cmn_err(CE_PANIC, "stmf_svc: I/O deadman hit " 7845 "on STMF_CMD_LU_OFFLINE after %d seconds", 7846 stmf_io_deadman); 7847 } else { 7848 /* keep on spinning */ 7849 deadline = now + drv_usectohz(stmf_io_deadman * 7850 1000000llu); 7851 } 7852 } 7853 } 7854 mutex_exit(&ilu->ilu_task_lock); 7855 DTRACE_PROBE1(deadman__timeout__wait, clock_t, now - start); 7856 } 7857 7858 void 7859 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info) 7860 { 7861 stmf_svc_req_t *req; 7862 int s; 7863 7864 ASSERT(!mutex_owned(&stmf_state.stmf_lock)); 7865 s = sizeof (stmf_svc_req_t); 7866 if (info->st_additional_info) { 7867 s += strlen(info->st_additional_info) + 1; 7868 } 7869 req = kmem_zalloc(s, KM_SLEEP); 7870 7871 req->svc_cmd = cmd; 7872 req->svc_obj = obj; 7873 req->svc_info.st_rflags = info->st_rflags; 7874 if (info->st_additional_info) { 7875 req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req, 7876 sizeof (stmf_svc_req_t))); 7877 (void) strcpy(req->svc_info.st_additional_info, 7878 info->st_additional_info); 7879 } 7880 req->svc_req_alloc_size = s; 7881 7882 mutex_enter(&stmf_state.stmf_lock); 7883 list_insert_tail(&stmf_state.stmf_svc_list, req); 7884 if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) { 7885 cv_signal(&stmf_state.stmf_cv); 7886 } 7887 mutex_exit(&stmf_state.stmf_lock); 7888 } 7889 7890 static void 7891 stmf_svc_kill_obj_requests(void *obj) 7892 { 7893 
stmf_svc_req_t *req; 7894 7895 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 7896 7897 for (req = list_head(&stmf_state.stmf_svc_list); req != NULL; 7898 req = list_next(&stmf_state.stmf_svc_list, req)) { 7899 if (req->svc_obj == obj) { 7900 list_remove(&stmf_state.stmf_svc_list, req); 7901 kmem_free(req, req->svc_req_alloc_size); 7902 } 7903 } 7904 } 7905 7906 void 7907 stmf_trace(caddr_t ident, const char *fmt, ...) 7908 { 7909 va_list args; 7910 char tbuf[160]; 7911 int len; 7912 7913 if (!stmf_trace_on) 7914 return; 7915 len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "", 7916 ddi_get_lbolt()); 7917 va_start(args, fmt); 7918 len += vsnprintf(tbuf + len, 158 - len, fmt, args); 7919 va_end(args); 7920 7921 if (len > 158) { 7922 len = 158; 7923 } 7924 tbuf[len++] = '\n'; 7925 tbuf[len] = '\0'; 7926 7927 mutex_enter(&trace_buf_lock); 7928 bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1); 7929 trace_buf_curndx += len; 7930 if (trace_buf_curndx > (trace_buf_size - 320)) 7931 trace_buf_curndx = 0; 7932 mutex_exit(&trace_buf_lock); 7933 } 7934 7935 void 7936 stmf_trace_clear() 7937 { 7938 if (!stmf_trace_on) 7939 return; 7940 mutex_enter(&trace_buf_lock); 7941 trace_buf_curndx = 0; 7942 if (trace_buf_size > 0) 7943 stmf_trace_buf[0] = '\0'; 7944 mutex_exit(&trace_buf_lock); 7945 } 7946 7947 /* 7948 * NOTE: Due to lock order problems that are not possible to fix this 7949 * method drops and reacquires the itask_mutex around the call to stmf_ctl. 7950 * Another possible work around would be to use a dispatch queue and have 7951 * the call to stmf_ctl run on another thread that's not holding the 7952 * itask_mutex. The problem with that approach is that it's difficult to 7953 * determine what impact an asynchronous change would have on the system state. 
7954 */ 7955 static void 7956 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info) 7957 { 7958 stmf_state_change_info_t change_info; 7959 void *ctl_private; 7960 uint32_t ctl_cmd; 7961 int msg = 0; 7962 stmf_i_scsi_task_t *itask = 7963 (stmf_i_scsi_task_t *)task->task_stmf_private; 7964 7965 stmf_trace("FROM STMF", "abort_task_offline called for %s: %s", 7966 offline_lu ? "LU" : "LPORT", info ? info : "no additional info"); 7967 change_info.st_additional_info = info; 7968 ASSERT(mutex_owned(&itask->itask_mutex)); 7969 7970 if (offline_lu) { 7971 change_info.st_rflags = STMF_RFLAG_RESET | 7972 STMF_RFLAG_LU_ABORT; 7973 ctl_private = task->task_lu; 7974 if (((stmf_i_lu_t *) 7975 task->task_lu->lu_stmf_private)->ilu_state == 7976 STMF_STATE_ONLINE) { 7977 msg = 1; 7978 } 7979 ctl_cmd = STMF_CMD_LU_OFFLINE; 7980 } else { 7981 change_info.st_rflags = STMF_RFLAG_RESET | 7982 STMF_RFLAG_LPORT_ABORT; 7983 ctl_private = task->task_lport; 7984 if (((stmf_i_local_port_t *) 7985 task->task_lport->lport_stmf_private)->ilport_state == 7986 STMF_STATE_ONLINE) { 7987 msg = 1; 7988 } 7989 ctl_cmd = STMF_CMD_LPORT_OFFLINE; 7990 } 7991 7992 if (msg) { 7993 stmf_trace(0, "Calling stmf_ctl to offline %s : %s", 7994 offline_lu ? "LU" : "LPORT", info ? 
info : 7995 "<no additional info>"); 7996 } 7997 mutex_exit(&itask->itask_mutex); 7998 (void) stmf_ctl(ctl_cmd, ctl_private, &change_info); 7999 mutex_enter(&itask->itask_mutex); 8000 } 8001 8002 static char 8003 stmf_ctoi(char c) 8004 { 8005 if ((c >= '0') && (c <= '9')) 8006 c -= '0'; 8007 else if ((c >= 'A') && (c <= 'F')) 8008 c = c - 'A' + 10; 8009 else if ((c >= 'a') && (c <= 'f')) 8010 c = c - 'a' + 10; 8011 else 8012 c = -1; 8013 return (c); 8014 } 8015 8016 /* Convert from Hex value in ASCII format to the equivalent bytes */ 8017 static boolean_t 8018 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp) 8019 { 8020 int ii; 8021 8022 for (ii = 0; ii < dplen; ii++) { 8023 char nibble1, nibble2; 8024 char enc_char = *c++; 8025 nibble1 = stmf_ctoi(enc_char); 8026 8027 enc_char = *c++; 8028 nibble2 = stmf_ctoi(enc_char); 8029 if (nibble1 == -1 || nibble2 == -1) 8030 return (B_FALSE); 8031 8032 dp[ii] = (nibble1 << 4) | nibble2; 8033 } 8034 return (B_TRUE); 8035 } 8036 8037 boolean_t 8038 stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz, 8039 uint16_t *tptid_sz) 8040 { 8041 uint16_t tpd_len = SCSI_TPTID_SIZE; 8042 8043 if (tptid_sz) 8044 *tptid_sz = 0; 8045 if (total_sz < sizeof (scsi_transport_id_t)) 8046 return (B_FALSE); 8047 8048 switch (tptid->protocol_id) { 8049 8050 case PROTOCOL_FIBRE_CHANNEL: 8051 /* FC Transport ID validation checks. 
SPC3 rev23, Table 284 */ 8052 if (total_sz < tpd_len || tptid->format_code != 0) 8053 return (B_FALSE); 8054 break; 8055 8056 case PROTOCOL_iSCSI: /* CSTYLED */ 8057 { 8058 iscsi_transport_id_t *iscsiid; 8059 uint16_t adn_len, name_len; 8060 8061 /* Check for valid format code, SPC3 rev 23 Table 288 */ 8062 if ((total_sz < tpd_len) || 8063 (tptid->format_code != 0 && tptid->format_code != 1)) 8064 return (B_FALSE); 8065 8066 iscsiid = (iscsi_transport_id_t *)tptid; 8067 adn_len = READ_SCSI16(iscsiid->add_len, uint16_t); 8068 tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1; 8069 8070 /* 8071 * iSCSI Transport ID validation checks. 8072 * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290 8073 */ 8074 if (adn_len < 20 || (adn_len % 4 != 0)) 8075 return (B_FALSE); 8076 8077 name_len = strnlen(iscsiid->iscsi_name, adn_len); 8078 if (name_len == 0 || name_len >= adn_len) 8079 return (B_FALSE); 8080 8081 /* If the format_code is 1 check for ISID seperator */ 8082 if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name, 8083 SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL)) 8084 return (B_FALSE); 8085 8086 } 8087 break; 8088 8089 case PROTOCOL_SRP: 8090 /* SRP Transport ID validation checks. 
SPC3 rev23, Table 287 */ 8091 if (total_sz < tpd_len || tptid->format_code != 0) 8092 return (B_FALSE); 8093 break; 8094 8095 case PROTOCOL_PARALLEL_SCSI: 8096 case PROTOCOL_SSA: 8097 case PROTOCOL_IEEE_1394: 8098 case PROTOCOL_SAS: 8099 case PROTOCOL_ADT: 8100 case PROTOCOL_ATAPI: 8101 default: /* CSTYLED */ 8102 { 8103 stmf_dflt_scsi_tptid_t *dflttpd; 8104 8105 tpd_len = sizeof (stmf_dflt_scsi_tptid_t); 8106 if (total_sz < tpd_len) 8107 return (B_FALSE); 8108 dflttpd = (stmf_dflt_scsi_tptid_t *)tptid; 8109 tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1; 8110 if (total_sz < tpd_len) 8111 return (B_FALSE); 8112 } 8113 break; 8114 } 8115 if (tptid_sz) 8116 *tptid_sz = tpd_len; 8117 return (B_TRUE); 8118 } 8119 8120 boolean_t 8121 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1, scsi_transport_id_t *tpd2) 8122 { 8123 if ((tpd1->protocol_id != tpd2->protocol_id) || 8124 (tpd1->format_code != tpd2->format_code)) 8125 return (B_FALSE); 8126 8127 switch (tpd1->protocol_id) { 8128 8129 case PROTOCOL_iSCSI: /* CSTYLED */ 8130 { 8131 iscsi_transport_id_t *iscsitpd1, *iscsitpd2; 8132 uint16_t len; 8133 8134 iscsitpd1 = (iscsi_transport_id_t *)tpd1; 8135 iscsitpd2 = (iscsi_transport_id_t *)tpd2; 8136 len = SCSI_READ16(&iscsitpd1->add_len); 8137 if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) || 8138 (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len) 8139 != 0)) 8140 return (B_FALSE); 8141 } 8142 break; 8143 8144 case PROTOCOL_SRP: /* CSTYLED */ 8145 { 8146 scsi_srp_transport_id_t *srptpd1, *srptpd2; 8147 8148 srptpd1 = (scsi_srp_transport_id_t *)tpd1; 8149 srptpd2 = (scsi_srp_transport_id_t *)tpd2; 8150 if (memcmp(srptpd1->srp_name, srptpd2->srp_name, 8151 sizeof (srptpd1->srp_name)) != 0) 8152 return (B_FALSE); 8153 } 8154 break; 8155 8156 case PROTOCOL_FIBRE_CHANNEL: /* CSTYLED */ 8157 { 8158 scsi_fc_transport_id_t *fctpd1, *fctpd2; 8159 8160 fctpd1 = (scsi_fc_transport_id_t *)tpd1; 8161 fctpd2 = (scsi_fc_transport_id_t *)tpd2; 8162 if 
(memcmp(fctpd1->port_name, fctpd2->port_name, 8163 sizeof (fctpd1->port_name)) != 0) 8164 return (B_FALSE); 8165 } 8166 break; 8167 8168 case PROTOCOL_PARALLEL_SCSI: 8169 case PROTOCOL_SSA: 8170 case PROTOCOL_IEEE_1394: 8171 case PROTOCOL_SAS: 8172 case PROTOCOL_ADT: 8173 case PROTOCOL_ATAPI: 8174 default: /* CSTYLED */ 8175 { 8176 stmf_dflt_scsi_tptid_t *dflt1, *dflt2; 8177 uint16_t len; 8178 8179 dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1; 8180 dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2; 8181 len = SCSI_READ16(&dflt1->ident_len); 8182 if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) || 8183 (memcmp(dflt1->ident, dflt2->ident, len) != 0)) 8184 return (B_FALSE); 8185 } 8186 break; 8187 } 8188 return (B_TRUE); 8189 } 8190 8191 /* 8192 * Changes devid_desc to corresponding TransportID format 8193 * Returns :- pointer to stmf_remote_port_t 8194 * Note :- Allocates continous memory for stmf_remote_port_t and TransportID, 8195 * This memory need to be freed when this remote_port is no longer 8196 * used. 8197 */ 8198 stmf_remote_port_t * 8199 stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid) 8200 { 8201 struct scsi_fc_transport_id *fc_tpd; 8202 struct iscsi_transport_id *iscsi_tpd; 8203 struct scsi_srp_transport_id *srp_tpd; 8204 struct stmf_dflt_scsi_tptid *dflt_tpd; 8205 uint16_t ident_len, sz = 0; 8206 stmf_remote_port_t *rpt = NULL; 8207 8208 ident_len = devid->ident_length; 8209 ASSERT(ident_len); 8210 switch (devid->protocol_id) { 8211 case PROTOCOL_FIBRE_CHANNEL: 8212 sz = sizeof (scsi_fc_transport_id_t); 8213 rpt = stmf_remote_port_alloc(sz); 8214 rpt->rport_tptid->format_code = 0; 8215 rpt->rport_tptid->protocol_id = devid->protocol_id; 8216 fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid; 8217 /* 8218 * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary 8219 * skip first 4 byte for "wwn." 
8220 */ 8221 ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0); 8222 if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) || 8223 !stmf_base16_str_to_binary((char *)devid->ident + 4, 8224 SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name)) 8225 goto devid_to_remote_port_fail; 8226 break; 8227 8228 case PROTOCOL_iSCSI: 8229 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) + 8230 ident_len - 1); 8231 rpt = stmf_remote_port_alloc(sz); 8232 rpt->rport_tptid->format_code = 0; 8233 rpt->rport_tptid->protocol_id = devid->protocol_id; 8234 iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid; 8235 SCSI_WRITE16(iscsi_tpd->add_len, ident_len); 8236 (void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len); 8237 break; 8238 8239 case PROTOCOL_SRP: 8240 sz = sizeof (scsi_srp_transport_id_t); 8241 rpt = stmf_remote_port_alloc(sz); 8242 rpt->rport_tptid->format_code = 0; 8243 rpt->rport_tptid->protocol_id = devid->protocol_id; 8244 srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid; 8245 /* 8246 * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary 8247 * skip first 4 byte for "eui." 
8248 * Assume 8-byte initiator-extension part of srp_name is NOT 8249 * stored in devid and hence will be set as zero 8250 */ 8251 ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0); 8252 if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) || 8253 !stmf_base16_str_to_binary((char *)devid->ident+4, 8254 SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name)) 8255 goto devid_to_remote_port_fail; 8256 break; 8257 8258 case PROTOCOL_PARALLEL_SCSI: 8259 case PROTOCOL_SSA: 8260 case PROTOCOL_IEEE_1394: 8261 case PROTOCOL_SAS: 8262 case PROTOCOL_ADT: 8263 case PROTOCOL_ATAPI: 8264 default : 8265 ident_len = devid->ident_length; 8266 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) + 8267 ident_len - 1); 8268 rpt = stmf_remote_port_alloc(sz); 8269 rpt->rport_tptid->format_code = 0; 8270 rpt->rport_tptid->protocol_id = devid->protocol_id; 8271 dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid; 8272 SCSI_WRITE16(dflt_tpd->ident_len, ident_len); 8273 (void) memcpy(dflt_tpd->ident, devid->ident, ident_len); 8274 break; 8275 } 8276 return (rpt); 8277 8278 devid_to_remote_port_fail: 8279 stmf_remote_port_free(rpt); 8280 return (NULL); 8281 8282 } 8283 8284 stmf_remote_port_t * 8285 stmf_remote_port_alloc(uint16_t tptid_sz) 8286 { 8287 stmf_remote_port_t *rpt; 8288 rpt = (stmf_remote_port_t *)kmem_zalloc( 8289 sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP); 8290 rpt->rport_tptid_sz = tptid_sz; 8291 rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1); 8292 return (rpt); 8293 } 8294 8295 void 8296 stmf_remote_port_free(stmf_remote_port_t *rpt) 8297 { 8298 /* 8299 * Note: stmf_scsilib_devid_to_remote_port() function allocates 8300 * remote port structures for all transports in the same way, So 8301 * it is safe to deallocate it in a protocol independent manner. 8302 * If any of the allocation method changes, corresponding changes 8303 * need to be made here too. 
8304 */ 8305 kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz); 8306 } 8307 8308 stmf_lu_t * 8309 stmf_check_and_hold_lu(scsi_task_t *task, uint8_t *guid) 8310 { 8311 stmf_i_scsi_session_t *iss; 8312 stmf_lu_t *lu; 8313 stmf_i_lu_t *ilu = NULL; 8314 stmf_lun_map_t *sm; 8315 stmf_lun_map_ent_t *lme; 8316 int i; 8317 8318 iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private; 8319 rw_enter(iss->iss_lockp, RW_READER); 8320 sm = iss->iss_sm; 8321 8322 for (i = 0; i < sm->lm_nentries; i++) { 8323 if (sm->lm_plus[i] == NULL) 8324 continue; 8325 lme = (stmf_lun_map_ent_t *)sm->lm_plus[i]; 8326 lu = lme->ent_lu; 8327 if (bcmp(lu->lu_id->ident, guid, 16) == 0) { 8328 break; 8329 } 8330 lu = NULL; 8331 } 8332 8333 if (!lu) { 8334 goto hold_lu_done; 8335 } 8336 8337 ilu = lu->lu_stmf_private; 8338 mutex_enter(&ilu->ilu_task_lock); 8339 ilu->ilu_additional_ref++; 8340 mutex_exit(&ilu->ilu_task_lock); 8341 8342 hold_lu_done: 8343 rw_exit(iss->iss_lockp); 8344 return (lu); 8345 } 8346 8347 void 8348 stmf_release_lu(stmf_lu_t *lu) 8349 { 8350 stmf_i_lu_t *ilu; 8351 8352 ilu = lu->lu_stmf_private; 8353 ASSERT(ilu->ilu_additional_ref != 0); 8354 mutex_enter(&ilu->ilu_task_lock); 8355 ilu->ilu_additional_ref--; 8356 mutex_exit(&ilu->ilu_task_lock); 8357 } 8358 8359 int 8360 stmf_is_task_being_aborted(scsi_task_t *task) 8361 { 8362 stmf_i_scsi_task_t *itask; 8363 8364 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 8365 if (itask->itask_flags & ITASK_BEING_ABORTED) 8366 return (1); 8367 8368 return (0); 8369 } 8370 8371 volatile boolean_t stmf_pgr_aptpl_always = B_FALSE; 8372 8373 boolean_t 8374 stmf_is_pgr_aptpl_always() 8375 { 8376 return (stmf_pgr_aptpl_always); 8377 } 8378