1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/conf.h> 27 #include <sys/file.h> 28 #include <sys/ddi.h> 29 #include <sys/sunddi.h> 30 #include <sys/modctl.h> 31 #include <sys/scsi/scsi.h> 32 #include <sys/scsi/impl/scsi_reset_notify.h> 33 #include <sys/disp.h> 34 #include <sys/byteorder.h> 35 #include <sys/atomic.h> 36 #include <sys/ethernet.h> 37 #include <sys/sdt.h> 38 #include <sys/nvpair.h> 39 #include <sys/zone.h> 40 41 #include <stmf.h> 42 #include <lpif.h> 43 #include <portif.h> 44 #include <stmf_ioctl.h> 45 #include <stmf_impl.h> 46 #include <lun_map.h> 47 #include <stmf_state.h> 48 #include <pppt_ic_if.h> 49 50 static uint64_t stmf_session_counter = 0; 51 static uint16_t stmf_rtpid_counter = 0; 52 /* start messages at 1 */ 53 static uint64_t stmf_proxy_msg_id = 1; 54 55 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 56 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 57 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 58 void **result); 59 static int stmf_open(dev_t *devp, int flag, 
int otype, cred_t *credp); 60 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp); 61 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 62 cred_t *credp, int *rval); 63 static int stmf_get_stmf_state(stmf_state_desc_t *std); 64 static int stmf_set_stmf_state(stmf_state_desc_t *std); 65 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu, 66 char *info); 67 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state); 68 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state); 69 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua); 70 void stmf_svc_init(); 71 stmf_status_t stmf_svc_fini(); 72 void stmf_svc(void *arg); 73 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info); 74 void stmf_check_freetask(); 75 void stmf_abort_target_reset(scsi_task_t *task); 76 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, 77 int target_reset); 78 void stmf_target_reset_poll(struct scsi_task *task); 79 void stmf_handle_lun_reset(scsi_task_t *task); 80 void stmf_handle_target_reset(scsi_task_t *task); 81 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf); 82 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token, 83 uint32_t *err_ret); 84 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi); 85 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out, 86 uint32_t *err_ret); 87 void stmf_delete_ppd(stmf_pp_data_t *ppd); 88 void stmf_delete_all_ppds(); 89 void stmf_trace_clear(); 90 void stmf_worker_init(); 91 stmf_status_t stmf_worker_fini(); 92 void stmf_worker_mgmt(); 93 void stmf_worker_task(void *arg); 94 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss); 95 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, 96 uint32_t type); 97 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg); 98 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg); 99 static 
stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg); 100 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg); 101 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s); 102 103 /* pppt modhandle */ 104 ddi_modhandle_t pppt_mod; 105 106 /* pppt modload imported functions */ 107 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc; 108 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc; 109 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc; 110 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc; 111 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc; 112 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc; 113 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc; 114 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc; 115 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc; 116 stmf_ic_tx_msg_func_t ic_tx_msg; 117 stmf_ic_msg_free_func_t ic_msg_free; 118 119 static void stmf_update_kstat_lu_q(scsi_task_t *, void()); 120 static void stmf_update_kstat_lport_q(scsi_task_t *, void()); 121 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *); 122 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *); 123 124 extern struct mod_ops mod_driverops; 125 126 /* =====[ Tunables ]===== */ 127 /* Internal tracing */ 128 volatile int stmf_trace_on = 1; 129 volatile int stmf_trace_buf_size = (1 * 1024 * 1024); 130 /* 131 * The reason default task timeout is 75 is because we want the 132 * host to timeout 1st and mostly host timeout is 60 seconds. 133 */ 134 volatile int stmf_default_task_timeout = 75; 135 /* 136 * Setting this to one means, you are responsible for config load and keeping 137 * things in sync with persistent database. 
138 */ 139 volatile int stmf_allow_modunload = 0; 140 141 volatile int stmf_max_nworkers = 256; 142 volatile int stmf_min_nworkers = 4; 143 volatile int stmf_worker_scale_down_delay = 20; 144 145 /* === [ Debugging and fault injection ] === */ 146 #ifdef DEBUG 147 volatile int stmf_drop_task_counter = 0; 148 volatile int stmf_drop_buf_counter = 0; 149 150 #endif 151 152 stmf_state_t stmf_state; 153 static stmf_lu_t *dlun0; 154 155 static uint8_t stmf_first_zero[] = 156 { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff }; 157 static uint8_t stmf_first_one[] = 158 { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 }; 159 160 static kmutex_t trace_buf_lock; 161 static int trace_buf_size; 162 static int trace_buf_curndx; 163 caddr_t stmf_trace_buf; 164 165 static enum { 166 STMF_WORKERS_DISABLED = 0, 167 STMF_WORKERS_ENABLING, 168 STMF_WORKERS_ENABLED 169 } stmf_workers_state = STMF_WORKERS_DISABLED; 170 static int stmf_i_max_nworkers; 171 static int stmf_i_min_nworkers; 172 static int stmf_nworkers_cur; /* # of workers currently running */ 173 static int stmf_nworkers_needed; /* # of workers need to be running */ 174 static int stmf_worker_sel_counter = 0; 175 static uint32_t stmf_cur_ntasks = 0; 176 static clock_t stmf_wm_last = 0; 177 /* 178 * This is equal to stmf_nworkers_cur while we are increasing # workers and 179 * stmf_nworkers_needed while we are decreasing the worker count. 
180 */ 181 static int stmf_nworkers_accepting_cmds; 182 static stmf_worker_t *stmf_workers = NULL; 183 static clock_t stmf_worker_mgmt_delay = 2; 184 static clock_t stmf_worker_scale_down_timer = 0; 185 static int stmf_worker_scale_down_qd = 0; 186 187 static struct cb_ops stmf_cb_ops = { 188 stmf_open, /* open */ 189 stmf_close, /* close */ 190 nodev, /* strategy */ 191 nodev, /* print */ 192 nodev, /* dump */ 193 nodev, /* read */ 194 nodev, /* write */ 195 stmf_ioctl, /* ioctl */ 196 nodev, /* devmap */ 197 nodev, /* mmap */ 198 nodev, /* segmap */ 199 nochpoll, /* chpoll */ 200 ddi_prop_op, /* cb_prop_op */ 201 0, /* streamtab */ 202 D_NEW | D_MP, /* cb_flag */ 203 CB_REV, /* rev */ 204 nodev, /* aread */ 205 nodev /* awrite */ 206 }; 207 208 static struct dev_ops stmf_ops = { 209 DEVO_REV, 210 0, 211 stmf_getinfo, 212 nulldev, /* identify */ 213 nulldev, /* probe */ 214 stmf_attach, 215 stmf_detach, 216 nodev, /* reset */ 217 &stmf_cb_ops, 218 NULL, /* bus_ops */ 219 NULL /* power */ 220 }; 221 222 #define STMF_NAME "COMSTAR STMF" 223 #define STMF_MODULE_NAME "stmf" 224 225 static struct modldrv modldrv = { 226 &mod_driverops, 227 STMF_NAME, 228 &stmf_ops 229 }; 230 231 static struct modlinkage modlinkage = { 232 MODREV_1, 233 &modldrv, 234 NULL 235 }; 236 237 int 238 _init(void) 239 { 240 int ret; 241 242 ret = mod_install(&modlinkage); 243 if (ret) 244 return (ret); 245 stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP); 246 trace_buf_size = stmf_trace_buf_size; 247 trace_buf_curndx = 0; 248 mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0); 249 bzero(&stmf_state, sizeof (stmf_state_t)); 250 /* STMF service is off by default */ 251 stmf_state.stmf_service_running = 0; 252 mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL); 253 cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL); 254 stmf_session_counter = (uint64_t)ddi_get_lbolt(); 255 stmf_view_init(); 256 stmf_svc_init(); 257 stmf_dlun_init(); 258 return (ret); 259 } 260 261 int 262 
_fini(void) 263 { 264 int ret; 265 266 if (stmf_state.stmf_service_running) 267 return (EBUSY); 268 if ((!stmf_allow_modunload) && 269 (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) { 270 return (EBUSY); 271 } 272 if (stmf_state.stmf_nlps || stmf_state.stmf_npps) { 273 return (EBUSY); 274 } 275 if (stmf_dlun_fini() != STMF_SUCCESS) 276 return (EBUSY); 277 if (stmf_worker_fini() != STMF_SUCCESS) { 278 stmf_dlun_init(); 279 return (EBUSY); 280 } 281 if (stmf_svc_fini() != STMF_SUCCESS) { 282 stmf_dlun_init(); 283 stmf_worker_init(); 284 return (EBUSY); 285 } 286 287 ret = mod_remove(&modlinkage); 288 if (ret) { 289 stmf_svc_init(); 290 stmf_dlun_init(); 291 stmf_worker_init(); 292 return (ret); 293 } 294 295 stmf_view_clear_config(); 296 kmem_free(stmf_trace_buf, stmf_trace_buf_size); 297 mutex_destroy(&trace_buf_lock); 298 mutex_destroy(&stmf_state.stmf_lock); 299 cv_destroy(&stmf_state.stmf_cv); 300 return (ret); 301 } 302 303 int 304 _info(struct modinfo *modinfop) 305 { 306 return (mod_info(&modlinkage, modinfop)); 307 } 308 309 /* ARGSUSED */ 310 static int 311 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 312 { 313 switch (cmd) { 314 case DDI_INFO_DEVT2DEVINFO: 315 *result = stmf_state.stmf_dip; 316 break; 317 case DDI_INFO_DEVT2INSTANCE: 318 *result = 319 (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip); 320 break; 321 default: 322 return (DDI_FAILURE); 323 } 324 325 return (DDI_SUCCESS); 326 } 327 328 static int 329 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 330 { 331 switch (cmd) { 332 case DDI_ATTACH: 333 stmf_state.stmf_dip = dip; 334 335 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0, 336 DDI_NT_STMF, 0) != DDI_SUCCESS) { 337 break; 338 } 339 ddi_report_dev(dip); 340 return (DDI_SUCCESS); 341 } 342 343 return (DDI_FAILURE); 344 } 345 346 static int 347 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 348 { 349 switch (cmd) { 350 case DDI_DETACH: 351 ddi_remove_minor_node(dip, 0); 352 return 
(DDI_SUCCESS); 353 } 354 355 return (DDI_FAILURE); 356 } 357 358 /* ARGSUSED */ 359 static int 360 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp) 361 { 362 mutex_enter(&stmf_state.stmf_lock); 363 if (stmf_state.stmf_exclusive_open) { 364 mutex_exit(&stmf_state.stmf_lock); 365 return (EBUSY); 366 } 367 if (flag & FEXCL) { 368 if (stmf_state.stmf_opened) { 369 mutex_exit(&stmf_state.stmf_lock); 370 return (EBUSY); 371 } 372 stmf_state.stmf_exclusive_open = 1; 373 } 374 stmf_state.stmf_opened = 1; 375 mutex_exit(&stmf_state.stmf_lock); 376 return (0); 377 } 378 379 /* ARGSUSED */ 380 static int 381 stmf_close(dev_t dev, int flag, int otype, cred_t *credp) 382 { 383 mutex_enter(&stmf_state.stmf_lock); 384 stmf_state.stmf_opened = 0; 385 if (stmf_state.stmf_exclusive_open && 386 (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) { 387 stmf_state.stmf_config_state = STMF_CONFIG_NONE; 388 stmf_delete_all_ppds(); 389 stmf_view_clear_config(); 390 stmf_view_init(); 391 } 392 stmf_state.stmf_exclusive_open = 0; 393 mutex_exit(&stmf_state.stmf_lock); 394 return (0); 395 } 396 397 int 398 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd, 399 void **ibuf, void **obuf) 400 { 401 int ret; 402 403 *ibuf = NULL; 404 *obuf = NULL; 405 *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP); 406 407 ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode); 408 if (ret) 409 return (EFAULT); 410 if ((*iocd)->stmf_version != STMF_VERSION_1) { 411 ret = EINVAL; 412 goto copyin_iocdata_done; 413 } 414 if ((*iocd)->stmf_ibuf_size) { 415 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP); 416 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf), 417 *ibuf, (*iocd)->stmf_ibuf_size, mode); 418 } 419 if ((*iocd)->stmf_obuf_size) 420 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP); 421 422 if (ret == 0) 423 return (0); 424 ret = EFAULT; 425 copyin_iocdata_done:; 426 if (*obuf) { 427 kmem_free(*obuf, (*iocd)->stmf_obuf_size); 428 
*obuf = NULL; 429 } 430 if (*ibuf) { 431 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size); 432 *ibuf = NULL; 433 } 434 kmem_free(*iocd, sizeof (stmf_iocdata_t)); 435 return (ret); 436 } 437 438 int 439 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf) 440 { 441 int ret; 442 443 if (iocd->stmf_obuf_size) { 444 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf, 445 iocd->stmf_obuf_size, mode); 446 if (ret) 447 return (EFAULT); 448 } 449 ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode); 450 if (ret) 451 return (EFAULT); 452 return (0); 453 } 454 455 /* ARGSUSED */ 456 static int 457 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 458 cred_t *credp, int *rval) 459 { 460 stmf_iocdata_t *iocd; 461 void *ibuf = NULL, *obuf = NULL; 462 slist_lu_t *luid_list; 463 slist_target_port_t *lportid_list; 464 stmf_i_lu_t *ilu; 465 stmf_i_local_port_t *ilport; 466 stmf_i_scsi_session_t *iss; 467 slist_scsi_session_t *iss_list; 468 sioc_lu_props_t *lup; 469 sioc_target_port_props_t *lportp; 470 stmf_ppioctl_data_t *ppi, *ppi_out = NULL; 471 uint64_t *ppi_token = NULL; 472 uint8_t *p_id, *id; 473 stmf_state_desc_t *std; 474 stmf_status_t ctl_ret; 475 stmf_state_change_info_t ssi; 476 int ret = 0; 477 uint32_t n; 478 int i; 479 stmf_group_op_data_t *grp_entry; 480 stmf_group_name_t *grpname; 481 stmf_view_op_entry_t *ve; 482 stmf_id_type_t idtype; 483 stmf_id_data_t *id_entry; 484 stmf_id_list_t *id_list; 485 stmf_view_entry_t *view_entry; 486 uint32_t veid; 487 488 if ((cmd & 0xff000000) != STMF_IOCTL) { 489 return (ENOTTY); 490 } 491 492 if (drv_priv(credp) != 0) { 493 return (EPERM); 494 } 495 496 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf); 497 if (ret) 498 return (ret); 499 iocd->stmf_error = 0; 500 501 switch (cmd) { 502 case STMF_IOCTL_LU_LIST: 503 /* retrieves both registered/unregistered */ 504 mutex_enter(&stmf_state.stmf_lock); 505 id_list = &stmf_state.stmf_luid_list; 506 n = 
min(id_list->id_count, 507 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 508 iocd->stmf_obuf_max_nentries = id_list->id_count; 509 luid_list = (slist_lu_t *)obuf; 510 id_entry = id_list->idl_head; 511 for (i = 0; i < n; i++) { 512 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16); 513 id_entry = id_entry->id_next; 514 } 515 516 n = iocd->stmf_obuf_size/sizeof (slist_lu_t); 517 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 518 id = (uint8_t *)ilu->ilu_lu->lu_id; 519 if (stmf_lookup_id(id_list, 16, id + 4) == NULL) { 520 iocd->stmf_obuf_max_nentries++; 521 if (i < n) { 522 bcopy(id + 4, luid_list[i].lu_guid, 523 sizeof (slist_lu_t)); 524 i++; 525 } 526 } 527 } 528 iocd->stmf_obuf_nentries = i; 529 mutex_exit(&stmf_state.stmf_lock); 530 break; 531 532 case STMF_IOCTL_REG_LU_LIST: 533 mutex_enter(&stmf_state.stmf_lock); 534 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus; 535 n = min(stmf_state.stmf_nlus, 536 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 537 iocd->stmf_obuf_nentries = n; 538 ilu = stmf_state.stmf_ilulist; 539 luid_list = (slist_lu_t *)obuf; 540 for (i = 0; i < n; i++) { 541 uint8_t *id; 542 id = (uint8_t *)ilu->ilu_lu->lu_id; 543 bcopy(id + 4, luid_list[i].lu_guid, 16); 544 ilu = ilu->ilu_next; 545 } 546 mutex_exit(&stmf_state.stmf_lock); 547 break; 548 549 case STMF_IOCTL_VE_LU_LIST: 550 mutex_enter(&stmf_state.stmf_lock); 551 id_list = &stmf_state.stmf_luid_list; 552 n = min(id_list->id_count, 553 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 554 iocd->stmf_obuf_max_nentries = id_list->id_count; 555 iocd->stmf_obuf_nentries = n; 556 luid_list = (slist_lu_t *)obuf; 557 id_entry = id_list->idl_head; 558 for (i = 0; i < n; i++) { 559 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16); 560 id_entry = id_entry->id_next; 561 } 562 mutex_exit(&stmf_state.stmf_lock); 563 break; 564 565 case STMF_IOCTL_TARGET_PORT_LIST: 566 mutex_enter(&stmf_state.stmf_lock); 567 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports; 568 n = 
min(stmf_state.stmf_nlports, 569 (iocd->stmf_obuf_size)/sizeof (slist_target_port_t)); 570 iocd->stmf_obuf_nentries = n; 571 ilport = stmf_state.stmf_ilportlist; 572 lportid_list = (slist_target_port_t *)obuf; 573 for (i = 0; i < n; i++) { 574 uint8_t *id; 575 id = (uint8_t *)ilport->ilport_lport->lport_id; 576 bcopy(id, lportid_list[i].target, id[3] + 4); 577 ilport = ilport->ilport_next; 578 } 579 mutex_exit(&stmf_state.stmf_lock); 580 break; 581 582 case STMF_IOCTL_SESSION_LIST: 583 p_id = (uint8_t *)ibuf; 584 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) || 585 (iocd->stmf_ibuf_size < (p_id[3] + 4))) { 586 ret = EINVAL; 587 break; 588 } 589 mutex_enter(&stmf_state.stmf_lock); 590 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport = 591 ilport->ilport_next) { 592 uint8_t *id; 593 id = (uint8_t *)ilport->ilport_lport->lport_id; 594 if ((p_id[3] == id[3]) && 595 (bcmp(p_id + 4, id + 4, id[3]) == 0)) { 596 break; 597 } 598 } 599 if (ilport == NULL) { 600 mutex_exit(&stmf_state.stmf_lock); 601 ret = ENOENT; 602 break; 603 } 604 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions; 605 n = min(ilport->ilport_nsessions, 606 (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t)); 607 iocd->stmf_obuf_nentries = n; 608 iss = ilport->ilport_ss_list; 609 iss_list = (slist_scsi_session_t *)obuf; 610 for (i = 0; i < n; i++) { 611 uint8_t *id; 612 id = (uint8_t *)iss->iss_ss->ss_rport_id; 613 bcopy(id, iss_list[i].initiator, id[3] + 4); 614 iss_list[i].creation_time = (uint32_t) 615 iss->iss_creation_time; 616 if (iss->iss_ss->ss_rport_alias) { 617 (void) strncpy(iss_list[i].alias, 618 iss->iss_ss->ss_rport_alias, 255); 619 iss_list[i].alias[255] = 0; 620 } else { 621 iss_list[i].alias[0] = 0; 622 } 623 iss = iss->iss_next; 624 } 625 mutex_exit(&stmf_state.stmf_lock); 626 break; 627 628 case STMF_IOCTL_GET_LU_PROPERTIES: 629 p_id = (uint8_t *)ibuf; 630 if ((iocd->stmf_ibuf_size < 16) || 631 (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) || 632 (p_id[0] == 0)) { 
633 ret = EINVAL; 634 break; 635 } 636 mutex_enter(&stmf_state.stmf_lock); 637 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 638 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0) 639 break; 640 } 641 if (ilu == NULL) { 642 mutex_exit(&stmf_state.stmf_lock); 643 ret = ENOENT; 644 break; 645 } 646 lup = (sioc_lu_props_t *)obuf; 647 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16); 648 lup->lu_state = ilu->ilu_state & 0x0f; 649 lup->lu_present = 1; /* XXX */ 650 (void) strncpy(lup->lu_provider_name, 651 ilu->ilu_lu->lu_lp->lp_name, 255); 652 lup->lu_provider_name[254] = 0; 653 if (ilu->ilu_lu->lu_alias) { 654 (void) strncpy(lup->lu_alias, 655 ilu->ilu_lu->lu_alias, 255); 656 lup->lu_alias[255] = 0; 657 } else { 658 lup->lu_alias[0] = 0; 659 } 660 mutex_exit(&stmf_state.stmf_lock); 661 break; 662 663 case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES: 664 p_id = (uint8_t *)ibuf; 665 if ((p_id == NULL) || 666 (iocd->stmf_ibuf_size < (p_id[3] + 4)) || 667 (iocd->stmf_obuf_size < 668 sizeof (sioc_target_port_props_t))) { 669 ret = EINVAL; 670 break; 671 } 672 mutex_enter(&stmf_state.stmf_lock); 673 for (ilport = stmf_state.stmf_ilportlist; ilport; 674 ilport = ilport->ilport_next) { 675 uint8_t *id; 676 id = (uint8_t *)ilport->ilport_lport->lport_id; 677 if ((p_id[3] == id[3]) && 678 (bcmp(p_id+4, id+4, id[3]) == 0)) 679 break; 680 } 681 if (ilport == NULL) { 682 mutex_exit(&stmf_state.stmf_lock); 683 ret = ENOENT; 684 break; 685 } 686 lportp = (sioc_target_port_props_t *)obuf; 687 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id, 688 ilport->ilport_lport->lport_id->ident_length + 4); 689 lportp->tgt_state = ilport->ilport_state & 0x0f; 690 lportp->tgt_present = 1; /* XXX */ 691 (void) strncpy(lportp->tgt_provider_name, 692 ilport->ilport_lport->lport_pp->pp_name, 255); 693 lportp->tgt_provider_name[254] = 0; 694 if (ilport->ilport_lport->lport_alias) { 695 (void) strncpy(lportp->tgt_alias, 696 ilport->ilport_lport->lport_alias, 255); 697 
lportp->tgt_alias[255] = 0; 698 } else { 699 lportp->tgt_alias[0] = 0; 700 } 701 mutex_exit(&stmf_state.stmf_lock); 702 break; 703 704 case STMF_IOCTL_SET_STMF_STATE: 705 if ((ibuf == NULL) || 706 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 707 ret = EINVAL; 708 break; 709 } 710 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf); 711 break; 712 713 case STMF_IOCTL_GET_STMF_STATE: 714 if ((obuf == NULL) || 715 (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) { 716 ret = EINVAL; 717 break; 718 } 719 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf); 720 break; 721 722 case STMF_IOCTL_SET_ALUA_STATE: 723 if ((ibuf == NULL) || 724 (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) { 725 ret = EINVAL; 726 break; 727 } 728 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf); 729 break; 730 731 case STMF_IOCTL_GET_ALUA_STATE: 732 if ((obuf == NULL) || 733 (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) { 734 ret = EINVAL; 735 break; 736 } 737 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf); 738 break; 739 740 case STMF_IOCTL_SET_LU_STATE: 741 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 742 ssi.st_additional_info = NULL; 743 std = (stmf_state_desc_t *)ibuf; 744 if ((ibuf == NULL) || 745 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 746 ret = EINVAL; 747 break; 748 } 749 p_id = std->ident; 750 mutex_enter(&stmf_state.stmf_lock); 751 if (stmf_state.stmf_inventory_locked) { 752 mutex_exit(&stmf_state.stmf_lock); 753 ret = EBUSY; 754 break; 755 } 756 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 757 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0) 758 break; 759 } 760 if (ilu == NULL) { 761 mutex_exit(&stmf_state.stmf_lock); 762 ret = ENOENT; 763 break; 764 } 765 stmf_state.stmf_inventory_locked = 1; 766 mutex_exit(&stmf_state.stmf_lock); 767 cmd = (std->state == STMF_STATE_ONLINE) ? 
STMF_CMD_LU_ONLINE : 768 STMF_CMD_LU_OFFLINE; 769 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi); 770 if (ctl_ret == STMF_ALREADY) 771 ret = 0; 772 else if (ctl_ret != STMF_SUCCESS) 773 ret = EIO; 774 mutex_enter(&stmf_state.stmf_lock); 775 stmf_state.stmf_inventory_locked = 0; 776 mutex_exit(&stmf_state.stmf_lock); 777 break; 778 779 case STMF_IOCTL_SET_TARGET_PORT_STATE: 780 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 781 ssi.st_additional_info = NULL; 782 std = (stmf_state_desc_t *)ibuf; 783 if ((ibuf == NULL) || 784 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 785 ret = EINVAL; 786 break; 787 } 788 p_id = std->ident; 789 mutex_enter(&stmf_state.stmf_lock); 790 if (stmf_state.stmf_inventory_locked) { 791 mutex_exit(&stmf_state.stmf_lock); 792 ret = EBUSY; 793 break; 794 } 795 for (ilport = stmf_state.stmf_ilportlist; ilport; 796 ilport = ilport->ilport_next) { 797 uint8_t *id; 798 id = (uint8_t *)ilport->ilport_lport->lport_id; 799 if ((id[3] == p_id[3]) && 800 (bcmp(id+4, p_id+4, id[3]) == 0)) { 801 break; 802 } 803 } 804 if (ilport == NULL) { 805 mutex_exit(&stmf_state.stmf_lock); 806 ret = ENOENT; 807 break; 808 } 809 stmf_state.stmf_inventory_locked = 1; 810 mutex_exit(&stmf_state.stmf_lock); 811 cmd = (std->state == STMF_STATE_ONLINE) ? 
812 STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE; 813 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi); 814 if (ctl_ret == STMF_ALREADY) 815 ret = 0; 816 else if (ctl_ret != STMF_SUCCESS) 817 ret = EIO; 818 mutex_enter(&stmf_state.stmf_lock); 819 stmf_state.stmf_inventory_locked = 0; 820 mutex_exit(&stmf_state.stmf_lock); 821 break; 822 823 case STMF_IOCTL_ADD_HG_ENTRY: 824 idtype = STMF_ID_TYPE_HOST; 825 /* FALLTHROUGH */ 826 case STMF_IOCTL_ADD_TG_ENTRY: 827 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 828 ret = EACCES; 829 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 830 break; 831 } 832 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) { 833 idtype = STMF_ID_TYPE_TARGET; 834 } 835 grp_entry = (stmf_group_op_data_t *)ibuf; 836 if ((ibuf == NULL) || 837 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) { 838 ret = EINVAL; 839 break; 840 } 841 if (grp_entry->group.name[0] == '*') { 842 ret = EINVAL; 843 break; /* not allowed */ 844 } 845 mutex_enter(&stmf_state.stmf_lock); 846 ret = stmf_add_group_member(grp_entry->group.name, 847 grp_entry->group.name_size, 848 grp_entry->ident + 4, 849 grp_entry->ident[3], 850 idtype, 851 &iocd->stmf_error); 852 mutex_exit(&stmf_state.stmf_lock); 853 break; 854 case STMF_IOCTL_REMOVE_HG_ENTRY: 855 idtype = STMF_ID_TYPE_HOST; 856 /* FALLTHROUGH */ 857 case STMF_IOCTL_REMOVE_TG_ENTRY: 858 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 859 ret = EACCES; 860 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 861 break; 862 } 863 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) { 864 idtype = STMF_ID_TYPE_TARGET; 865 } 866 grp_entry = (stmf_group_op_data_t *)ibuf; 867 if ((ibuf == NULL) || 868 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) { 869 ret = EINVAL; 870 break; 871 } 872 if (grp_entry->group.name[0] == '*') { 873 ret = EINVAL; 874 break; /* not allowed */ 875 } 876 mutex_enter(&stmf_state.stmf_lock); 877 ret = stmf_remove_group_member(grp_entry->group.name, 878 grp_entry->group.name_size, 
879 grp_entry->ident + 4, 880 grp_entry->ident[3], 881 idtype, 882 &iocd->stmf_error); 883 mutex_exit(&stmf_state.stmf_lock); 884 break; 885 case STMF_IOCTL_CREATE_HOST_GROUP: 886 idtype = STMF_ID_TYPE_HOST_GROUP; 887 /* FALLTHROUGH */ 888 case STMF_IOCTL_CREATE_TARGET_GROUP: 889 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 890 ret = EACCES; 891 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 892 break; 893 } 894 grpname = (stmf_group_name_t *)ibuf; 895 896 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP) 897 idtype = STMF_ID_TYPE_TARGET_GROUP; 898 if ((ibuf == NULL) || 899 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 900 ret = EINVAL; 901 break; 902 } 903 if (grpname->name[0] == '*') { 904 ret = EINVAL; 905 break; /* not allowed */ 906 } 907 mutex_enter(&stmf_state.stmf_lock); 908 ret = stmf_add_group(grpname->name, 909 grpname->name_size, idtype, &iocd->stmf_error); 910 mutex_exit(&stmf_state.stmf_lock); 911 break; 912 case STMF_IOCTL_REMOVE_HOST_GROUP: 913 idtype = STMF_ID_TYPE_HOST_GROUP; 914 /* FALLTHROUGH */ 915 case STMF_IOCTL_REMOVE_TARGET_GROUP: 916 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 917 ret = EACCES; 918 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 919 break; 920 } 921 grpname = (stmf_group_name_t *)ibuf; 922 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP) 923 idtype = STMF_ID_TYPE_TARGET_GROUP; 924 if ((ibuf == NULL) || 925 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 926 ret = EINVAL; 927 break; 928 } 929 if (grpname->name[0] == '*') { 930 ret = EINVAL; 931 break; /* not allowed */ 932 } 933 mutex_enter(&stmf_state.stmf_lock); 934 ret = stmf_remove_group(grpname->name, 935 grpname->name_size, idtype, &iocd->stmf_error); 936 mutex_exit(&stmf_state.stmf_lock); 937 break; 938 case STMF_IOCTL_VALIDATE_VIEW: 939 case STMF_IOCTL_ADD_VIEW_ENTRY: 940 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 941 ret = EACCES; 942 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 943 break; 944 } 945 ve = 
(stmf_view_op_entry_t *)ibuf; 946 if ((ibuf == NULL) || 947 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) { 948 ret = EINVAL; 949 break; 950 } 951 if (!ve->ve_lu_number_valid) 952 ve->ve_lu_nbr[2] = 0xFF; 953 if (ve->ve_all_hosts) { 954 ve->ve_host_group.name[0] = '*'; 955 ve->ve_host_group.name_size = 1; 956 } 957 if (ve->ve_all_targets) { 958 ve->ve_target_group.name[0] = '*'; 959 ve->ve_target_group.name_size = 1; 960 } 961 if (ve->ve_ndx_valid) 962 veid = ve->ve_ndx; 963 else 964 veid = 0xffffffff; 965 mutex_enter(&stmf_state.stmf_lock); 966 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) { 967 ret = stmf_add_ve(ve->ve_host_group.name, 968 ve->ve_host_group.name_size, 969 ve->ve_target_group.name, 970 ve->ve_target_group.name_size, 971 ve->ve_guid, 972 &veid, 973 ve->ve_lu_nbr, 974 &iocd->stmf_error); 975 } else { /* STMF_IOCTL_VALIDATE_VIEW */ 976 ret = stmf_validate_lun_ve(ve->ve_host_group.name, 977 ve->ve_host_group.name_size, 978 ve->ve_target_group.name, 979 ve->ve_target_group.name_size, 980 ve->ve_lu_nbr, 981 &iocd->stmf_error); 982 } 983 mutex_exit(&stmf_state.stmf_lock); 984 if (ret == 0 && 985 (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) && 986 iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) { 987 stmf_view_op_entry_t *ve_ret = 988 (stmf_view_op_entry_t *)obuf; 989 iocd->stmf_obuf_nentries = 1; 990 iocd->stmf_obuf_max_nentries = 1; 991 if (!ve->ve_ndx_valid) { 992 ve_ret->ve_ndx = veid; 993 ve_ret->ve_ndx_valid = 1; 994 } 995 if (!ve->ve_lu_number_valid) { 996 ve_ret->ve_lu_number_valid = 1; 997 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8); 998 } 999 } 1000 break; 1001 case STMF_IOCTL_REMOVE_VIEW_ENTRY: 1002 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1003 ret = EACCES; 1004 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1005 break; 1006 } 1007 ve = (stmf_view_op_entry_t *)ibuf; 1008 if ((ibuf == NULL) || 1009 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) { 1010 ret = EINVAL; 1011 break; 1012 } 1013 if 
(!ve->ve_ndx_valid) { 1014 ret = EINVAL; 1015 break; 1016 } 1017 mutex_enter(&stmf_state.stmf_lock); 1018 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx, 1019 &iocd->stmf_error); 1020 mutex_exit(&stmf_state.stmf_lock); 1021 break; 1022 case STMF_IOCTL_GET_HG_LIST: 1023 id_list = &stmf_state.stmf_hg_list; 1024 /* FALLTHROUGH */ 1025 case STMF_IOCTL_GET_TG_LIST: 1026 if (cmd == STMF_IOCTL_GET_TG_LIST) 1027 id_list = &stmf_state.stmf_tg_list; 1028 mutex_enter(&stmf_state.stmf_lock); 1029 iocd->stmf_obuf_max_nentries = id_list->id_count; 1030 n = min(id_list->id_count, 1031 (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t)); 1032 iocd->stmf_obuf_nentries = n; 1033 id_entry = id_list->idl_head; 1034 grpname = (stmf_group_name_t *)obuf; 1035 for (i = 0; i < n; i++) { 1036 if (id_entry->id_data[0] == '*') { 1037 if (iocd->stmf_obuf_nentries > 0) { 1038 iocd->stmf_obuf_nentries--; 1039 } 1040 id_entry = id_entry->id_next; 1041 continue; 1042 } 1043 grpname->name_size = id_entry->id_data_size; 1044 bcopy(id_entry->id_data, grpname->name, 1045 id_entry->id_data_size); 1046 grpname++; 1047 id_entry = id_entry->id_next; 1048 } 1049 mutex_exit(&stmf_state.stmf_lock); 1050 break; 1051 case STMF_IOCTL_GET_HG_ENTRIES: 1052 id_list = &stmf_state.stmf_hg_list; 1053 /* FALLTHROUGH */ 1054 case STMF_IOCTL_GET_TG_ENTRIES: 1055 grpname = (stmf_group_name_t *)ibuf; 1056 if ((ibuf == NULL) || 1057 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1058 ret = EINVAL; 1059 break; 1060 } 1061 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) { 1062 id_list = &stmf_state.stmf_tg_list; 1063 } 1064 mutex_enter(&stmf_state.stmf_lock); 1065 id_entry = stmf_lookup_id(id_list, grpname->name_size, 1066 grpname->name); 1067 if (!id_entry) 1068 ret = ENODEV; 1069 else { 1070 stmf_ge_ident_t *grp_entry; 1071 id_list = (stmf_id_list_t *)id_entry->id_impl_specific; 1072 iocd->stmf_obuf_max_nentries = id_list->id_count; 1073 n = min(id_list->id_count, 1074 iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t)); 1075 
iocd->stmf_obuf_nentries = n; 1076 id_entry = id_list->idl_head; 1077 grp_entry = (stmf_ge_ident_t *)obuf; 1078 for (i = 0; i < n; i++) { 1079 bcopy(id_entry->id_data, grp_entry->ident, 1080 id_entry->id_data_size); 1081 grp_entry->ident_size = id_entry->id_data_size; 1082 id_entry = id_entry->id_next; 1083 grp_entry++; 1084 } 1085 } 1086 mutex_exit(&stmf_state.stmf_lock); 1087 break; 1088 1089 case STMF_IOCTL_GET_VE_LIST: 1090 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t); 1091 mutex_enter(&stmf_state.stmf_lock); 1092 ve = (stmf_view_op_entry_t *)obuf; 1093 for (id_entry = stmf_state.stmf_luid_list.idl_head; 1094 id_entry; id_entry = id_entry->id_next) { 1095 for (view_entry = (stmf_view_entry_t *) 1096 id_entry->id_impl_specific; view_entry; 1097 view_entry = view_entry->ve_next) { 1098 iocd->stmf_obuf_max_nentries++; 1099 if (iocd->stmf_obuf_nentries >= n) 1100 continue; 1101 ve->ve_ndx_valid = 1; 1102 ve->ve_ndx = view_entry->ve_id; 1103 ve->ve_lu_number_valid = 1; 1104 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8); 1105 bcopy(view_entry->ve_luid->id_data, ve->ve_guid, 1106 view_entry->ve_luid->id_data_size); 1107 if (view_entry->ve_hg->id_data[0] == '*') { 1108 ve->ve_all_hosts = 1; 1109 } else { 1110 bcopy(view_entry->ve_hg->id_data, 1111 ve->ve_host_group.name, 1112 view_entry->ve_hg->id_data_size); 1113 ve->ve_host_group.name_size = 1114 view_entry->ve_hg->id_data_size; 1115 } 1116 1117 if (view_entry->ve_tg->id_data[0] == '*') { 1118 ve->ve_all_targets = 1; 1119 } else { 1120 bcopy(view_entry->ve_tg->id_data, 1121 ve->ve_target_group.name, 1122 view_entry->ve_tg->id_data_size); 1123 ve->ve_target_group.name_size = 1124 view_entry->ve_tg->id_data_size; 1125 } 1126 ve++; 1127 iocd->stmf_obuf_nentries++; 1128 } 1129 } 1130 mutex_exit(&stmf_state.stmf_lock); 1131 break; 1132 1133 case STMF_IOCTL_LU_VE_LIST: 1134 p_id = (uint8_t *)ibuf; 1135 if ((iocd->stmf_ibuf_size != 16) || 1136 (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) { 1137 ret = 
EINVAL; 1138 break; 1139 } 1140 1141 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t); 1142 mutex_enter(&stmf_state.stmf_lock); 1143 ve = (stmf_view_op_entry_t *)obuf; 1144 for (id_entry = stmf_state.stmf_luid_list.idl_head; 1145 id_entry; id_entry = id_entry->id_next) { 1146 if (bcmp(id_entry->id_data, p_id, 16) != 0) 1147 continue; 1148 for (view_entry = (stmf_view_entry_t *) 1149 id_entry->id_impl_specific; view_entry; 1150 view_entry = view_entry->ve_next) { 1151 iocd->stmf_obuf_max_nentries++; 1152 if (iocd->stmf_obuf_nentries >= n) 1153 continue; 1154 ve->ve_ndx_valid = 1; 1155 ve->ve_ndx = view_entry->ve_id; 1156 ve->ve_lu_number_valid = 1; 1157 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8); 1158 bcopy(view_entry->ve_luid->id_data, ve->ve_guid, 1159 view_entry->ve_luid->id_data_size); 1160 if (view_entry->ve_hg->id_data[0] == '*') { 1161 ve->ve_all_hosts = 1; 1162 } else { 1163 bcopy(view_entry->ve_hg->id_data, 1164 ve->ve_host_group.name, 1165 view_entry->ve_hg->id_data_size); 1166 ve->ve_host_group.name_size = 1167 view_entry->ve_hg->id_data_size; 1168 } 1169 1170 if (view_entry->ve_tg->id_data[0] == '*') { 1171 ve->ve_all_targets = 1; 1172 } else { 1173 bcopy(view_entry->ve_tg->id_data, 1174 ve->ve_target_group.name, 1175 view_entry->ve_tg->id_data_size); 1176 ve->ve_target_group.name_size = 1177 view_entry->ve_tg->id_data_size; 1178 } 1179 ve++; 1180 iocd->stmf_obuf_nentries++; 1181 } 1182 break; 1183 } 1184 mutex_exit(&stmf_state.stmf_lock); 1185 break; 1186 1187 case STMF_IOCTL_LOAD_PP_DATA: 1188 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1189 ret = EACCES; 1190 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1191 break; 1192 } 1193 ppi = (stmf_ppioctl_data_t *)ibuf; 1194 if ((ppi == NULL) || 1195 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1196 ret = EINVAL; 1197 break; 1198 } 1199 /* returned token */ 1200 ppi_token = (uint64_t *)obuf; 1201 if ((ppi_token == NULL) || 1202 (iocd->stmf_obuf_size < sizeof (uint64_t))) 
{ 1203 ret = EINVAL; 1204 break; 1205 } 1206 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error); 1207 break; 1208 1209 case STMF_IOCTL_GET_PP_DATA: 1210 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1211 ret = EACCES; 1212 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1213 break; 1214 } 1215 ppi = (stmf_ppioctl_data_t *)ibuf; 1216 if (ppi == NULL || 1217 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1218 ret = EINVAL; 1219 break; 1220 } 1221 ppi_out = (stmf_ppioctl_data_t *)obuf; 1222 if ((ppi_out == NULL) || 1223 (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) { 1224 ret = EINVAL; 1225 break; 1226 } 1227 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error); 1228 break; 1229 1230 case STMF_IOCTL_CLEAR_PP_DATA: 1231 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1232 ret = EACCES; 1233 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1234 break; 1235 } 1236 ppi = (stmf_ppioctl_data_t *)ibuf; 1237 if ((ppi == NULL) || 1238 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1239 ret = EINVAL; 1240 break; 1241 } 1242 ret = stmf_delete_ppd_ioctl(ppi); 1243 break; 1244 1245 case STMF_IOCTL_CLEAR_TRACE: 1246 stmf_trace_clear(); 1247 break; 1248 1249 case STMF_IOCTL_ADD_TRACE: 1250 if (iocd->stmf_ibuf_size && ibuf) { 1251 ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0; 1252 stmf_trace("\nstradm", "%s\n", ibuf); 1253 } 1254 break; 1255 1256 case STMF_IOCTL_GET_TRACE_POSITION: 1257 if (obuf && (iocd->stmf_obuf_size > 3)) { 1258 mutex_enter(&trace_buf_lock); 1259 *((int *)obuf) = trace_buf_curndx; 1260 mutex_exit(&trace_buf_lock); 1261 } else { 1262 ret = EINVAL; 1263 } 1264 break; 1265 1266 case STMF_IOCTL_GET_TRACE: 1267 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) { 1268 ret = EINVAL; 1269 break; 1270 } 1271 i = *((int *)ibuf); 1272 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) > 1273 trace_buf_size)) { 1274 ret = EINVAL; 1275 break; 1276 } 1277 mutex_enter(&trace_buf_lock); 
		bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
		mutex_exit(&trace_buf_lock);
		break;

	default:
		ret = ENOTTY;
	}

	/*
	 * Copy results back out on success, or on failure when a
	 * detailed error code was recorded for the caller.
	 */
	if (ret == 0) {
		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
	} else if (iocd->stmf_error) {
		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
	}
	if (obuf) {
		kmem_free(obuf, iocd->stmf_obuf_size);
		obuf = NULL;
	}
	if (ibuf) {
		kmem_free(ibuf, iocd->stmf_ibuf_size);
		ibuf = NULL;
	}
	kmem_free(iocd, sizeof (stmf_iocdata_t));
	return (ret);
}

/*
 * Derive the aggregate service state from the states of all registered
 * local ports and logical units.  Caller must hold stmf_state.stmf_lock.
 */
static int
stmf_get_service_state()
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	int online = 0;
	int offline = 0;
	int onlining = 0;
	int offlining = 0;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state == STMF_STATE_OFFLINE)
			offline++;
		else if (ilport->ilport_state == STMF_STATE_ONLINE)
			online++;
		else if (ilport->ilport_state == STMF_STATE_ONLINING)
			onlining++;
		else if (ilport->ilport_state == STMF_STATE_OFFLINING)
			offlining++;
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state == STMF_STATE_OFFLINE)
			offline++;
		else if (ilu->ilu_state == STMF_STATE_ONLINE)
			online++;
		else if (ilu->ilu_state == STMF_STATE_ONLINING)
			onlining++;
		else if (ilu->ilu_state == STMF_STATE_OFFLINING)
			offlining++;
	}

	/* Service running: ONLINING while any member is still coming up */
	if (stmf_state.stmf_service_running) {
		if (onlining)
			return (STMF_STATE_ONLINING);
		else
			return (STMF_STATE_ONLINE);
	}

	if (offlining) {
		return (STMF_STATE_OFFLINING);
	}

	return (STMF_STATE_OFFLINE);
}

/*
 * Handle a user request (via ioctl) to change the overall STMF service
 * state and/or configuration state.  Takes the framework online or
 * offline by driving every registered lport and LU through stmf_ctl().
 * Returns 0 or an errno (EACCES/EBUSY/EINVAL).
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* state changes require the exclusive (controlling) open */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		if (std->config_state == STMF_CONFIG_INIT) {
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			/* (re)initialize: wipe provider data and views */
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Going online: lock the inventory, then drop the state
		 * lock while issuing the (potentially blocking) stmf_ctl
		 * calls against each previously-online lport and LU.
		 */
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (ilport->ilport_prev_state != STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (ilu->ilu_prev_state != STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* Going offline: same lock-drop pattern as the online path above */
	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}

/*
 * Report the current aggregate service state and config state.
 * Always succeeds.
 */
static int
stmf_get_stmf_state(stmf_state_desc_t *std)
{
	mutex_enter(&stmf_state.stmf_lock);
	std->state = stmf_get_service_state();
	std->config_state = stmf_state.stmf_config_state;
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}

/*
 * handles registration message from pppt for a logical unit
 *
 * Looks up the named LU provider and forwards the proxy register/active
 * message to it via lp_proxy_msg.  The state lock is dropped before the
 * provider callback is invoked.  Silently succeeds if no matching
 * provider is registered.
 */
stmf_status_t
stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
{
	stmf_i_lu_provider_t *ilp;
	stmf_lu_provider_t *lp;
	mutex_enter(&stmf_state.stmf_lock);
	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
		if (strcmp(msg->icrl_lu_provider_name,
		    ilp->ilp_lp->lp_name) == 0) {
			lp = ilp->ilp_lp;
			mutex_exit(&stmf_state.stmf_lock);
			lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
			    msg->icrl_cb_arg_len, type);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * handles de-registration message from pppt for a logical unit
 *
 * Same lookup/dispatch pattern as stmf_ic_lu_reg, but always sends
 * STMF_MSG_LU_DEREGISTER with no callback argument.
 */
stmf_status_t
stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
{
	stmf_i_lu_provider_t *ilp;
	stmf_lu_provider_t *lp;
	mutex_enter(&stmf_state.stmf_lock);
	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
		if (strcmp(msg->icrl_lu_provider_name,
		    ilp->ilp_lp->lp_name) == 0) {
			lp = ilp->ilp_lp;
			mutex_exit(&stmf_state.stmf_lock);
			lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
			    STMF_MSG_LU_DEREGISTER);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * helper function to find a task that matches a task_msgid
 *
 * Locates the LU whose 16-byte GUID matches lu_id, then scans its task
 * list for an active (not freed, not aborting) task carrying the given
 * proxy message id.  Returns NULL when no match exists.
 *
 * NOTE(review): both locks are dropped before the task pointer is
 * returned, so the task is not pinned by this function — callers appear
 * to rely on higher-level serialization; confirm before reuse.
 */
scsi_task_t *
find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
{
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
			break;
		}
	}

	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}

	mutex_enter(&ilu->ilu_task_lock);
	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			continue;
		}
		if (itask->itask_proxy_msg_id == task_msgid) {
			break;
		}
	}
	mutex_exit(&ilu->ilu_task_lock);
	mutex_exit(&stmf_state.stmf_lock);

	if (itask != NULL) {
		return (itask->itask_task);
	} else {
		/* task not found. Likely already aborted. */
		return (NULL);
	}
}

/*
 * message received from pppt/ic
 *
 * Top-level dispatcher for inter-node proxy messages.  Rejects all
 * traffic unless ALUA mode is enabled.  The message is freed here on
 * every path; handlers must not retain it.
 */
stmf_status_t
stmf_msg_rx(stmf_ic_msg_t *msg)
{
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_alua_state != 1) {
		mutex_exit(&stmf_state.stmf_lock);
		cmn_err(CE_WARN, "stmf alua state is disabled");
		ic_msg_free(msg);
		return (STMF_FAILURE);
	}
	mutex_exit(&stmf_state.stmf_lock);

	switch (msg->icm_msg_type) {
	case STMF_ICM_REGISTER_LUN:
		(void) stmf_ic_lu_reg(
		    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
		    STMF_MSG_LU_REGISTER);
		break;
	case STMF_ICM_LUN_ACTIVE:
		(void) stmf_ic_lu_reg(
		    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
		    STMF_MSG_LU_ACTIVE);
		break;
	case STMF_ICM_DEREGISTER_LUN:
		(void) stmf_ic_lu_dereg(
		    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
		break;
	case STMF_ICM_SCSI_DATA:
		(void) stmf_ic_rx_scsi_data(
		    (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
		break;
	case STMF_ICM_SCSI_STATUS:
		(void) stmf_ic_rx_scsi_status(
		    (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
		break;
	case STMF_ICM_STATUS:
		(void) stmf_ic_rx_status(
		    (stmf_ic_status_msg_t *)msg->icm_msg);
		break;
	default:
		cmn_err(CE_WARN, "unknown message received %d",
		    msg->icm_msg_type);
		ic_msg_free(msg);
		return (STMF_FAILURE);
	}
	ic_msg_free(msg);
	return (STMF_SUCCESS);
}

/*
 * Handle a status message from the proxy.  Only successful proxy-port
 * registration acknowledgements are acted on: the matching lport (by
 * registration msgid) is marked proxy-registered.
 */
stmf_status_t
stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
{
	stmf_i_local_port_t *ilport;

	if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
		/* for now, ignore other message status */
		return (STMF_SUCCESS);
	}

	if (msg->ics_status != STMF_SUCCESS) {
		return (STMF_SUCCESS);
	}

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (msg->ics_msgid == ilport->ilport_reg_msgid) {
			ilport->ilport_proxy_registered = 1;
			break;
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * handles scsi status message from pppt
 *
 * Copies the proxied SCSI status/sense into the matching task and
 * completes it.  A missing task (already aborted) is not an error.
 */
stmf_status_t
stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
{
	scsi_task_t *task;

	task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);

	if (task == NULL) {
		return (STMF_SUCCESS);
	}

	task->task_scsi_status = msg->icss_status;
	task->task_sense_data = msg->icss_sense;
	task->task_sense_length = msg->icss_sense_len;
	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);

	return (STMF_SUCCESS);
}

/*
 * handles scsi data message from pppt
 *
 * Wraps the proxied data in an stmf_xfer_data_t, attaches it to a dbuf
 * (reusing the task's proxy dbuf when present) and starts the transfer
 * toward the remote port.  If the task cannot be found, an xfer-done
 * failure message is sent back to pppt instead.
 */
stmf_status_t
stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
{
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	stmf_xfer_data_t *xd = NULL;
	stmf_data_buf_t *dbuf;
	uint32_t sz, minsz, xd_sz, asz;

	task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
	if (task == NULL) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		static uint64_t data_msg_id;
		stmf_status_t ic_ret = STMF_FAILURE;
		mutex_enter(&stmf_state.stmf_lock);
		data_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/*
		 * send xfer done status to pppt
		 * for now, set the session id to 0 as we cannot
		 * ascertain it since we cannot find the task
		 */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit proxy msg");
			}
		}
		return (STMF_FAILURE);
	}

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	dbuf = itask->itask_proxy_dbuf;

	task->task_cmd_xfer_length = msg->icsd_data_len;

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}

	sz = min(task->task_expected_xfer_length,
	    task->task_cmd_xfer_length);

	/* header size minus the 4-byte placeholder buf member */
	xd_sz = msg->icsd_data_len;
	asz = xd_sz + sizeof (*xd) - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);

	if (xd == NULL) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_FAILURE);
	}

	xd->alloc_size = asz;
	xd->size_left = xd_sz;
	bcopy(msg->icsd_data, xd->buf, xd_sz);

	sz = min(sz, xd->size_left);
	xd->size_left = sz;
	minsz = min(512, sz);

	if (dbuf == NULL)
		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
	if (dbuf == NULL) {
		kmem_free(xd, xd->alloc_size);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_FAILURE);
	}
	dbuf->db_lu_private = xd;
	stmf_xd_to_dbuf(dbuf);

	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	return (STMF_SUCCESS);
}

/*
 * Forward a SCSI command (and optional initial data) to the peer node
 * via the pppt proxy.  Fails unless ALUA is enabled and this lport has
 * completed proxy registration.
 */
stmf_status_t
stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	stmf_ic_msg_t *ic_cmd_msg;
	stmf_ic_msg_status_t ic_ret;
	stmf_status_t ret = STMF_FAILURE;

	if (stmf_state.stmf_alua_state != 1) {
		cmn_err(CE_WARN, "stmf alua state is disabled");
		return (STMF_FAILURE);
	}

	if
	(ilport->ilport_proxy_registered == 0) {
		return (STMF_FAILURE);
	}

	/* assign this task its proxy message id under the state lock */
	mutex_enter(&stmf_state.stmf_lock);
	itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
	mutex_exit(&stmf_state.stmf_lock);
	itask->itask_proxy_dbuf = dbuf;

	/*
	 * stmf will now take over the task handling for this task
	 * but it still needs to be treated differently from other
	 * default handled tasks, hence the ITASK_PROXY_TASK
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
	if (dbuf) {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
		    itask->itask_proxy_msg_id);
	} else {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, 0, NULL, itask->itask_proxy_msg_id);
	}
	if (ic_cmd_msg) {
		ic_ret = ic_tx_msg(ic_cmd_msg);
		if (ic_ret == STMF_IC_MSG_SUCCESS) {
			ret = STMF_SUCCESS;
		}
	}
	return (ret);
}


/*
 * Lazily load the pppt proxy module and resolve all of the
 * inter-connect entry points used for ALUA proxying.  Each function
 * pointer is looked up only if not already resolved, so this is safe
 * to call repeatedly.  Returns STMF_FAILURE if the module or any
 * symbol cannot be found.
 */
stmf_status_t
pppt_modload()
{
	int error;

	if (pppt_mod == NULL && ((pppt_mod =
	    ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
		cmn_err(CE_WARN, "Unable to load pppt");
		return (STMF_FAILURE);
	}

	if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
	    (stmf_ic_reg_port_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
		return (STMF_FAILURE);
	}


	if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
	    (stmf_ic_dereg_port_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
	    (stmf_ic_reg_lun_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
	    (stmf_ic_lun_active_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
	    (stmf_ic_dereg_lun_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
	    (stmf_ic_scsi_cmd_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
	    ((ic_scsi_data_xfer_done_msg_alloc =
	    (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol -"
		    "stmf_ic_scsi_data_xfer_done_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_session_reg_msg_alloc == NULL &&
	    ((ic_session_reg_msg_alloc =
	    (stmf_ic_session_create_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol -"
		    "stmf_ic_session_create_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_session_dereg_msg_alloc == NULL &&
	    ((ic_session_dereg_msg_alloc =
	    (stmf_ic_session_destroy_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol -"
		    "stmf_ic_session_destroy_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_tx_msg == NULL && ((ic_tx_msg =
	    (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
	    &error)) == NULL)) {
		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
		return (STMF_FAILURE);
	}

	if (ic_msg_free == NULL && ((ic_msg_free =
	    (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
	    &error)) == NULL)) {
		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
		return (STMF_FAILURE);
	}
	return (STMF_SUCCESS);
}

/*
 * Report the current ALUA node id and enable state.
 */
static void
stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
{
	mutex_enter(&stmf_state.stmf_lock);
	alua_state->alua_node = stmf_state.stmf_alua_node;
	alua_state->alua_state = stmf_state.stmf_alua_state;
	mutex_exit(&stmf_state.stmf_lock);
}


/*
 * Enable or disable ALUA mode.  On enable, loads the pppt proxy module
 * and registers every existing non-standby local port and every active
 * ALUA-capable logical unit with the peer.  alua_state and alua_node
 * must each be 0 or 1.  Returns 0 or an errno (EINVAL/EIO).
 */
static int
stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_lu_t *lu;
	stmf_ic_msg_status_t ic_ret;
	stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
	stmf_local_port_t *lport;
	int ret = 0;

	if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (alua_state->alua_state == 1) {
		if (pppt_modload() == STMF_FAILURE) {
			ret = EIO;
			goto err;
		}
		if (alua_state->alua_node != 0) {
			/* reset existing rtpids to new base */
			stmf_rtpid_counter = 255;
		}
		stmf_state.stmf_alua_node = alua_state->alua_node;
		stmf_state.stmf_alua_state = 1;
		/* register existing local ports with ppp */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* skip standby ports */
			if (ilport->ilport_standby == 1) {
				continue;
			}
			if (alua_state->alua_node != 0) {
				ilport->ilport_rtpid =
				    atomic_add_16_nv(&stmf_rtpid_counter, 1);
			}
			lport = ilport->ilport_lport;
			ic_reg_port = ic_reg_port_msg_alloc(
			    lport->lport_id, ilport->ilport_rtpid,
			    0, NULL, stmf_proxy_msg_id);
			if (ic_reg_port) {
				ic_ret = ic_tx_msg(ic_reg_port);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/* remember msgid for the ack */
					ilport->ilport_reg_msgid =
					    stmf_proxy_msg_id++;
				} else {
					cmn_err(CE_WARN,
					    "error on port registration "
					    "port - %s",
					    ilport->ilport_kstat_tgt_name);
				}
			}
		}
		/* register existing logical units */
		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (ilu->ilu_access != STMF_LU_ACTIVE) {
				continue;
			}
			/* register with proxy module */
			lu = ilu->ilu_lu;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_reg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}
	} else {
		stmf_state.stmf_alua_state = 0;
	}

err:
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}


/* Framework-private trailer appended to every stmf_alloc'd object. */
typedef struct {
	void *bp;	/* back pointer from internal struct to main struct */
	int alloc_size;
} __istmf_t;

/* Layout handle for an stmf_alloc'd object (see stmf_alloc below). */
typedef struct {
	__istmf_t *fp;	/* Framework private */
	void *cp;	/* Caller private */
	void *ss;	/* struct specific */
} __stmf_t;

static struct
{ 2032 int shared; 2033 int fw_private; 2034 } stmf_sizes[] = { { 0, 0 }, 2035 { GET_STRUCT_SIZE(stmf_lu_provider_t), 2036 GET_STRUCT_SIZE(stmf_i_lu_provider_t) }, 2037 { GET_STRUCT_SIZE(stmf_port_provider_t), 2038 GET_STRUCT_SIZE(stmf_i_port_provider_t) }, 2039 { GET_STRUCT_SIZE(stmf_local_port_t), 2040 GET_STRUCT_SIZE(stmf_i_local_port_t) }, 2041 { GET_STRUCT_SIZE(stmf_lu_t), 2042 GET_STRUCT_SIZE(stmf_i_lu_t) }, 2043 { GET_STRUCT_SIZE(stmf_scsi_session_t), 2044 GET_STRUCT_SIZE(stmf_i_scsi_session_t) }, 2045 { GET_STRUCT_SIZE(scsi_task_t), 2046 GET_STRUCT_SIZE(stmf_i_scsi_task_t) }, 2047 { GET_STRUCT_SIZE(stmf_data_buf_t), 2048 GET_STRUCT_SIZE(__istmf_t) }, 2049 { GET_STRUCT_SIZE(stmf_dbuf_store_t), 2050 GET_STRUCT_SIZE(__istmf_t) } 2051 2052 }; 2053 2054 void * 2055 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags) 2056 { 2057 int stmf_size; 2058 int kmem_flag; 2059 __stmf_t *sh; 2060 2061 if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS)) 2062 return (NULL); 2063 2064 if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) { 2065 kmem_flag = KM_NOSLEEP; 2066 } else { 2067 kmem_flag = KM_SLEEP; 2068 } 2069 2070 additional_size = (additional_size + 7) & (~7); 2071 stmf_size = stmf_sizes[struct_id].shared + 2072 stmf_sizes[struct_id].fw_private + additional_size; 2073 2074 sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag); 2075 2076 if (sh == NULL) 2077 return (NULL); 2078 2079 /* 2080 * In principle, the implementation inside stmf_alloc should not 2081 * be changed anyway. But the original order of framework private 2082 * data and caller private data does not support sglist in the caller 2083 * private data. 2084 * To work around this, the memory segments of framework private 2085 * data and caller private data are re-ordered here. 2086 * A better solution is to provide a specific interface to allocate 2087 * the sglist, then we will not need this workaround any more. 
2088 * But before the new interface is available, the memory segment 2089 * ordering should be kept as is. 2090 */ 2091 sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared); 2092 sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh, 2093 stmf_sizes[struct_id].shared + additional_size); 2094 2095 sh->fp->bp = sh; 2096 /* Just store the total size instead of storing additional size */ 2097 sh->fp->alloc_size = stmf_size; 2098 2099 return (sh); 2100 } 2101 2102 void 2103 stmf_free(void *ptr) 2104 { 2105 __stmf_t *sh = (__stmf_t *)ptr; 2106 2107 /* 2108 * So far we dont need any struct specific processing. If such 2109 * a need ever arises, then store the struct id in the framework 2110 * private section and get it here as sh->fp->struct_id. 2111 */ 2112 kmem_free(ptr, sh->fp->alloc_size); 2113 } 2114 2115 /* 2116 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the 2117 * framework and returns a pointer to framework private data for the lu. 2118 * Returns NULL if the lu was not found. 2119 */ 2120 stmf_i_lu_t * 2121 stmf_lookup_lu(stmf_lu_t *lu) 2122 { 2123 stmf_i_lu_t *ilu; 2124 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2125 2126 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 2127 if (ilu->ilu_lu == lu) 2128 return (ilu); 2129 } 2130 return (NULL); 2131 } 2132 2133 /* 2134 * Given a pointer to stmf_local_port_t, verifies if this lport is registered 2135 * with the framework and returns a pointer to framework private data for 2136 * the lport. 2137 * Returns NULL if the lport was not found. 
2138 */ 2139 stmf_i_local_port_t * 2140 stmf_lookup_lport(stmf_local_port_t *lport) 2141 { 2142 stmf_i_local_port_t *ilport; 2143 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2144 2145 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2146 ilport = ilport->ilport_next) { 2147 if (ilport->ilport_lport == lport) 2148 return (ilport); 2149 } 2150 return (NULL); 2151 } 2152 2153 stmf_status_t 2154 stmf_register_lu_provider(stmf_lu_provider_t *lp) 2155 { 2156 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private; 2157 stmf_pp_data_t *ppd; 2158 uint32_t cb_flags; 2159 2160 if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2) 2161 return (STMF_FAILURE); 2162 2163 mutex_enter(&stmf_state.stmf_lock); 2164 ilp->ilp_next = stmf_state.stmf_ilplist; 2165 stmf_state.stmf_ilplist = ilp; 2166 stmf_state.stmf_nlps++; 2167 2168 /* See if we need to do a callback */ 2169 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2170 if (strcmp(ppd->ppd_name, lp->lp_name) == 0) { 2171 break; 2172 } 2173 } 2174 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) { 2175 goto rlp_bail_out; 2176 } 2177 ilp->ilp_ppd = ppd; 2178 ppd->ppd_provider = ilp; 2179 if (lp->lp_cb == NULL) 2180 goto rlp_bail_out; 2181 ilp->ilp_cb_in_progress = 1; 2182 cb_flags = STMF_PCB_PREG_COMPLETE; 2183 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) 2184 cb_flags |= STMF_PCB_STMF_ONLINING; 2185 mutex_exit(&stmf_state.stmf_lock); 2186 lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2187 mutex_enter(&stmf_state.stmf_lock); 2188 ilp->ilp_cb_in_progress = 0; 2189 2190 rlp_bail_out: 2191 mutex_exit(&stmf_state.stmf_lock); 2192 2193 return (STMF_SUCCESS); 2194 } 2195 2196 stmf_status_t 2197 stmf_deregister_lu_provider(stmf_lu_provider_t *lp) 2198 { 2199 stmf_i_lu_provider_t **ppilp; 2200 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private; 2201 2202 mutex_enter(&stmf_state.stmf_lock); 2203 if (ilp->ilp_nlus || 
	    ilp->ilp_cb_in_progress) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* unlink the provider from the global list */
	for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
	    ppilp = &((*ppilp)->ilp_next)) {
		if (*ppilp == ilp) {
			*ppilp = ilp->ilp_next;
			stmf_state.stmf_nlps--;
			/* detach any persistent provider data record */
			if (ilp->ilp_ppd) {
				ilp->ilp_ppd->ppd_provider = NULL;
				ilp->ilp_ppd = NULL;
			}
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_NOT_FOUND);
}

/*
 * Register a port provider with the framework. Mirrors
 * stmf_register_lu_provider(): saved persistent data, if any, is delivered
 * through the provider's pp_cb callback with stmf_lock dropped.
 */
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	/* only PORTIF revision 1 is supported */
	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	/* deliver saved data; see stmf_register_lu_provider for locking */
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}

/*
 * Deregister a port provider. Fails with STMF_BUSY while ports are still
 * registered or a data callback is in progress.
 */
stmf_status_t
stmf_deregister_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_i_port_provider_t **ppipp;

	mutex_enter(&stmf_state.stmf_lock);
	if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* unlink the provider from the global list */
	for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
	    ppipp = &((*ppipp)->ipp_next)) {
		if (*ppipp == ipp) {
			*ppipp = ipp->ipp_next;
			stmf_state.stmf_npps--;
			/* detach any persistent provider data record */
			if (ipp->ipp_ppd) {
				ipp->ipp_ppd->ppd_provider = NULL;
				ipp->ipp_ppd = NULL;
			}
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_NOT_FOUND);
}

/*
 * ioctl handler: store (set) persistent provider data. The data arrives as
 * a packed nvlist in ppi->ppi_data. A record (ppd) is created on first use;
 * an optional token check detects lost updates (set-after-get). On success,
 * the new token is returned through ppi_token and, if the named provider is
 * registered, its data-updated callback is invoked.
 * Returns 0 or an errno; *err_ret carries an STMF-specific sub-error.
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t *ipp;
	stmf_i_lu_provider_t *ilp;
	stmf_pp_data_t *ppd;
	nvlist_t *nv;
	int s;
	int ret;

	*err_ret = 0;

	/* exactly one of lu-provider / port-provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	/* look up an existing record of the matching flavor and name */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * NOTE(review): the "- 7" presumably accounts for an 8-byte
		 * name field already inside stmf_pp_data_t (s chars + NUL
		 * replace it) -- confirm against the struct definition.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			/* link the record to a registered lu provider */
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			/* link the record to a registered port provider */
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	/* unpack the caller's nvlist; on failure the old data is kept */
	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			/* drop stmf_lock across the provider callback */
			ilp->ilp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv,
			    cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			/* drop stmf_lock across the provider callback */
			ipp->ipp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}

/*
 * Unlink one persistent provider data record from the global list, detach
 * it from its provider (if any), and free it along with its nvlist.
 * Caller must hold stmf_lock.
 */
void
stmf_delete_ppd(stmf_pp_data_t *ppd)
{
	stmf_pp_data_t **pppd;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	if (ppd->ppd_provider) {
		if (ppd->ppd_lu_provider) {
			((stmf_i_lu_provider_t *)
			    ppd->ppd_provider)->ilp_ppd = NULL;
		} else {
			((stmf_i_port_provider_t *)
			    ppd->ppd_provider)->ipp_ppd = NULL;
		}
		ppd->ppd_provider = NULL;
	}

	/* find the list slot pointing at this record */
	for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
	    pppd = &((*pppd)->ppd_next)) {
		if (*pppd == ppd)
			break;
	}

	if (*pppd == NULL)
		return;

	*pppd = ppd->ppd_next;
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);

	kmem_free(ppd, ppd->ppd_alloc_size);
}

/*
 * ioctl handler: delete the persistent provider data record named in ppi.
 * Returns 0 on success, ENOENT if no matching record, EINVAL on a bad
 * provider-flavor selection.
 */
int
stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
{
	stmf_pp_data_t *ppd;
	int ret = ENOENT;

	/* exactly one of lu-provider / port-provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd) {
		ret = 0;
		stmf_delete_ppd(ppd);
	}

	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}

/*
 * ioctl handler: fetch persistent provider data. Packs the record's nvlist
 * (XDR encoding) into ppi_out->ppi_data and reports the current token so a
 * later set can be validated. If the caller's buffer is too small, the
 * required size is still returned in ppi_out->ppi_data_size and *err_ret is
 * set to STMF_IOCERR_INSUFFICIENT_BUF.
 */
int
stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret)
{
	stmf_pp_data_t *ppd;
	size_t req_size;
	int ret = ENOENT;
	char *bufp = (char *)ppi_out->ppi_data;

	/* exactly one of lu-provider / port-provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd && ppd->ppd_nv) {
		ppi_out->ppi_token = ppd->ppd_token;
		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
		    NV_ENCODE_XDR)) != 0) {
			goto done;
		}
		ppi_out->ppi_data_size = req_size;
		if (req_size > ppi->ppi_data_size) {
			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
			ret = EINVAL;
			goto done;
		}

		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
		    NV_ENCODE_XDR, 0)) != 0) {
			goto done;
		}
		ret = 0;
	}

done:
	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}

/*
 * Free every persistent provider data record. Caller holds stmf_lock.
 */
void
stmf_delete_all_ppds()
{
	stmf_pp_data_t *ppd, *nppd;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
		/* save the link before stmf_delete_ppd frees the record */
		nppd = ppd->ppd_next;
		stmf_delete_ppd(ppd);
	}
}

/*
 * 16 is the max string length of a protocol_ident, increase
 * the size if needed.
 */
#define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
#define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)

/* named-kstat layout for per-LU identity info */
typedef struct stmf_kstat_lu_info {
	kstat_named_t		i_lun_guid;
	kstat_named_t		i_lun_alias;
} stmf_kstat_lu_info_t;

/* named-kstat layout for per-target identity info */
typedef struct stmf_kstat_tgt_info {
	kstat_named_t		i_tgt_name;
	kstat_named_t		i_tgt_alias;
	kstat_named_t		i_protocol;
} stmf_kstat_tgt_info_t;

/*
 * This array matches the Protocol Identifier in stmf_ioctl.h
 * (PROTOCOL_ANY entries; valid indices are 0 .. PROTOCOL_ANY - 1).
 */
char *protocol_ident[PROTOCOL_ANY] = {
	"Fibre Channel",
	"Parallel SCSI",
	"SSA",
	"IEEE_1394",
	"SRP",
	"iSCSI",
	"SAS",
	"ADT",
	"ATAPI",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
};

/*
 * Update the lun wait/run queue count. func is one of the kstat queue
 * accounting routines (invoked under the kstat's io lock).
 */
static void
stmf_update_kstat_lu_q(scsi_task_t *task, void func())
{
	stmf_i_lu_t *ilu;
	kstat_io_t *kip;

	/* NOTE(review): dlun0 presumably has no io kstat; skip it */
	if (task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilu->ilu_kstat_io->ks_lock);
			func(kip);
			mutex_exit(ilu->ilu_kstat_io->ks_lock);
		}
	}
}

/*
 * Update the target(lport) wait/run queue count
 */
static void
stmf_update_kstat_lport_q(scsi_task_t *task, void func())
{
	stmf_i_local_port_t *ilp;
	kstat_io_t *kip;

	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilp->ilport_kstat_io->ks_lock);
			func(kip);
			mutex_exit(ilp->ilport_kstat_io->ks_lock);
		}
	}
}

/* account a data-buffer transfer against the lport's io kstat */
static void
stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_local_port_t
	    *ilp;
	kstat_io_t *kip;

	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilp->ilport_kstat_io->ks_lock);
			STMF_UPDATE_KSTAT_IO(kip, dbuf);
			mutex_exit(ilp->ilport_kstat_io->ks_lock);
		}
	}
}

/* account a data-buffer transfer against the LU's io kstat */
static void
stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_lu_t *ilu;
	kstat_io_t *kip;

	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
		if (kip != NULL) {
			mutex_enter(ilu->ilu_kstat_io->ks_lock);
			STMF_UPDATE_KSTAT_IO(kip, dbuf);
			mutex_exit(ilu->ilu_kstat_io->ks_lock);
		}
	}
}

/*
 * Create the "misc" (identity) and "io" kstats for a logical unit.
 * Failures are logged and tolerated; the LU works without kstats.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t *ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create lu failed");
		return;
	}

	/* virtual kstat: we supply and own the data buffer */
	ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
	ilu->ilu_kstat_info->ks_data = ks_lu;

	kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
	    KSTAT_DATA_STRING);

	/* convert guid to hex string */
	int i;
	uint8_t *p =
	    ilu->ilu_lu->lu_id->ident;
	bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
	}
	kstat_named_setstr(&ks_lu->i_lun_guid,
	    (const char *)ilu->ilu_ascii_hex_guid);
	kstat_named_setstr(&ks_lu->i_lun_alias,
	    (const char *)ilu->ilu_lu->lu_alias);
	kstat_install(ilu->ilu_kstat_info);

	/* create kstat lun io */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
		return;
	}
	mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
	ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
	kstat_install(ilu->ilu_kstat_io);
}

/*
 * Create the "misc" (identity) and "io" kstats for a local port.
 * Failures are logged and tolerated; the port works without kstats.
 */
static void
stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
{
	char ks_nm[KSTAT_STRLEN];
	stmf_kstat_tgt_info_t *ks_tgt;
	int id, len;

	/* create kstat lport info */
	ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
	    KM_NOSLEEP);
	if (ks_tgt == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
	if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
	    0, ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create target failed");
		return;
	}

	/* virtual kstat: we supply and own the data buffer */
	ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
	ilport->ilport_kstat_info->ks_data = ks_tgt;

	kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
	    KSTAT_DATA_STRING);
kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias", 2771 KSTAT_DATA_STRING); 2772 kstat_named_init(&ks_tgt->i_protocol, "protocol", 2773 KSTAT_DATA_STRING); 2774 2775 /* ident might not be null terminated */ 2776 len = ilport->ilport_lport->lport_id->ident_length; 2777 bcopy(ilport->ilport_lport->lport_id->ident, 2778 ilport->ilport_kstat_tgt_name, len); 2779 ilport->ilport_kstat_tgt_name[len + 1] = NULL; 2780 kstat_named_setstr(&ks_tgt->i_tgt_name, 2781 (const char *)ilport->ilport_kstat_tgt_name); 2782 kstat_named_setstr(&ks_tgt->i_tgt_alias, 2783 (const char *)ilport->ilport_lport->lport_alias); 2784 /* protocol */ 2785 if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) { 2786 cmn_err(CE_WARN, "STMF: protocol_id out of bound"); 2787 id = PROTOCOL_ANY; 2788 } 2789 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]); 2790 kstat_install(ilport->ilport_kstat_info); 2791 2792 /* create kstat lport io */ 2793 bzero(ks_nm, sizeof (ks_nm)); 2794 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport); 2795 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 2796 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 2797 cmn_err(CE_WARN, "STMF: kstat_create target_io failed"); 2798 return; 2799 } 2800 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0); 2801 ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock; 2802 kstat_install(ilport->ilport_kstat_io); 2803 } 2804 2805 /* 2806 * set the asymmetric access state for a logical unit 2807 * caller is responsible for establishing SCSI unit attention on 2808 * state change 2809 */ 2810 stmf_status_t 2811 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state) 2812 { 2813 stmf_i_lu_t *ilu; 2814 uint8_t *p1, *p2; 2815 2816 if ((access_state != STMF_LU_STANDBY) && 2817 (access_state != STMF_LU_ACTIVE)) { 2818 return (STMF_INVALID_ARG); 2819 } 2820 2821 p1 = &lu->lu_id->ident[0]; 2822 mutex_enter(&stmf_state.stmf_lock); 2823 if (stmf_state.stmf_inventory_locked) { 
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* search registered LUs for a matching 16-byte GUID */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			break;
		}
	}

	if (!ilu) {
		ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	} else {
		/*
		 * We're changing access state on an existing logical unit
		 * Send the proxy registration message for this logical unit
		 * if we're in alua mode.
		 * If the requested state is STMF_LU_ACTIVE, we want to register
		 * this logical unit.
		 * If the requested state is STMF_LU_STANDBY, we're going to
		 * abort all tasks for this logical unit.
		 */
		if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_ACTIVE) {
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_reg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_lun_active_msg_alloc(p1,
				    lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		} else if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_STANDBY) {
			/* abort all tasks for this lu */
			stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
		}
	}

	ilu->ilu_access = access_state;

	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}


/*
 * Register a logical unit with the framework: validate its NAA identifier,
 * reject duplicates, link it into the global LU list, create its kstats,
 * notify the ALUA proxy when applicable, and bring it online if the
 * framework service is running.
 */
stmf_status_t
stmf_register_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;
	stmf_state_change_info_t ssci;
	stmf_id_data_t *luid;

	/* only 16-byte NAA identifiers starting with nibble 6 are accepted */
	if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
	    (lu->lu_id->ident_length != 16) ||
	    ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
		return (STMF_INVALID_ARG);
	}
	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* reject duplicate registrations (same 16-byte GUID) */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_ALREADY);
		}
	}

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	/* connect a pre-existing LU id record, if one was configured */
	luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
	    lu->lu_id->ident_length, lu->lu_id->ident);
	if (luid) {
		luid->id_pt_to_object = (void *)ilu;
		ilu->ilu_luid = luid;
	}
	ilu->ilu_alias = NULL;

	/* link into the global LU list */
	ilu->ilu_next = stmf_state.stmf_ilulist;
	ilu->ilu_prev = NULL;
	if (ilu->ilu_next)
		ilu->ilu_next->ilu_prev = ilu;
	stmf_state.stmf_ilulist = ilu;
	stmf_state.stmf_nlus++;
	if (lu->lu_lp) {
		((stmf_i_lu_provider_t *)
		    (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
	}
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
	STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
	stmf_create_kstat_lu(ilu);
	/*
	 * register with proxy module if available and logical unit
	 * is in active state
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilu->ilu_access == STMF_LU_ACTIVE) {
		stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
		stmf_ic_msg_t *ic_reg_lun;
		if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
		    lu->lu_lp->lp_alua_support) {
			ilu->ilu_alua = 1;
			/* allocate the register message */
			ic_reg_lun = ic_reg_lun_msg_alloc(p1,
			    lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
			    (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
			/* send the message */
			if (ic_reg_lun) {
				ic_ret = ic_tx_msg(ic_reg_lun);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					stmf_proxy_msg_id++;
				}
			}
		}
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* XXX we should probably check if this lu can be brought online */
	ilu->ilu_prev_state = STMF_STATE_ONLINE;
	if (stmf_state.stmf_service_running) {
		ssci.st_rflags = 0;
		ssci.st_additional_info = NULL;
		(void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}

/*
 * Deregister a logical unit. The LU must already be offline; otherwise
 * STMF_BUSY is returned. Frees all cached tasks, notifies the ALUA proxy
 * when applicable, unlinks the LU from the global list, and tears down
 * its kstats.
 */
stmf_status_t
stmf_deregister_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilu = stmf_lookup_lu(lu);
	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_INVALID_ARG);
	}
	if (ilu->ilu_state == STMF_STATE_OFFLINE) {
		ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
		/* wait out anyone who asked us to stall the deregister */
		while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
			cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
		}
		/* free every cached task for this LU */
		if (ilu->ilu_ntasks) {
			stmf_i_scsi_task_t *itask, *nitask;

			nitask = ilu->ilu_tasks;
			do {
				itask = nitask;
				nitask = itask->itask_lu_next;
				lu->lu_task_free(itask->itask_task);
				stmf_free(itask->itask_task);
			} while (nitask != NULL);

			ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
			ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
		}
		/* de-register with proxy if available */
		if (ilu->ilu_access == STMF_LU_ACTIVE &&
		    stmf_state.stmf_alua_state == 1) {
			/* de-register with proxy module */
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_dereg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the de-register message */
				ic_dereg_lun = ic_dereg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name, 0,
				    NULL, stmf_proxy_msg_id);
				/* send the message */
				if (ic_dereg_lun) {
					ic_ret = ic_tx_msg(ic_dereg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}

		/* unlink from the global LU list */
		if (ilu->ilu_next)
			ilu->ilu_next->ilu_prev = ilu->ilu_prev;
		if (ilu->ilu_prev)
			ilu->ilu_prev->ilu_next = ilu->ilu_next;
		else
			stmf_state.stmf_ilulist = ilu->ilu_next;
		stmf_state.stmf_nlus--;

		/* advance the svc thread cursors past the departing LU */
		if (ilu == stmf_state.stmf_svc_ilu_draining) {
			stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		}
		if (ilu == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		if (lu->lu_lp) {
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		if (ilu->ilu_luid) {
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data,
		    ilu->ilu_kstat_info->ks_data_size);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * Mark an lport as a standby (proxy) port and record the relative
 * target port id assigned by the peer node.
 */
void
stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;
	ilport->ilport_rtpid = rtpid;
	ilport->ilport_standby = 1;
}

/*
 * Register a local (target) port: link it into the global list, assign a
 * relative target port id (non-standby ports only), notify the ALUA proxy,
 * create kstats, start the worker threads on first use, and bring the port
 * online if the framework service is running.
 */
stmf_status_t
stmf_register_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;
	stmf_state_change_info_t ssci;
	int start_workers = 0;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilport = (stmf_i_local_port_t
	    *)lport->lport_stmf_private;
	rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);

	/* link into the global lport list */
	ilport->ilport_next = stmf_state.stmf_ilportlist;
	ilport->ilport_prev = NULL;
	if (ilport->ilport_next)
		ilport->ilport_next->ilport_prev = ilport;
	stmf_state.stmf_ilportlist = ilport;
	stmf_state.stmf_nlports++;
	if (lport->lport_pp) {
		((stmf_i_port_provider_t *)
		    (lport->lport_pp->pp_stmf_private))->ipp_npps++;
	}
	ilport->ilport_tg =
	    stmf_lookup_group_for_target(lport->lport_id->ident,
	    lport->lport_id->ident_length);

	/*
	 * rtpid will/must be set if this is a standby port
	 * only register ports that are not standby (proxy) ports
	 */
	if (ilport->ilport_standby == 0) {
		ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
	}

	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0) {
		stmf_ic_msg_t *ic_reg_port;
		stmf_ic_msg_status_t ic_ret;
		/* NOTE(review): this inner lport shadows the parameter */
		stmf_local_port_t *lport;
		lport = ilport->ilport_lport;
		ic_reg_port = ic_reg_port_msg_alloc(
		    lport->lport_id, ilport->ilport_rtpid,
		    0, NULL, stmf_proxy_msg_id);
		if (ic_reg_port) {
			ic_ret = ic_tx_msg(ic_reg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
			} else {
				cmn_err(CE_WARN, "error on port registration "
				"port - %s", ilport->ilport_kstat_tgt_name);
			}
		}
	}
	STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
	stmf_create_kstat_lport(ilport);
	/* first registered port brings up the worker threads */
	if (stmf_workers_state == STMF_WORKERS_DISABLED) {
		stmf_workers_state = STMF_WORKERS_ENABLING;
		start_workers = 1;
	}
	mutex_exit(&stmf_state.stmf_lock);

	if (start_workers)
		stmf_worker_init();

	/* XXX we should probably check if this lport can be brought online */
	ilport->ilport_prev_state = STMF_STATE_ONLINE;
	if (stmf_state.stmf_service_running) {
		ssci.st_rflags = 0;
		ssci.st_additional_info = NULL;
		(void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}

/*
 * Deregister a local port. Fails with STMF_BUSY while sessions are still
 * registered on the port. Notifies the ALUA proxy (non-standby ports),
 * unlinks the port from the global list, and tears down its kstats.
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				stmf_proxy_msg_id++;
			}
		}
	}

	if (ilport->ilport_nsessions == 0) {
		/* unlink from the global lport list */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	if (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    ilport->ilport_kstat_info->ks_data_size);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}

	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}

/*
 * Port provider has to make sure that register/deregister session and
 * port are serialized calls.
 */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	uint8_t lun[8];

	/*
	 * Port state has to be online to register a scsi session. It is
	 * possible that we started an offline operation and a new SCSI
	 * session started at the same time (in that case also we are going
	 * to fail the registeration). But any other state is simply
	 * a bad port provider implementation.
	 */
	if (ilport->ilport_state != STMF_STATE_ONLINE) {
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			stmf_trace(lport->lport_alias, "Port is trying to "
			    "register a session while the state is neither "
			    "online nor offlining");
		}
		return (STMF_FAILURE);
	}
	bzero(lun, 8);
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	iss->iss_flags |= ISS_BEING_CREATED;

	/* sessions use the ilport_lock. No separate lock is required */
	iss->iss_lockp = &ilport->ilport_lock;
	(void) stmf_session_create_lun_map(ilport, iss);

	/* link the session onto the port's session list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	ilport->ilport_nsessions++;
	iss->iss_next = ilport->ilport_ss_list;
	ilport->ilport_ss_list = iss;
	rw_exit(&ilport->ilport_lock);

	iss->iss_creation_time = ddi_get_time();
	/* session ids come from a global monotonically increasing counter */
	ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
	iss->iss_flags &= ~ISS_BEING_CREATED;
	/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session?
	 */
	iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
	DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);
	return (STMF_SUCCESS);
}

/*
 * Deregister a SCSI session from its port: wait out any active event
 * delivery, notify the ALUA proxy (non-standby ports), unlink the session
 * from the port's list, and destroy its LUN map. Panics if the session is
 * not actually on the port's list.
 */
void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	stmf_status_t ic_ret = STMF_FAILURE;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	/* retry until no event is being delivered to this session */
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 && ilport->ilport_standby == 0) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				stmf_proxy_msg_id++;
			}
		}
	}

	mutex_exit(&stmf_state.stmf_lock);

	/* unlink the session from the port's list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;
	rw_exit(&ilport->ilport_lock);

	(void) stmf_session_destroy_lun_map(ilport, iss);
}

stmf_i_scsi_session_t *
stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
{
	/*
	 * Looks up the internal session structure for a session id by
	 * walking every local port's session list. If stay_locked is set,
	 * the matching port's ilport_lock is returned held (as writer) so
	 * the caller can safely use the session; otherwise all locks are
	 * dropped before returning. Returns NULL if no session matches.
	 */
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			if (iss->iss_ss->ss_session_id == session_id) {
				if (!stay_locked)
					rw_exit(&ilport->ilport_lock);
				mutex_exit(&stmf_state.stmf_lock);
				return (iss);
			}
		}
		rw_exit(&ilport->ilport_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (NULL);
}

/*
 * Final teardown of an ITL nexus handle: unlinks it from the LU's ITL
 * list, notifies the LU via its abort entry point that the handle is
 * being removed, and frees the structure. Caller must have already set
 * STMF_ITL_BEING_TERMINATED (asserted below).
 */
void
stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
{
	stmf_itl_data_t **itlpp;
	stmf_i_lu_t *ilu;

	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	mutex_enter(&ilu->ilu_task_lock);
	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
	    itlpp = &(*itlpp)->itl_next) {
		if ((*itlpp) == itl)
			break;
	}
	/* The handle must be on the LU's list. */
	ASSERT((*itlpp) != NULL);
	*itlpp = itl->itl_next;
	mutex_exit(&ilu->ilu_task_lock);
	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
	    (uint32_t)itl->itl_hdlrm_reason);
	kmem_free(itl, sizeof (*itl));
}

/*
 * Associates an LU-provider private handle (itl_handle) with the
 * (initiator, target, LUN) nexus identified by the session (or
 * session_id when ss is NULL) and the 8-byte SCSI lun field.
 * Returns STMF_NOT_FOUND if the session/LUN mapping does not exist,
 * STMF_ALREADY if a handle is already registered, STMF_ALLOC_FAILURE
 * on memory shortage, else STMF_SUCCESS.
 */
stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_itl_data_t *itl;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_ent_t *lun_map_ent;
	stmf_i_lu_t *ilu;
	uint16_t n;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	if (ss == NULL) {
		/* Lookup returns with iss_lockp held (stay_locked == 1). */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
	}

	/* 14-bit LUN number from the first two bytes of the lun field. */
	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	lun_map_ent = (stmf_lun_map_ent_t *)
	    stmf_get_ent_from_map(iss->iss_sm, n);
	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}
	if (lun_map_ent->ent_itl_datap != NULL) {
		rw_exit(iss->iss_lockp);
		return (STMF_ALREADY);
	}

	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
	if (itl == NULL) {
		rw_exit(iss->iss_lockp);
		return (STMF_ALLOC_FAILURE);
	}

	/* Initial reference; dropped by stmf_do_itl_dereg(). */
	itl->itl_counter = 1;
	itl->itl_lun = n;
	itl->itl_handle = itl_handle;
	itl->itl_session = iss;
	mutex_enter(&ilu->ilu_task_lock);
	itl->itl_next = ilu->ilu_itl_list;
	ilu->ilu_itl_list = itl;
	mutex_exit(&ilu->ilu_task_lock);
	lun_map_ent->ent_itl_datap = itl;
	rw_exit(iss->iss_lockp);

	return (STMF_SUCCESS);
}

/*
 * Starts termination of an ITL handle. Exactly one caller wins the race
 * to set STMF_ITL_BEING_TERMINATED; that caller drops the registration
 * reference and, if the count hit zero, releases the handle.
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	if (atomic_add_32_nv(&itl->itl_counter, -1))
		return;

	/*
	 * NOTE(review): this wait + plain re-read of itl_counter looks
	 * like a best-effort grace period rather than a hard guarantee —
	 * confirm the task-side counter protocol prevents the count going
	 * back up after the decrement to zero.
	 */
	drv_usecwait(10);
	if (itl->itl_counter)
		return;

	stmf_release_itl_handle(lu, itl);
}

/*
 * Deregisters every ITL handle on every session that maps this LU.
 * Snapshots the handles under stmf_lock (restarting if the LU's ref
 * count changed between sizing and locking), then deregisters them
 * outside the port locks.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
3472 return (STMF_NOT_FOUND); 3473 itl_list = (stmf_itl_data_t **)kmem_zalloc( 3474 nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP); 3475 mutex_enter(&stmf_state.stmf_lock); 3476 if (nmaps != ilu->ilu_ref_cnt) { 3477 /* Something changed, start all over */ 3478 mutex_exit(&stmf_state.stmf_lock); 3479 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 3480 goto dereg_itl_start; 3481 } 3482 nu = 0; 3483 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 3484 ilport = ilport->ilport_next) { 3485 rw_enter(&ilport->ilport_lock, RW_WRITER); 3486 for (iss = ilport->ilport_ss_list; iss != NULL; 3487 iss = iss->iss_next) { 3488 lm = iss->iss_sm; 3489 if (!lm) 3490 continue; 3491 for (i = 0; i < lm->lm_nentries; i++) { 3492 if (lm->lm_plus[i] == NULL) 3493 continue; 3494 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 3495 if ((ent->ent_lu == lu) && 3496 (ent->ent_itl_datap)) { 3497 itl_list[nu++] = ent->ent_itl_datap; 3498 ent->ent_itl_datap = NULL; 3499 if (nu == nmaps) { 3500 rw_exit(&ilport->ilport_lock); 3501 goto dai_scan_done; 3502 } 3503 } 3504 } /* lun table for a session */ 3505 } /* sessions */ 3506 rw_exit(&ilport->ilport_lock); 3507 } /* ports */ 3508 3509 dai_scan_done: 3510 mutex_exit(&stmf_state.stmf_lock); 3511 3512 for (i = 0; i < nu; i++) { 3513 stmf_do_itl_dereg(lu, itl_list[i], 3514 STMF_ITL_REASON_DEREG_REQUEST); 3515 } 3516 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 3517 3518 return (STMF_SUCCESS); 3519 } 3520 3521 stmf_status_t 3522 stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun, 3523 stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle) 3524 { 3525 stmf_i_scsi_session_t *iss; 3526 stmf_itl_data_t *itl; 3527 stmf_lun_map_ent_t *ent; 3528 stmf_lun_map_t *lm; 3529 int i; 3530 uint16_t n; 3531 3532 if (ss == NULL) { 3533 if (session_id == STMF_SESSION_ID_NONE) 3534 return (STMF_INVALID_ARG); 3535 iss = stmf_session_id_to_issptr(session_id, 1); 3536 if (iss == NULL) 3537 return (STMF_NOT_FOUND); 3538 } else { 3539 
iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3540 rw_enter(iss->iss_lockp, RW_WRITER); 3541 } 3542 lm = iss->iss_sm; 3543 if (lm == NULL) { 3544 rw_exit(iss->iss_lockp); 3545 return (STMF_NOT_FOUND); 3546 } 3547 3548 if (lun) { 3549 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 3550 ent = (stmf_lun_map_ent_t *) 3551 stmf_get_ent_from_map(iss->iss_sm, n); 3552 } else { 3553 if (itl_handle == NULL) { 3554 rw_exit(iss->iss_lockp); 3555 return (STMF_INVALID_ARG); 3556 } 3557 ent = NULL; 3558 for (i = 0; i < lm->lm_nentries; i++) { 3559 if (lm->lm_plus[i] == NULL) 3560 continue; 3561 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 3562 if (ent->ent_itl_datap && 3563 (ent->ent_itl_datap->itl_handle == itl_handle)) { 3564 break; 3565 } 3566 } 3567 } 3568 if ((ent == NULL) || (ent->ent_lu != lu) || 3569 (ent->ent_itl_datap == NULL)) { 3570 rw_exit(iss->iss_lockp); 3571 return (STMF_NOT_FOUND); 3572 } 3573 itl = ent->ent_itl_datap; 3574 ent->ent_itl_datap = NULL; 3575 rw_exit(iss->iss_lockp); 3576 stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST); 3577 3578 return (STMF_SUCCESS); 3579 } 3580 3581 stmf_status_t 3582 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss, 3583 uint64_t session_id, void **itl_handle_retp) 3584 { 3585 stmf_i_scsi_session_t *iss; 3586 stmf_lun_map_ent_t *ent; 3587 stmf_lun_map_t *lm; 3588 stmf_status_t ret; 3589 int i; 3590 uint16_t n; 3591 3592 if (ss == NULL) { 3593 iss = stmf_session_id_to_issptr(session_id, 1); 3594 if (iss == NULL) 3595 return (STMF_NOT_FOUND); 3596 } else { 3597 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3598 rw_enter(iss->iss_lockp, RW_WRITER); 3599 } 3600 3601 ent = NULL; 3602 if (lun == NULL) { 3603 lm = iss->iss_sm; 3604 for (i = 0; i < lm->lm_nentries; i++) { 3605 if (lm->lm_plus[i] == NULL) 3606 continue; 3607 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 3608 if (ent->ent_lu == lu) 3609 break; 3610 } 3611 } else { 3612 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 
0x3F)) << 8)); 3613 ent = (stmf_lun_map_ent_t *) 3614 stmf_get_ent_from_map(iss->iss_sm, n); 3615 if (lu && (ent->ent_lu != lu)) 3616 ent = NULL; 3617 } 3618 if (ent && ent->ent_itl_datap) { 3619 *itl_handle_retp = ent->ent_itl_datap->itl_handle; 3620 ret = STMF_SUCCESS; 3621 } else { 3622 ret = STMF_NOT_FOUND; 3623 } 3624 3625 rw_exit(iss->iss_lockp); 3626 return (ret); 3627 } 3628 3629 stmf_data_buf_t * 3630 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize, 3631 uint32_t flags) 3632 { 3633 stmf_i_scsi_task_t *itask = 3634 (stmf_i_scsi_task_t *)task->task_stmf_private; 3635 stmf_local_port_t *lport = task->task_lport; 3636 stmf_data_buf_t *dbuf; 3637 uint8_t ndx; 3638 3639 ndx = stmf_first_zero[itask->itask_allocated_buf_map]; 3640 if (ndx == 0xff) 3641 return (NULL); 3642 dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf( 3643 task, size, pminsize, flags); 3644 if (dbuf) { 3645 task->task_cur_nbufs++; 3646 itask->itask_allocated_buf_map |= (1 << ndx); 3647 dbuf->db_handle = ndx; 3648 return (dbuf); 3649 } 3650 3651 return (NULL); 3652 } 3653 3654 void 3655 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf) 3656 { 3657 stmf_i_scsi_task_t *itask = 3658 (stmf_i_scsi_task_t *)task->task_stmf_private; 3659 stmf_local_port_t *lport = task->task_lport; 3660 3661 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle); 3662 task->task_cur_nbufs--; 3663 lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf); 3664 } 3665 3666 stmf_data_buf_t * 3667 stmf_handle_to_buf(scsi_task_t *task, uint8_t h) 3668 { 3669 stmf_i_scsi_task_t *itask; 3670 3671 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 3672 if (h > 3) 3673 return (NULL); 3674 return (itask->itask_dbufs[h]); 3675 } 3676 3677 /* ARGSUSED */ 3678 struct scsi_task * 3679 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss, 3680 uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id) 3681 { 3682 stmf_lu_t *lu; 3683 stmf_i_scsi_session_t *iss; 3684 
stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint64_t *p;
	uint8_t *l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* 14-bit LUN number from the first two bytes of the lun field. */
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		/* Unmapped LUN: route the task to the pseudo LU dlun0. */
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}
	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}
	/*
	 * Try to recycle a task from the LU's free list whose CDB buffer
	 * is big enough; otherwise fall through to a fresh allocation.
	 */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free =
				    ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		task = itask->itask_task;
		task->task_timeout = 0;
		/*
		 * Bulk-clear the task fields starting at task_flags as
		 * seven 64-bit words, skipping words 3 and 4 (presumably
		 * fields that must survive recycling — TODO confirm
		 * against the scsi_task_t layout).
		 */
		p = (uint64_t *)&task->task_flags;
		*p++ = 0; *p++ = 0; p++; p++; *p++ = 0; *p++ = 0; *p = 0;
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		l = task->task_lun_no;
		l[0] = lun[0];
		l[1] = lun[1];
		l[2] = lun[2];
		l[3] = lun[3];
		l[4] = lun[4];
		l[5] = lun[5];
		l[6] = lun[6];
		l[7] = lun[7];
		/* Round the CDB pointer up to the next 8-byte boundary. */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
	}
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;

	if (new_task) {
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		mutex_enter(&ilu->ilu_task_lock);
		/* Re-check under the lock; a reset may have started. */
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_add_32(itask->itask_ilu_task_cntr, 1);
	itask->itask_start_time = ddi_get_lbolt();

	/* Take a reference on the ITL handle for the life of this task. */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}

static
void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	/*
	 * Returns a completed task to its LU's free list for recycling by
	 * stmf_task_alloc() and drops the task's contribution to the LU's
	 * active-task counter. Caller must hold iss_lockp (asserted).
	 */
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_proxy_msg_id = 0;
	mutex_enter(&ilu->ilu_task_lock);
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	mutex_exit(&ilu->ilu_task_lock);
	atomic_add_32(itask->itask_ilu_task_cntr, -1);
}

/*
 * Shrinks an LU's free-task cache: releases half of the minimum number
 * of tasks that stayed free since the last check (ilu_ntasks_min_free),
 * giving each back to the LU provider and then to STMF.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t *lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Let the LU provider release its per-task resources. */
		lu->lu_task_free(itask->itask_task);
		/* Unlink from the LU's doubly linked all-tasks list. */
		mutex_enter(&ilu->ilu_task_lock);
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}

/*
 * Called with stmf_lock held
 *
 * Walks the service thread's draining list, trimming each LU's free-task
 * cache. Drops/retakes stmf_lock around the trim, using
 * ILU_STALL_DEREGISTER to keep the LU from deregistering meanwhile.
 * Bounded to roughly 10ms per invocation.
 */
void
stmf_check_freetask()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		if (!ilu->ilu_ntasks_min_free) {
			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
			continue;
		}
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_task_lu_check_freelist(ilu);
		/*
		 * we do not care about the accuracy of
		 * ilu_ntasks_min_free, so we don't lock here
		 */
		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}

/*
 * Scans an LU's active tasks and queues an abort (STMF_TIMEOUT) for any
 * task that has exceeded its timeout (task_timeout seconds, or
 * stmf_default_task_timeout when 0).
 */
void
stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
{
	clock_t l = ddi_get_lbolt();
	clock_t ps = drv_usectohz(1000000);	/* ticks per second */
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	uint32_t to;

	mutex_enter(&ilu->ilu_task_lock);
	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			continue;
		}
		task = itask->itask_task;
		if (task->task_timeout == 0)
			to = stmf_default_task_timeout;
		else
			to = task->task_timeout;
		if ((itask->itask_start_time + (to * ps)) > l)
			continue;
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_TIMEOUT, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}

/*
 * Called with stmf_lock held
 *
 * Walks the service thread's timing list. Each LU flips between two
 * task counters; if the inactive counter has drained to zero the LU is
 * making progress and we just switch counters. Otherwise tasks may be
 * stuck, so run the timeout scan. Bounded to roughly 10ms per call.
 */
void
stmf_check_ilu_timing()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		if
(ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
			if (ilu->ilu_task_cntr2 == 0) {
				/* Other counter drained: LU is healthy. */
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
				continue;
			}
		} else {
			if (ilu->ilu_task_cntr1 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
				continue;
			}
		}
		/*
		 * If we are here then it means that there is some slowdown
		 * in tasks on this lu. We need to check.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_do_ilu_timeouts(ilu);
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}

/*
 * Kills all tasks on a lu except tm_task
 */
void
stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	stmf_i_scsi_task_t *itask;

	mutex_enter(&ilu->ilu_task_lock);

	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (itask->itask_flags & ITASK_IN_FREE_LIST)
			continue;
		if (itask->itask_task == tm_task)
			continue;
		stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}

/*
 * Releases every data buffer still allocated to a task back to the
 * target port's data store and clears the task's allocation bitmap.
 */
void
stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
{
	int i;
	uint8_t map;

	if ((map = itask->itask_allocated_buf_map) != 0) {
		/* One bit per possible buffer slot (4 max). */
		for (i = 0; i < 4; i++) {
			if (map & 1) {
				stmf_data_buf_t *dbuf;

				dbuf = itask->itask_dbufs[i];
				if (dbuf->db_lu_private) {
					dbuf->db_lu_private = NULL;
				}
				lport->lport_ds->ds_free_data_buf(
				    lport->lport_ds, dbuf);
			}
			map >>= 1;
		}
		itask->itask_allocated_buf_map = 0;
	}
}

/*
 * Final disposal of a completed task: frees its data buffers, drops its
 * ITL handle reference (releasing the handle if this was the last one),
 * lets the port provider clean up, and returns the task to the LU's
 * free list via stmf_task_lu_free().
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	DTRACE_PROBE1(stmf__task__end, scsi_task_t *, task);
	stmf_free_task_bufs(itask, lport);
	if (itask->itask_itl_datap) {
		if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
		    -1) == 0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	if (itask->itask_worker) {
		atomic_add_32(&stmf_cur_ntasks, -1);
		atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}

/*
 * Hands a newly allocated task (optionally with an initial data buffer)
 * to a worker thread: picks a worker round-robin (preferring a less
 * loaded neighbor), marks the task known to the target port, applies
 * default handling for target resets and REPORT LUNS, and queues
 * ITASK_CMD_NEW_TASK on the worker.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t old, new;
	uint32_t ct;
	stmf_worker_t *w, *w1;
	uint8_t tm;

	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);

	/* Select the next worker using round robin */
	nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
	if (nv >= stmf_nworkers_accepting_cmds) {
		int s = nv;
		do {
			nv -= stmf_nworkers_accepting_cmds;
		} while (nv >= stmf_nworkers_accepting_cmds);
		if (nv < 0)
			nv = 0;
		/* Its ok if this cas fails */
		(void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
		    s, nv);
	}
	w = &stmf_workers[nv];

	/*
	 * A worker can be pinned by interrupt.
So select the next one
	 * if it has lower load.
	 */
	if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
		w1 = stmf_workers;
	} else {
		w1 = &stmf_workers[nv + 1];
	}
	if (w1->worker_queue_depth < w->worker_queue_depth)
		w = w1;

	mutex_enter(&w->worker_lock);
	if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/*
		 * Maybe we are in the middle of a change. Just go to
		 * the 1st worker.
		 */
		mutex_exit(&w->worker_lock);
		w = stmf_workers;
		mutex_enter(&w->worker_lock);
	}
	itask->itask_worker = w;
	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	/* Atomically transition the task flags via compare-and-swap. */
	do {
		old = new = itask->itask_flags;
		new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
		if (task->task_mgmt_function) {
			tm = task->task_mgmt_function;
			/* Target resets get framework default handling. */
			if ((tm == TM_TARGET_RESET) ||
			    (tm == TM_TARGET_COLD_RESET) ||
			    (tm == TM_TARGET_WARM_RESET)) {
				new |= ITASK_DEFAULT_HANDLING;
			}
		} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
			new |= ITASK_DEFAULT_HANDLING;
		}
		new &= ~ITASK_IN_TRANSITION;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	/* Append the task to the worker's queue. */
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	atomic_add_32(&w->worker_ref_count, 1);
	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	stmf_update_kstat_lu_q(task, kstat_waitq_enter);
	stmf_update_kstat_lport_q(task, kstat_waitq_enter);

	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
		cv_signal(&w->worker_cv);
	mutex_exit(&w->worker_lock);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}

/*
 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already
 * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot
 * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course
 * the LU will make this call only if we call the LU's abort entry point.
 * we will only call that entry point if ITASK_KNOWN_TO_LU was set.
 *
 * Same logic applies for the port.
 *
 * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU
 * and KNOWN_TO_TGT_PORT are reset.
 *
 * +++++++++++++++++++++++++++++++++++++++++++++++
 */

/*
 * Initiates a data transfer for a task through the target port's
 * lport_xfer_data entry point. STMF_IOF_LU_DONE means the LU is
 * finished with the task, so ITASK_KNOWN_TO_LU is cleared first.
 * Returns STMF_ABORTED if an abort is already in progress.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);
#ifdef	DEBUG
	/* Debug knob: silently drop a transfer to exercise error paths. */
	if (stmf_drop_buf_counter > 0) {
		if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
		    1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);

	DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, task,
	    stmf_data_buf_t *, dbuf);
	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);
	DTRACE_PROBE2(scsi__xfer__end, scsi_task_t *, task,
	    stmf_data_buf_t *, dbuf);
	return (ret);
}

/*
 * Completion callback for a data transfer started via stmf_xfer_data().
 * Records the transfer status, re-queues the task on its worker with
 * ITASK_CMD_DATA_XFER_DONE (encoding the buffer handle in the upper
 * bits) when the LU still owns it, and frees the task when neither LU
 * nor port knows about it any more.
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t update_queue_flags, free_it, queue_it, kstat_it;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		kstat_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			task->task_completion_status = dbuf->db_xfer_status;
			free_it = 1;
			kstat_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			update_queue_flags = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				queue_it = 0;
			} else {
				queue_it = 1;
				new |= ITASK_IN_WORKER_QUEUE;
			}
		} else {
			update_queue_flags = 0;
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (kstat_it) {
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	}
	if (update_queue_flags) {
		/* Buffer handle rides in the top 3 bits of the cmd byte. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
			if (++(w->worker_queue_depth) >
			    w->worker_max_qdepth_pu) {
				w->worker_max_qdepth_pu =
				    w->worker_queue_depth;
			}
			if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
				cv_signal(&w->worker_cv);
		}
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		}
	}
}

/*
 * Sends SCSI status for a task through the target port, computing
 * over/underrun residuals first. STMF_IOF_LU_DONE clears
 * ITASK_KNOWN_TO_LU. Returns STMF_ABORTED if the task is being
 * aborted, STMF_SUCCESS if the port no longer knows the task.
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		do {
			new
= old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}

	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);

	/* Compute residual/overrun info per the expected transfer length. */
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}

/*
 * Completion callback for stmf_send_scsi_status(). Records the final
 * completion status; re-queues the task on its worker with
 * ITASK_CMD_STATUS_DONE if the LU still owns it (a duplicate status
 * completion while already queued is a panic), otherwise frees the task
 * once neither LU nor port knows it.
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t free_it, queue_it, kstat_it;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		kstat_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			free_it = 1;
			kstat_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			queue_it = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				cmn_err(CE_PANIC, "status completion received"
				    " when task is already in worker queue "
				    " task = %p", (void *)task);
			}
			new |= ITASK_IN_WORKER_QUEUE;
		} else {
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;

	if (kstat_it) {
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	}

	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
}

/*
 * Called by an LU that finishes a task without sending status through
 * the framework. Clears ITASK_KNOWN_TO_LU and frees the task; any
 * remaining ownership by the port or worker queue at this point is a
 * framework invariant violation (panic).
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if
(old & ITASK_BEING_ABORTED) { 4451 mutex_exit(&w->worker_lock); 4452 return; 4453 } 4454 if (old & ITASK_IN_WORKER_QUEUE) { 4455 cmn_err(CE_PANIC, "task_lu_done received" 4456 " when task is in worker queue " 4457 " task = %p", (void *)task); 4458 } 4459 new &= ~ITASK_KNOWN_TO_LU; 4460 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4461 4462 mutex_exit(&w->worker_lock); 4463 4464 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4465 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4466 ITASK_BEING_ABORTED)) == 0) { 4467 stmf_task_free(task); 4468 } else { 4469 cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but " 4470 " the task is still not done, task = %p", (void *)task); 4471 } 4472 } 4473 4474 void 4475 stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s) 4476 { 4477 stmf_i_scsi_task_t *itask = 4478 (stmf_i_scsi_task_t *)task->task_stmf_private; 4479 stmf_worker_t *w; 4480 uint32_t old, new; 4481 4482 do { 4483 old = new = itask->itask_flags; 4484 if ((old & ITASK_BEING_ABORTED) || 4485 ((old & (ITASK_KNOWN_TO_TGT_PORT | 4486 ITASK_KNOWN_TO_LU)) == 0)) { 4487 return; 4488 } 4489 new |= ITASK_BEING_ABORTED; 4490 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4491 task->task_completion_status = s; 4492 itask->itask_start_time = ddi_get_lbolt(); 4493 4494 if (((w = itask->itask_worker) == NULL) || 4495 (itask->itask_flags & ITASK_IN_TRANSITION)) { 4496 return; 4497 } 4498 4499 /* Queue it and get out */ 4500 mutex_enter(&w->worker_lock); 4501 if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) { 4502 mutex_exit(&w->worker_lock); 4503 return; 4504 } 4505 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 4506 itask->itask_worker_next = NULL; 4507 if (w->worker_task_tail) { 4508 w->worker_task_tail->itask_worker_next = itask; 4509 } else { 4510 w->worker_task_head = itask; 4511 } 4512 w->worker_task_tail = itask; 4513 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4514 w->worker_max_qdepth_pu = 
w->worker_queue_depth; 4515 } 4516 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4517 cv_signal(&w->worker_cv); 4518 mutex_exit(&w->worker_lock); 4519 } 4520 4521 void 4522 stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg) 4523 { 4524 stmf_i_scsi_task_t *itask = NULL; 4525 uint32_t old, new, f, rf; 4526 4527 DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task, 4528 stmf_status_t, s); 4529 4530 switch (abort_cmd) { 4531 case STMF_QUEUE_ABORT_LU: 4532 stmf_task_lu_killall((stmf_lu_t *)arg, task, s); 4533 return; 4534 case STMF_QUEUE_TASK_ABORT: 4535 stmf_queue_task_for_abort(task, s); 4536 return; 4537 case STMF_REQUEUE_TASK_ABORT_LPORT: 4538 rf = ITASK_TGT_PORT_ABORT_CALLED; 4539 f = ITASK_KNOWN_TO_TGT_PORT; 4540 break; 4541 case STMF_REQUEUE_TASK_ABORT_LU: 4542 rf = ITASK_LU_ABORT_CALLED; 4543 f = ITASK_KNOWN_TO_LU; 4544 break; 4545 default: 4546 return; 4547 } 4548 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 4549 f |= ITASK_BEING_ABORTED | rf; 4550 do { 4551 old = new = itask->itask_flags; 4552 if ((old & f) != f) { 4553 return; 4554 } 4555 new &= ~rf; 4556 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4557 } 4558 4559 void 4560 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4561 { 4562 char info[STMF_CHANGE_INFO_LEN]; 4563 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4564 unsigned long long st; 4565 4566 st = s; /* gcc fix */ 4567 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) { 4568 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4569 "task %p, lu failed to abort ret=%llx", (void *)task, st); 4570 } else if ((iof & STMF_IOF_LU_DONE) == 0) { 4571 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4572 "Task aborted but LU is not finished, task =" 4573 "%p, s=%llx, iof=%x", (void *)task, st, iof); 4574 } else { 4575 /* 4576 * LU abort successfully 4577 */ 4578 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU); 4579 return; 4580 } 4581 4582 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4583 
stmf_abort_task_offline(task, 1, info); 4584 } 4585 4586 void 4587 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4588 { 4589 char info[STMF_CHANGE_INFO_LEN]; 4590 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4591 unsigned long long st; 4592 uint32_t old, new; 4593 4594 st = s; 4595 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) { 4596 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4597 "task %p, tgt port failed to abort ret=%llx", (void *)task, 4598 st); 4599 } else if ((iof & STMF_IOF_LPORT_DONE) == 0) { 4600 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4601 "Task aborted but tgt port is not finished, " 4602 "task=%p, s=%llx, iof=%x", (void *)task, st, iof); 4603 } else { 4604 /* 4605 * LPORT abort successfully 4606 */ 4607 do { 4608 old = new = itask->itask_flags; 4609 if (!(old & ITASK_KNOWN_TO_TGT_PORT)) 4610 return; 4611 new &= ~ITASK_KNOWN_TO_TGT_PORT; 4612 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4613 4614 if (!(itask->itask_flags & ITASK_KSTAT_IN_RUNQ)) { 4615 stmf_update_kstat_lu_q(task, kstat_waitq_exit); 4616 stmf_update_kstat_lport_q(task, kstat_waitq_exit); 4617 } else { 4618 stmf_update_kstat_lu_q(task, kstat_runq_exit); 4619 stmf_update_kstat_lport_q(task, kstat_runq_exit); 4620 } 4621 return; 4622 } 4623 4624 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4625 stmf_abort_task_offline(task, 0, info); 4626 } 4627 4628 stmf_status_t 4629 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout) 4630 { 4631 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4632 task->task_stmf_private; 4633 stmf_worker_t *w = itask->itask_worker; 4634 int i; 4635 4636 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU); 4637 mutex_enter(&w->worker_lock); 4638 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 4639 mutex_exit(&w->worker_lock); 4640 return (STMF_BUSY); 4641 } 4642 for (i = 0; i < itask->itask_ncmds; i++) { 4643 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) { 4644 mutex_exit(&w->worker_lock); 4645 return 
(STMF_SUCCESS); 4646 } 4647 } 4648 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU; 4649 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 4650 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 4651 } else { 4652 clock_t t = drv_usectohz(timeout * 1000); 4653 if (t == 0) 4654 t = 1; 4655 itask->itask_poll_timeout = ddi_get_lbolt() + t; 4656 } 4657 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 4658 itask->itask_worker_next = NULL; 4659 if (w->worker_task_tail) { 4660 w->worker_task_tail->itask_worker_next = itask; 4661 } else { 4662 w->worker_task_head = itask; 4663 } 4664 w->worker_task_tail = itask; 4665 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4666 w->worker_max_qdepth_pu = w->worker_queue_depth; 4667 } 4668 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 4669 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4670 cv_signal(&w->worker_cv); 4671 } 4672 mutex_exit(&w->worker_lock); 4673 return (STMF_SUCCESS); 4674 } 4675 4676 stmf_status_t 4677 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout) 4678 { 4679 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4680 task->task_stmf_private; 4681 stmf_worker_t *w = itask->itask_worker; 4682 int i; 4683 4684 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT); 4685 mutex_enter(&w->worker_lock); 4686 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 4687 mutex_exit(&w->worker_lock); 4688 return (STMF_BUSY); 4689 } 4690 for (i = 0; i < itask->itask_ncmds; i++) { 4691 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) { 4692 mutex_exit(&w->worker_lock); 4693 return (STMF_SUCCESS); 4694 } 4695 } 4696 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT; 4697 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 4698 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 4699 } else { 4700 clock_t t = drv_usectohz(timeout * 1000); 4701 if (t == 0) 4702 t = 1; 4703 itask->itask_poll_timeout = ddi_get_lbolt() + t; 4704 } 4705 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) 
== 0) { 4706 itask->itask_worker_next = NULL; 4707 if (w->worker_task_tail) { 4708 w->worker_task_tail->itask_worker_next = itask; 4709 } else { 4710 w->worker_task_head = itask; 4711 } 4712 w->worker_task_tail = itask; 4713 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4714 w->worker_max_qdepth_pu = w->worker_queue_depth; 4715 } 4716 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4717 cv_signal(&w->worker_cv); 4718 } 4719 mutex_exit(&w->worker_lock); 4720 return (STMF_SUCCESS); 4721 } 4722 4723 void 4724 stmf_do_task_abort(scsi_task_t *task) 4725 { 4726 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4727 stmf_lu_t *lu; 4728 stmf_local_port_t *lport; 4729 unsigned long long ret; 4730 uint32_t old, new; 4731 uint8_t call_lu_abort, call_port_abort; 4732 char info[STMF_CHANGE_INFO_LEN]; 4733 4734 lu = task->task_lu; 4735 lport = task->task_lport; 4736 do { 4737 old = new = itask->itask_flags; 4738 if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) == 4739 ITASK_KNOWN_TO_LU) { 4740 new |= ITASK_LU_ABORT_CALLED; 4741 call_lu_abort = 1; 4742 } else { 4743 call_lu_abort = 0; 4744 } 4745 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4746 4747 if (call_lu_abort) { 4748 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) { 4749 ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 4750 } else { 4751 ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 4752 } 4753 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 4754 stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE); 4755 } else if (ret == STMF_BUSY) { 4756 atomic_and_32(&itask->itask_flags, 4757 ~ITASK_LU_ABORT_CALLED); 4758 } else if (ret != STMF_SUCCESS) { 4759 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4760 "Abort failed by LU %p, ret %llx", (void *)lu, ret); 4761 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4762 stmf_abort_task_offline(task, 1, info); 4763 } 4764 } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) { 4765 if (ddi_get_lbolt() > (itask->itask_start_time + 
4766 STMF_SEC2TICK(lu->lu_abort_timeout? 4767 lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) { 4768 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4769 "lu abort timed out"); 4770 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4771 stmf_abort_task_offline(itask->itask_task, 1, info); 4772 } 4773 } 4774 4775 do { 4776 old = new = itask->itask_flags; 4777 if ((old & (ITASK_KNOWN_TO_TGT_PORT | 4778 ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) { 4779 new |= ITASK_TGT_PORT_ABORT_CALLED; 4780 call_port_abort = 1; 4781 } else { 4782 call_port_abort = 0; 4783 } 4784 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4785 if (call_port_abort) { 4786 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0); 4787 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 4788 stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE); 4789 } else if (ret == STMF_BUSY) { 4790 atomic_and_32(&itask->itask_flags, 4791 ~ITASK_TGT_PORT_ABORT_CALLED); 4792 } else if (ret != STMF_SUCCESS) { 4793 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4794 "Abort failed by tgt port %p ret %llx", 4795 (void *)lport, ret); 4796 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4797 stmf_abort_task_offline(task, 0, info); 4798 } 4799 } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) { 4800 if (ddi_get_lbolt() > (itask->itask_start_time + 4801 STMF_SEC2TICK(lport->lport_abort_timeout? 
4802 lport->lport_abort_timeout : 4803 ITASK_DEFAULT_ABORT_TIMEOUT))) { 4804 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4805 "lport abort timed out"); 4806 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4807 stmf_abort_task_offline(itask->itask_task, 0, info); 4808 } 4809 } 4810 } 4811 4812 stmf_status_t 4813 stmf_ctl(int cmd, void *obj, void *arg) 4814 { 4815 stmf_status_t ret; 4816 stmf_i_lu_t *ilu; 4817 stmf_i_local_port_t *ilport; 4818 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg; 4819 4820 mutex_enter(&stmf_state.stmf_lock); 4821 ret = STMF_INVALID_ARG; 4822 if (cmd & STMF_CMD_LU_OP) { 4823 ilu = stmf_lookup_lu((stmf_lu_t *)obj); 4824 if (ilu == NULL) { 4825 goto stmf_ctl_lock_exit; 4826 } 4827 DTRACE_PROBE3(lu__state__change, 4828 stmf_lu_t *, ilu->ilu_lu, 4829 int, cmd, stmf_state_change_info_t *, ssci); 4830 } else if (cmd & STMF_CMD_LPORT_OP) { 4831 ilport = stmf_lookup_lport((stmf_local_port_t *)obj); 4832 if (ilport == NULL) { 4833 goto stmf_ctl_lock_exit; 4834 } 4835 DTRACE_PROBE3(lport__state__change, 4836 stmf_local_port_t *, ilport->ilport_lport, 4837 int, cmd, stmf_state_change_info_t *, ssci); 4838 } else { 4839 goto stmf_ctl_lock_exit; 4840 } 4841 4842 switch (cmd) { 4843 case STMF_CMD_LU_ONLINE: 4844 if (ilu->ilu_state == STMF_STATE_ONLINE) { 4845 ret = STMF_ALREADY; 4846 goto stmf_ctl_lock_exit; 4847 } 4848 if (ilu->ilu_state != STMF_STATE_OFFLINE) { 4849 ret = STMF_INVALID_ARG; 4850 goto stmf_ctl_lock_exit; 4851 } 4852 ilu->ilu_state = STMF_STATE_ONLINING; 4853 mutex_exit(&stmf_state.stmf_lock); 4854 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4855 break; 4856 4857 case STMF_CMD_LU_ONLINE_COMPLETE: 4858 if (ilu->ilu_state != STMF_STATE_ONLINING) { 4859 ret = STMF_INVALID_ARG; 4860 goto stmf_ctl_lock_exit; 4861 } 4862 if (((stmf_change_status_t *)arg)->st_completion_status == 4863 STMF_SUCCESS) { 4864 ilu->ilu_state = STMF_STATE_ONLINE; 4865 mutex_exit(&stmf_state.stmf_lock); 4866 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t 
*)obj, 4867 STMF_ACK_LU_ONLINE_COMPLETE, arg); 4868 mutex_enter(&stmf_state.stmf_lock); 4869 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj); 4870 } else { 4871 /* XXX: should throw a meesage an record more data */ 4872 ilu->ilu_state = STMF_STATE_OFFLINE; 4873 } 4874 ret = STMF_SUCCESS; 4875 goto stmf_ctl_lock_exit; 4876 4877 case STMF_CMD_LU_OFFLINE: 4878 if (ilu->ilu_state == STMF_STATE_OFFLINE) { 4879 ret = STMF_ALREADY; 4880 goto stmf_ctl_lock_exit; 4881 } 4882 if (ilu->ilu_state != STMF_STATE_ONLINE) { 4883 ret = STMF_INVALID_ARG; 4884 goto stmf_ctl_lock_exit; 4885 } 4886 ilu->ilu_state = STMF_STATE_OFFLINING; 4887 mutex_exit(&stmf_state.stmf_lock); 4888 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4889 break; 4890 4891 case STMF_CMD_LU_OFFLINE_COMPLETE: 4892 if (ilu->ilu_state != STMF_STATE_OFFLINING) { 4893 ret = STMF_INVALID_ARG; 4894 goto stmf_ctl_lock_exit; 4895 } 4896 if (((stmf_change_status_t *)arg)->st_completion_status == 4897 STMF_SUCCESS) { 4898 ilu->ilu_state = STMF_STATE_OFFLINE; 4899 mutex_exit(&stmf_state.stmf_lock); 4900 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj, 4901 STMF_ACK_LU_OFFLINE_COMPLETE, arg); 4902 mutex_enter(&stmf_state.stmf_lock); 4903 } else { 4904 ilu->ilu_state = STMF_STATE_ONLINE; 4905 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj); 4906 } 4907 mutex_exit(&stmf_state.stmf_lock); 4908 break; 4909 4910 /* 4911 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online. 4912 * It's related with hardware disable/enable. 
4913 */ 4914 case STMF_CMD_LPORT_ONLINE: 4915 if (ilport->ilport_state == STMF_STATE_ONLINE) { 4916 ret = STMF_ALREADY; 4917 goto stmf_ctl_lock_exit; 4918 } 4919 if (ilport->ilport_state != STMF_STATE_OFFLINE) { 4920 ret = STMF_INVALID_ARG; 4921 goto stmf_ctl_lock_exit; 4922 } 4923 4924 /* 4925 * Only user request can recover the port from the 4926 * FORCED_OFFLINE state 4927 */ 4928 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) { 4929 if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) { 4930 ret = STMF_FAILURE; 4931 goto stmf_ctl_lock_exit; 4932 } 4933 } 4934 4935 /* 4936 * Avoid too frequent request to online 4937 */ 4938 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) { 4939 ilport->ilport_online_times = 0; 4940 ilport->ilport_avg_interval = 0; 4941 } 4942 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) && 4943 (ilport->ilport_online_times >= 4)) { 4944 ret = STMF_FAILURE; 4945 ilport->ilport_flags |= ILPORT_FORCED_OFFLINE; 4946 stmf_trace(NULL, "stmf_ctl: too frequent request to " 4947 "online the port"); 4948 cmn_err(CE_WARN, "stmf_ctl: too frequent request to " 4949 "online the port, set FORCED_OFFLINE now"); 4950 goto stmf_ctl_lock_exit; 4951 } 4952 if (ilport->ilport_online_times > 0) { 4953 if (ilport->ilport_online_times == 1) { 4954 ilport->ilport_avg_interval = ddi_get_lbolt() - 4955 ilport->ilport_last_online_clock; 4956 } else { 4957 ilport->ilport_avg_interval = 4958 (ilport->ilport_avg_interval + 4959 ddi_get_lbolt() - 4960 ilport->ilport_last_online_clock) >> 1; 4961 } 4962 } 4963 ilport->ilport_last_online_clock = ddi_get_lbolt(); 4964 ilport->ilport_online_times++; 4965 4966 /* 4967 * Submit online service request 4968 */ 4969 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE; 4970 ilport->ilport_state = STMF_STATE_ONLINING; 4971 mutex_exit(&stmf_state.stmf_lock); 4972 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4973 break; 4974 4975 case STMF_CMD_LPORT_ONLINE_COMPLETE: 4976 if (ilport->ilport_state != 
STMF_STATE_ONLINING) { 4977 ret = STMF_INVALID_ARG; 4978 goto stmf_ctl_lock_exit; 4979 } 4980 if (((stmf_change_status_t *)arg)->st_completion_status == 4981 STMF_SUCCESS) { 4982 ilport->ilport_state = STMF_STATE_ONLINE; 4983 mutex_exit(&stmf_state.stmf_lock); 4984 ((stmf_local_port_t *)obj)->lport_ctl( 4985 (stmf_local_port_t *)obj, 4986 STMF_ACK_LPORT_ONLINE_COMPLETE, arg); 4987 mutex_enter(&stmf_state.stmf_lock); 4988 } else { 4989 ilport->ilport_state = STMF_STATE_OFFLINE; 4990 } 4991 ret = STMF_SUCCESS; 4992 goto stmf_ctl_lock_exit; 4993 4994 case STMF_CMD_LPORT_OFFLINE: 4995 if (ilport->ilport_state == STMF_STATE_OFFLINE) { 4996 ret = STMF_ALREADY; 4997 goto stmf_ctl_lock_exit; 4998 } 4999 if (ilport->ilport_state != STMF_STATE_ONLINE) { 5000 ret = STMF_INVALID_ARG; 5001 goto stmf_ctl_lock_exit; 5002 } 5003 ilport->ilport_state = STMF_STATE_OFFLINING; 5004 mutex_exit(&stmf_state.stmf_lock); 5005 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5006 break; 5007 5008 case STMF_CMD_LPORT_OFFLINE_COMPLETE: 5009 if (ilport->ilport_state != STMF_STATE_OFFLINING) { 5010 ret = STMF_INVALID_ARG; 5011 goto stmf_ctl_lock_exit; 5012 } 5013 if (((stmf_change_status_t *)arg)->st_completion_status == 5014 STMF_SUCCESS) { 5015 ilport->ilport_state = STMF_STATE_OFFLINE; 5016 mutex_exit(&stmf_state.stmf_lock); 5017 ((stmf_local_port_t *)obj)->lport_ctl( 5018 (stmf_local_port_t *)obj, 5019 STMF_ACK_LPORT_OFFLINE_COMPLETE, arg); 5020 mutex_enter(&stmf_state.stmf_lock); 5021 } else { 5022 ilport->ilport_state = STMF_STATE_ONLINE; 5023 } 5024 mutex_exit(&stmf_state.stmf_lock); 5025 break; 5026 5027 default: 5028 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd); 5029 ret = STMF_INVALID_ARG; 5030 goto stmf_ctl_lock_exit; 5031 } 5032 5033 return (STMF_SUCCESS); 5034 5035 stmf_ctl_lock_exit:; 5036 mutex_exit(&stmf_state.stmf_lock); 5037 return (ret); 5038 } 5039 5040 /* ARGSUSED */ 5041 stmf_status_t 5042 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 
5043 uint32_t *bufsizep) 5044 { 5045 return (STMF_NOT_SUPPORTED); 5046 } 5047 5048 /* ARGSUSED */ 5049 stmf_status_t 5050 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 5051 uint32_t *bufsizep) 5052 { 5053 uint32_t cl = SI_GET_CLASS(cmd); 5054 5055 if (cl == SI_STMF) { 5056 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep)); 5057 } 5058 if (cl == SI_LPORT) { 5059 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1, 5060 arg2, buf, bufsizep)); 5061 } else if (cl == SI_LU) { 5062 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf, 5063 bufsizep)); 5064 } 5065 5066 return (STMF_NOT_SUPPORTED); 5067 } 5068 5069 /* 5070 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by 5071 * stmf to register local ports. The ident should have 20 bytes in buffer 5072 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string. 5073 */ 5074 void 5075 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn, 5076 uint8_t protocol_id) 5077 { 5078 char wwn_str[20+1]; 5079 5080 sdid->protocol_id = protocol_id; 5081 sdid->piv = 1; 5082 sdid->code_set = CODE_SET_ASCII; 5083 sdid->association = ID_IS_TARGET_PORT; 5084 sdid->ident_length = 20; 5085 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */ 5086 (void) snprintf(wwn_str, sizeof (wwn_str), 5087 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X", 5088 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]); 5089 bcopy(wwn_str, (char *)sdid->ident, 20); 5090 } 5091 5092 5093 stmf_xfer_data_t * 5094 stmf_prepare_tpgs_data(uint8_t ilu_alua) 5095 { 5096 stmf_xfer_data_t *xd; 5097 stmf_i_local_port_t *ilport; 5098 uint8_t *p; 5099 uint32_t sz, asz, nports = 0, nports_standby = 0; 5100 5101 mutex_enter(&stmf_state.stmf_lock); 5102 /* check if any ports are standby and create second group */ 5103 for (ilport = stmf_state.stmf_ilportlist; ilport; 5104 ilport = ilport->ilport_next) { 5105 if (ilport->ilport_standby == 1) { 5106 nports_standby++; 5107 } else { 5108 nports++; 5109 } 5110 } 
5111 5112 /* The spec only allows for 255 ports to be reported per group */ 5113 nports = min(nports, 255); 5114 nports_standby = min(nports_standby, 255); 5115 sz = (nports * 4) + 12; 5116 if (nports_standby && ilu_alua) { 5117 sz += (nports_standby * 4) + 8; 5118 } 5119 asz = sz + sizeof (*xd) - 4; 5120 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP); 5121 if (xd == NULL) { 5122 mutex_exit(&stmf_state.stmf_lock); 5123 return (NULL); 5124 } 5125 xd->alloc_size = asz; 5126 xd->size_left = sz; 5127 5128 p = xd->buf; 5129 5130 *((uint32_t *)p) = BE_32(sz - 4); 5131 p += 4; 5132 p[0] = 0x80; /* PREF */ 5133 p[1] = 5; /* AO_SUP, S_SUP */ 5134 if (stmf_state.stmf_alua_node == 1) { 5135 p[3] = 1; /* Group 1 */ 5136 } else { 5137 p[3] = 0; /* Group 0 */ 5138 } 5139 p[7] = nports & 0xff; 5140 p += 8; 5141 for (ilport = stmf_state.stmf_ilportlist; ilport; 5142 ilport = ilport->ilport_next) { 5143 if (ilport->ilport_standby == 1) { 5144 continue; 5145 } 5146 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 5147 p += 4; 5148 } 5149 if (nports_standby && ilu_alua) { 5150 p[0] = 0x02; /* Non PREF, Standby */ 5151 p[1] = 5; /* AO_SUP, S_SUP */ 5152 if (stmf_state.stmf_alua_node == 1) { 5153 p[3] = 0; /* Group 0 */ 5154 } else { 5155 p[3] = 1; /* Group 1 */ 5156 } 5157 p[7] = nports_standby & 0xff; 5158 p += 8; 5159 for (ilport = stmf_state.stmf_ilportlist; ilport; 5160 ilport = ilport->ilport_next) { 5161 if (ilport->ilport_standby == 0) { 5162 continue; 5163 } 5164 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 5165 p += 4; 5166 } 5167 } 5168 5169 mutex_exit(&stmf_state.stmf_lock); 5170 5171 return (xd); 5172 } 5173 5174 struct scsi_devid_desc * 5175 stmf_scsilib_get_devid_desc(uint16_t rtpid) 5176 { 5177 scsi_devid_desc_t *devid = NULL; 5178 stmf_i_local_port_t *ilport; 5179 5180 mutex_enter(&stmf_state.stmf_lock); 5181 5182 for (ilport = stmf_state.stmf_ilportlist; ilport; 5183 ilport = ilport->ilport_next) { 5184 if (ilport->ilport_rtpid == rtpid) { 5185 
scsi_devid_desc_t *id = ilport->ilport_lport->lport_id; 5186 uint32_t id_sz = sizeof (scsi_devid_desc_t) - 1 + 5187 id->ident_length; 5188 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz, 5189 KM_NOSLEEP); 5190 if (devid != NULL) { 5191 bcopy(id, devid, id_sz); 5192 } 5193 break; 5194 } 5195 } 5196 5197 mutex_exit(&stmf_state.stmf_lock); 5198 return (devid); 5199 } 5200 5201 uint16_t 5202 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid) 5203 { 5204 stmf_i_local_port_t *ilport; 5205 scsi_devid_desc_t *id; 5206 uint16_t rtpid = 0; 5207 5208 mutex_enter(&stmf_state.stmf_lock); 5209 for (ilport = stmf_state.stmf_ilportlist; ilport; 5210 ilport = ilport->ilport_next) { 5211 id = ilport->ilport_lport->lport_id; 5212 if ((devid->ident_length == id->ident_length) && 5213 (memcmp(devid->ident, id->ident, id->ident_length) == 0)) { 5214 rtpid = ilport->ilport_rtpid; 5215 break; 5216 } 5217 } 5218 mutex_exit(&stmf_state.stmf_lock); 5219 return (rtpid); 5220 } 5221 5222 static uint16_t stmf_lu_id_gen_number = 0; 5223 5224 stmf_status_t 5225 stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id) 5226 { 5227 return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id)); 5228 } 5229 5230 stmf_status_t 5231 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id, 5232 scsi_devid_desc_t *lu_id) 5233 { 5234 uint8_t *p; 5235 struct timeval32 timestamp32; 5236 uint32_t *t = (uint32_t *)×tamp32; 5237 struct ether_addr mac; 5238 uint8_t *e = (uint8_t *)&mac; 5239 int hid = (int)host_id; 5240 5241 if (company_id == COMPANY_ID_NONE) 5242 company_id = COMPANY_ID_SUN; 5243 5244 if (lu_id->ident_length != 0x10) 5245 return (STMF_INVALID_ARG); 5246 5247 p = (uint8_t *)lu_id; 5248 5249 atomic_add_16(&stmf_lu_id_gen_number, 1); 5250 5251 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10; 5252 p[4] = ((company_id >> 20) & 0xf) | 0x60; 5253 p[5] = (company_id >> 12) & 0xff; 5254 p[6] = (company_id >> 4) & 0xff; 5255 p[7] = (company_id << 4) & 0xf0; 5256 if (hid == 0 && 
!localetheraddr((struct ether_addr *)NULL, &mac)) { 5257 hid = BE_32((int)zone_get_hostid(NULL)); 5258 } 5259 if (hid != 0) { 5260 e[0] = (hid >> 24) & 0xff; 5261 e[1] = (hid >> 16) & 0xff; 5262 e[2] = (hid >> 8) & 0xff; 5263 e[3] = hid & 0xff; 5264 e[4] = e[5] = 0; 5265 } 5266 bcopy(e, p+8, 6); 5267 uniqtime32(×tamp32); 5268 *t = BE_32(*t); 5269 bcopy(t, p+14, 4); 5270 p[18] = (stmf_lu_id_gen_number >> 8) & 0xff; 5271 p[19] = stmf_lu_id_gen_number & 0xff; 5272 5273 return (STMF_SUCCESS); 5274 } 5275 5276 /* 5277 * saa is sense key, ASC, ASCQ 5278 */ 5279 void 5280 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa) 5281 { 5282 uint8_t sd[18]; 5283 task->task_scsi_status = st; 5284 if (st == 2) { 5285 bzero(sd, 18); 5286 sd[0] = 0x70; 5287 sd[2] = (saa >> 16) & 0xf; 5288 sd[7] = 10; 5289 sd[12] = (saa >> 8) & 0xff; 5290 sd[13] = saa & 0xff; 5291 task->task_sense_data = sd; 5292 task->task_sense_length = 18; 5293 } else { 5294 task->task_sense_data = NULL; 5295 task->task_sense_length = 0; 5296 } 5297 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE); 5298 } 5299 5300 uint32_t 5301 stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page, 5302 uint32_t page_len, uint8_t byte0, uint32_t vpd_mask) 5303 { 5304 uint8_t *p = NULL; 5305 uint8_t small_buf[32]; 5306 uint32_t sz = 0; 5307 uint32_t n = 4; 5308 uint32_t m = 0; 5309 uint32_t last_bit = 0; 5310 5311 if (page_len < 4) 5312 return (0); 5313 if (page_len > 65535) 5314 page_len = 65535; 5315 5316 page[0] = byte0; 5317 page[1] = 0x83; 5318 5319 /* CONSTCOND */ 5320 while (1) { 5321 m += sz; 5322 if (sz && (page_len > n)) { 5323 uint32_t copysz; 5324 copysz = page_len > (n + sz) ? 
sz : page_len - n; 5325 bcopy(p, page + n, copysz); 5326 n += copysz; 5327 } 5328 vpd_mask &= ~last_bit; 5329 if (vpd_mask == 0) 5330 break; 5331 5332 if (vpd_mask & STMF_VPD_LU_ID) { 5333 last_bit = STMF_VPD_LU_ID; 5334 sz = task->task_lu->lu_id->ident_length + 4; 5335 p = (uint8_t *)task->task_lu->lu_id; 5336 continue; 5337 } else if (vpd_mask & STMF_VPD_TARGET_ID) { 5338 last_bit = STMF_VPD_TARGET_ID; 5339 sz = task->task_lport->lport_id->ident_length + 4; 5340 p = (uint8_t *)task->task_lport->lport_id; 5341 continue; 5342 } else if (vpd_mask & STMF_VPD_TP_GROUP) { 5343 stmf_i_local_port_t *ilport; 5344 last_bit = STMF_VPD_TP_GROUP; 5345 p = small_buf; 5346 bzero(p, 8); 5347 p[0] = 1; 5348 p[1] = 0x15; 5349 p[3] = 4; 5350 ilport = (stmf_i_local_port_t *) 5351 task->task_lport->lport_stmf_private; 5352 if (ilport->ilport_rtpid > 255) { 5353 p[7] = 1; /* Group 1 */ 5354 } 5355 sz = 8; 5356 continue; 5357 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) { 5358 stmf_i_local_port_t *ilport; 5359 5360 last_bit = STMF_VPD_RELATIVE_TP_ID; 5361 p = small_buf; 5362 bzero(p, 8); 5363 p[0] = 1; 5364 p[1] = 0x14; 5365 p[3] = 4; 5366 ilport = (stmf_i_local_port_t *) 5367 task->task_lport->lport_stmf_private; 5368 p[6] = (ilport->ilport_rtpid >> 8) & 0xff; 5369 p[7] = ilport->ilport_rtpid & 0xff; 5370 sz = 8; 5371 continue; 5372 } else { 5373 cmn_err(CE_WARN, "Invalid vpd_mask"); 5374 break; 5375 } 5376 } 5377 5378 page[2] = (m >> 8) & 0xff; 5379 page[3] = m & 0xff; 5380 5381 return (n); 5382 } 5383 5384 void 5385 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf) 5386 { 5387 stmf_i_scsi_task_t *itask = 5388 (stmf_i_scsi_task_t *)task->task_stmf_private; 5389 stmf_i_lu_t *ilu = 5390 (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5391 stmf_xfer_data_t *xd; 5392 uint32_t sz, minsz; 5393 5394 itask->itask_flags |= ITASK_DEFAULT_HANDLING; 5395 task->task_cmd_xfer_length = 5396 ((((uint32_t)task->task_cdb[6]) << 24) | 5397 (((uint32_t)task->task_cdb[7]) << 16) 
| 5398 (((uint32_t)task->task_cdb[8]) << 8) | 5399 ((uint32_t)task->task_cdb[9])); 5400 5401 if (task->task_additional_flags & 5402 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 5403 task->task_expected_xfer_length = 5404 task->task_cmd_xfer_length; 5405 } 5406 5407 if (task->task_cmd_xfer_length == 0) { 5408 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5409 return; 5410 } 5411 if (task->task_cmd_xfer_length < 4) { 5412 stmf_scsilib_send_status(task, STATUS_CHECK, 5413 STMF_SAA_INVALID_FIELD_IN_CDB); 5414 return; 5415 } 5416 5417 sz = min(task->task_expected_xfer_length, 5418 task->task_cmd_xfer_length); 5419 5420 xd = stmf_prepare_tpgs_data(ilu->ilu_alua); 5421 5422 if (xd == NULL) { 5423 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5424 STMF_ALLOC_FAILURE, NULL); 5425 return; 5426 } 5427 5428 sz = min(sz, xd->size_left); 5429 xd->size_left = sz; 5430 minsz = min(512, sz); 5431 5432 if (dbuf == NULL) 5433 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 5434 if (dbuf == NULL) { 5435 kmem_free(xd, xd->alloc_size); 5436 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5437 STMF_ALLOC_FAILURE, NULL); 5438 return; 5439 } 5440 dbuf->db_lu_private = xd; 5441 stmf_xd_to_dbuf(dbuf); 5442 5443 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 5444 (void) stmf_xfer_data(task, dbuf, 0); 5445 5446 } 5447 5448 void 5449 stmf_scsilib_handle_task_mgmt(scsi_task_t *task) 5450 { 5451 5452 switch (task->task_mgmt_function) { 5453 /* 5454 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET 5455 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state 5456 * in these cases. This needs to be changed to abort only the required 5457 * set. 
5458 */ 5459 case TM_ABORT_TASK: 5460 case TM_ABORT_TASK_SET: 5461 case TM_CLEAR_TASK_SET: 5462 case TM_LUN_RESET: 5463 stmf_handle_lun_reset(task); 5464 /* issue the reset to the proxy node as well */ 5465 (void) stmf_proxy_scsi_cmd(task, NULL); 5466 return; 5467 case TM_TARGET_RESET: 5468 case TM_TARGET_COLD_RESET: 5469 case TM_TARGET_WARM_RESET: 5470 stmf_handle_target_reset(task); 5471 return; 5472 default: 5473 /* We dont support this task mgmt function */ 5474 stmf_scsilib_send_status(task, STATUS_CHECK, 5475 STMF_SAA_INVALID_FIELD_IN_CMD_IU); 5476 return; 5477 } 5478 } 5479 5480 void 5481 stmf_handle_lun_reset(scsi_task_t *task) 5482 { 5483 stmf_i_scsi_task_t *itask; 5484 stmf_i_lu_t *ilu; 5485 5486 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 5487 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5488 5489 /* 5490 * To sync with target reset, grab this lock. The LU is not going 5491 * anywhere as there is atleast one task pending (this task). 5492 */ 5493 mutex_enter(&stmf_state.stmf_lock); 5494 5495 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5496 mutex_exit(&stmf_state.stmf_lock); 5497 stmf_scsilib_send_status(task, STATUS_CHECK, 5498 STMF_SAA_OPERATION_IN_PROGRESS); 5499 return; 5500 } 5501 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 5502 mutex_exit(&stmf_state.stmf_lock); 5503 5504 /* 5505 * Mark this task as the one causing LU reset so that we know who 5506 * was responsible for setting the ILU_RESET_ACTIVE. In case this 5507 * task itself gets aborted, we will clear ILU_RESET_ACTIVE. 
5508 */ 5509 itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET; 5510 5511 /* Initiatiate abort on all commands on this LU except this one */ 5512 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu); 5513 5514 /* Start polling on this task */ 5515 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5516 != STMF_SUCCESS) { 5517 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 5518 NULL); 5519 return; 5520 } 5521 } 5522 5523 void 5524 stmf_handle_target_reset(scsi_task_t *task) 5525 { 5526 stmf_i_scsi_task_t *itask; 5527 stmf_i_lu_t *ilu; 5528 stmf_i_scsi_session_t *iss; 5529 stmf_lun_map_t *lm; 5530 stmf_lun_map_ent_t *lm_ent; 5531 int i, lf; 5532 5533 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 5534 iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private; 5535 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5536 5537 /* 5538 * To sync with LUN reset, grab this lock. The session is not going 5539 * anywhere as there is atleast one task pending (this task). 5540 */ 5541 mutex_enter(&stmf_state.stmf_lock); 5542 5543 /* Grab the session lock as a writer to prevent any changes in it */ 5544 rw_enter(iss->iss_lockp, RW_WRITER); 5545 5546 if (iss->iss_flags & ISS_RESET_ACTIVE) { 5547 rw_exit(iss->iss_lockp); 5548 mutex_exit(&stmf_state.stmf_lock); 5549 stmf_scsilib_send_status(task, STATUS_CHECK, 5550 STMF_SAA_OPERATION_IN_PROGRESS); 5551 return; 5552 } 5553 atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE); 5554 5555 /* 5556 * Now go through each LUN in this session and make sure all of them 5557 * can be reset. 
5558 */ 5559 lm = iss->iss_sm; 5560 for (i = 0, lf = 0; i < lm->lm_nentries; i++) { 5561 if (lm->lm_plus[i] == NULL) 5562 continue; 5563 lf++; 5564 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5565 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 5566 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5567 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 5568 rw_exit(iss->iss_lockp); 5569 mutex_exit(&stmf_state.stmf_lock); 5570 stmf_scsilib_send_status(task, STATUS_CHECK, 5571 STMF_SAA_OPERATION_IN_PROGRESS); 5572 return; 5573 } 5574 } 5575 if (lf == 0) { 5576 /* No luns in this session */ 5577 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 5578 rw_exit(iss->iss_lockp); 5579 mutex_exit(&stmf_state.stmf_lock); 5580 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5581 return; 5582 } 5583 5584 /* ok, start the damage */ 5585 itask->itask_flags |= ITASK_DEFAULT_HANDLING | 5586 ITASK_CAUSING_TARGET_RESET; 5587 for (i = 0; i < lm->lm_nentries; i++) { 5588 if (lm->lm_plus[i] == NULL) 5589 continue; 5590 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5591 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 5592 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 5593 } 5594 rw_exit(iss->iss_lockp); 5595 mutex_exit(&stmf_state.stmf_lock); 5596 5597 for (i = 0; i < lm->lm_nentries; i++) { 5598 if (lm->lm_plus[i] == NULL) 5599 continue; 5600 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5601 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, 5602 lm_ent->ent_lu); 5603 } 5604 5605 /* Start polling on this task */ 5606 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5607 != STMF_SUCCESS) { 5608 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 5609 NULL); 5610 return; 5611 } 5612 } 5613 5614 int 5615 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask) 5616 { 5617 scsi_task_t *task = itask->itask_task; 5618 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 5619 task->task_session->ss_stmf_private; 5620 5621 rw_enter(iss->iss_lockp, RW_WRITER); 
5622 if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) || 5623 (task->task_cdb[0] == SCMD_INQUIRY)) { 5624 rw_exit(iss->iss_lockp); 5625 return (0); 5626 } 5627 atomic_and_32(&iss->iss_flags, 5628 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 5629 rw_exit(iss->iss_lockp); 5630 5631 if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 5632 return (0); 5633 } 5634 stmf_scsilib_send_status(task, STATUS_CHECK, 5635 STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED); 5636 return (1); 5637 } 5638 5639 void 5640 stmf_worker_init() 5641 { 5642 uint32_t i; 5643 5644 /* Make local copy of global tunables */ 5645 stmf_i_max_nworkers = stmf_max_nworkers; 5646 stmf_i_min_nworkers = stmf_min_nworkers; 5647 5648 ASSERT(stmf_workers == NULL); 5649 if (stmf_i_min_nworkers < 4) { 5650 stmf_i_min_nworkers = 4; 5651 } 5652 if (stmf_i_max_nworkers < stmf_i_min_nworkers) { 5653 stmf_i_max_nworkers = stmf_i_min_nworkers; 5654 } 5655 stmf_workers = (stmf_worker_t *)kmem_zalloc( 5656 sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP); 5657 for (i = 0; i < stmf_i_max_nworkers; i++) { 5658 stmf_worker_t *w = &stmf_workers[i]; 5659 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL); 5660 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL); 5661 } 5662 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 5663 stmf_workers_state = STMF_WORKERS_ENABLED; 5664 5665 /* Workers will be started by stmf_worker_mgmt() */ 5666 5667 /* Lets wait for atleast one worker to start */ 5668 while (stmf_nworkers_cur == 0) 5669 delay(drv_usectohz(20 * 1000)); 5670 stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000); 5671 } 5672 5673 stmf_status_t 5674 stmf_worker_fini() 5675 { 5676 int i; 5677 clock_t sb; 5678 5679 if (stmf_workers_state == STMF_WORKERS_DISABLED) 5680 return (STMF_SUCCESS); 5681 ASSERT(stmf_workers); 5682 stmf_workers_state = STMF_WORKERS_DISABLED; 5683 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 5684 cv_signal(&stmf_state.stmf_cv); 5685 5686 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 
1000); 5687 /* Wait for all the threads to die */ 5688 while (stmf_nworkers_cur != 0) { 5689 if (ddi_get_lbolt() > sb) { 5690 stmf_workers_state = STMF_WORKERS_ENABLED; 5691 return (STMF_BUSY); 5692 } 5693 delay(drv_usectohz(100 * 1000)); 5694 } 5695 for (i = 0; i < stmf_i_max_nworkers; i++) { 5696 stmf_worker_t *w = &stmf_workers[i]; 5697 mutex_destroy(&w->worker_lock); 5698 cv_destroy(&w->worker_cv); 5699 } 5700 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers); 5701 stmf_workers = NULL; 5702 5703 return (STMF_SUCCESS); 5704 } 5705 5706 void 5707 stmf_worker_task(void *arg) 5708 { 5709 stmf_worker_t *w; 5710 stmf_i_scsi_session_t *iss; 5711 scsi_task_t *task; 5712 stmf_i_scsi_task_t *itask; 5713 stmf_data_buf_t *dbuf; 5714 stmf_lu_t *lu; 5715 clock_t wait_timer = 0; 5716 clock_t wait_ticks, wait_delta = 0; 5717 uint32_t old, new; 5718 uint8_t curcmd; 5719 uint8_t abort_free; 5720 uint8_t wait_queue; 5721 uint8_t dec_qdepth; 5722 5723 w = (stmf_worker_t *)arg; 5724 wait_ticks = drv_usectohz(10000); 5725 5726 mutex_enter(&w->worker_lock); 5727 w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE; 5728 stmf_worker_loop:; 5729 if ((w->worker_ref_count == 0) && 5730 (w->worker_flags & STMF_WORKER_TERMINATE)) { 5731 w->worker_flags &= ~(STMF_WORKER_STARTED | 5732 STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE); 5733 w->worker_tid = NULL; 5734 mutex_exit(&w->worker_lock); 5735 thread_exit(); 5736 } 5737 /* CONSTCOND */ 5738 while (1) { 5739 dec_qdepth = 0; 5740 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) { 5741 wait_timer = 0; 5742 wait_delta = 0; 5743 if (w->worker_wait_head) { 5744 ASSERT(w->worker_wait_tail); 5745 if (w->worker_task_head == NULL) 5746 w->worker_task_head = 5747 w->worker_wait_head; 5748 else 5749 w->worker_task_tail->itask_worker_next = 5750 w->worker_wait_head; 5751 w->worker_task_tail = w->worker_wait_tail; 5752 w->worker_wait_head = w->worker_wait_tail = 5753 NULL; 5754 } 5755 } 5756 if ((itask = w->worker_task_head) 
== NULL) { 5757 break; 5758 } 5759 task = itask->itask_task; 5760 w->worker_task_head = itask->itask_worker_next; 5761 if (w->worker_task_head == NULL) 5762 w->worker_task_tail = NULL; 5763 5764 wait_queue = 0; 5765 abort_free = 0; 5766 if (itask->itask_ncmds > 0) { 5767 curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1]; 5768 } else { 5769 ASSERT(itask->itask_flags & ITASK_BEING_ABORTED); 5770 } 5771 do { 5772 old = itask->itask_flags; 5773 if (old & ITASK_BEING_ABORTED) { 5774 itask->itask_ncmds = 1; 5775 curcmd = itask->itask_cmd_stack[0] = 5776 ITASK_CMD_ABORT; 5777 goto out_itask_flag_loop; 5778 } else if ((curcmd & ITASK_CMD_MASK) == 5779 ITASK_CMD_NEW_TASK) { 5780 /* 5781 * set ITASK_KSTAT_IN_RUNQ, this flag 5782 * will not reset until task completed 5783 */ 5784 new = old | ITASK_KNOWN_TO_LU | 5785 ITASK_KSTAT_IN_RUNQ; 5786 } else { 5787 goto out_itask_flag_loop; 5788 } 5789 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 5790 5791 out_itask_flag_loop: 5792 5793 /* 5794 * Decide if this task needs to go to a queue and/or if 5795 * we can decrement the itask_cmd_stack. 
5796 */ 5797 if (curcmd == ITASK_CMD_ABORT) { 5798 if (itask->itask_flags & (ITASK_KNOWN_TO_LU | 5799 ITASK_KNOWN_TO_TGT_PORT)) { 5800 wait_queue = 1; 5801 } else { 5802 abort_free = 1; 5803 } 5804 } else if ((curcmd & ITASK_CMD_POLL) && 5805 (itask->itask_poll_timeout > ddi_get_lbolt())) { 5806 wait_queue = 1; 5807 } 5808 5809 if (wait_queue) { 5810 itask->itask_worker_next = NULL; 5811 if (w->worker_wait_tail) { 5812 w->worker_wait_tail->itask_worker_next = itask; 5813 } else { 5814 w->worker_wait_head = itask; 5815 } 5816 w->worker_wait_tail = itask; 5817 if (wait_timer == 0) { 5818 wait_timer = ddi_get_lbolt() + wait_ticks; 5819 wait_delta = wait_ticks; 5820 } 5821 } else if ((--(itask->itask_ncmds)) != 0) { 5822 itask->itask_worker_next = NULL; 5823 if (w->worker_task_tail) { 5824 w->worker_task_tail->itask_worker_next = itask; 5825 } else { 5826 w->worker_task_head = itask; 5827 } 5828 w->worker_task_tail = itask; 5829 } else { 5830 atomic_and_32(&itask->itask_flags, 5831 ~ITASK_IN_WORKER_QUEUE); 5832 /* 5833 * This is where the queue depth should go down by 5834 * one but we delay that on purpose to account for 5835 * the call into the provider. The actual decrement 5836 * happens after the worker has done its job. 
5837 */ 5838 dec_qdepth = 1; 5839 } 5840 5841 /* We made it here means we are going to call LU */ 5842 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) 5843 lu = task->task_lu; 5844 else 5845 lu = dlun0; 5846 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)]; 5847 mutex_exit(&w->worker_lock); 5848 curcmd &= ITASK_CMD_MASK; 5849 switch (curcmd) { 5850 case ITASK_CMD_NEW_TASK: 5851 iss = (stmf_i_scsi_session_t *) 5852 task->task_session->ss_stmf_private; 5853 stmf_update_kstat_lu_q(task, kstat_waitq_to_runq); 5854 stmf_update_kstat_lport_q(task, kstat_waitq_to_runq); 5855 if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) { 5856 if (stmf_handle_cmd_during_ic(itask)) 5857 break; 5858 } 5859 #ifdef DEBUG 5860 if (stmf_drop_task_counter > 0) { 5861 if (atomic_add_32_nv( 5862 (uint32_t *)&stmf_drop_task_counter, 5863 -1) == 1) { 5864 break; 5865 } 5866 } 5867 #endif 5868 DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task); 5869 lu->lu_new_task(task, dbuf); 5870 break; 5871 case ITASK_CMD_DATA_XFER_DONE: 5872 lu->lu_dbuf_xfer_done(task, dbuf); 5873 break; 5874 case ITASK_CMD_STATUS_DONE: 5875 lu->lu_send_status_done(task); 5876 break; 5877 case ITASK_CMD_ABORT: 5878 if (abort_free) { 5879 stmf_task_free(task); 5880 } else { 5881 stmf_do_task_abort(task); 5882 } 5883 break; 5884 case ITASK_CMD_POLL_LU: 5885 if (!wait_queue) { 5886 lu->lu_task_poll(task); 5887 } 5888 break; 5889 case ITASK_CMD_POLL_LPORT: 5890 if (!wait_queue) 5891 task->task_lport->lport_task_poll(task); 5892 break; 5893 case ITASK_CMD_SEND_STATUS: 5894 /* case ITASK_CMD_XFER_DATA: */ 5895 break; 5896 } 5897 mutex_enter(&w->worker_lock); 5898 if (dec_qdepth) { 5899 w->worker_queue_depth--; 5900 } 5901 } 5902 if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) { 5903 if (w->worker_ref_count == 0) 5904 goto stmf_worker_loop; 5905 else { 5906 wait_timer = ddi_get_lbolt() + 1; 5907 wait_delta = 1; 5908 } 5909 } 5910 w->worker_flags &= ~STMF_WORKER_ACTIVE; 5911 if (wait_timer) { 5912 
(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock, 5913 wait_delta, TR_CLOCK_TICK); 5914 } else { 5915 cv_wait(&w->worker_cv, &w->worker_lock); 5916 } 5917 w->worker_flags |= STMF_WORKER_ACTIVE; 5918 goto stmf_worker_loop; 5919 } 5920 5921 void 5922 stmf_worker_mgmt() 5923 { 5924 int i; 5925 int workers_needed; 5926 uint32_t qd; 5927 clock_t tps, d = 0; 5928 uint32_t cur_max_ntasks = 0; 5929 stmf_worker_t *w; 5930 5931 /* Check if we are trying to increase the # of threads */ 5932 for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) { 5933 if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) { 5934 stmf_nworkers_cur++; 5935 stmf_nworkers_accepting_cmds++; 5936 } else { 5937 /* Wait for transition to complete */ 5938 return; 5939 } 5940 } 5941 /* Check if we are trying to decrease the # of workers */ 5942 for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) { 5943 if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) { 5944 stmf_nworkers_cur--; 5945 /* 5946 * stmf_nworkers_accepting_cmds has already been 5947 * updated by the request to reduce the # of workers. 
5948 */ 5949 } else { 5950 /* Wait for transition to complete */ 5951 return; 5952 } 5953 } 5954 /* Check if we are being asked to quit */ 5955 if (stmf_workers_state != STMF_WORKERS_ENABLED) { 5956 if (stmf_nworkers_cur) { 5957 workers_needed = 0; 5958 goto worker_mgmt_trigger_change; 5959 } 5960 return; 5961 } 5962 /* Check if we are starting */ 5963 if (stmf_nworkers_cur < stmf_i_min_nworkers) { 5964 workers_needed = stmf_i_min_nworkers; 5965 goto worker_mgmt_trigger_change; 5966 } 5967 5968 tps = drv_usectohz(1 * 1000 * 1000); 5969 if ((stmf_wm_last != 0) && 5970 ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) { 5971 qd = 0; 5972 for (i = 0; i < stmf_nworkers_accepting_cmds; i++) { 5973 qd += stmf_workers[i].worker_max_qdepth_pu; 5974 stmf_workers[i].worker_max_qdepth_pu = 0; 5975 if (stmf_workers[i].worker_max_sys_qdepth_pu > 5976 cur_max_ntasks) { 5977 cur_max_ntasks = 5978 stmf_workers[i].worker_max_sys_qdepth_pu; 5979 } 5980 stmf_workers[i].worker_max_sys_qdepth_pu = 0; 5981 } 5982 } 5983 stmf_wm_last = ddi_get_lbolt(); 5984 if (d <= tps) { 5985 /* still ramping up */ 5986 return; 5987 } 5988 /* max qdepth cannot be more than max tasks */ 5989 if (qd > cur_max_ntasks) 5990 qd = cur_max_ntasks; 5991 5992 /* See if we have more workers */ 5993 if (qd < stmf_nworkers_accepting_cmds) { 5994 /* 5995 * Since we dont reduce the worker count right away, monitor 5996 * the highest load during the scale_down_delay. 
5997 */ 5998 if (qd > stmf_worker_scale_down_qd) 5999 stmf_worker_scale_down_qd = qd; 6000 if (stmf_worker_scale_down_timer == 0) { 6001 stmf_worker_scale_down_timer = ddi_get_lbolt() + 6002 drv_usectohz(stmf_worker_scale_down_delay * 6003 1000 * 1000); 6004 return; 6005 } 6006 if (ddi_get_lbolt() < stmf_worker_scale_down_timer) { 6007 return; 6008 } 6009 /* Its time to reduce the workers */ 6010 if (stmf_worker_scale_down_qd < stmf_i_min_nworkers) 6011 stmf_worker_scale_down_qd = stmf_i_min_nworkers; 6012 if (stmf_worker_scale_down_qd > stmf_i_max_nworkers) 6013 stmf_worker_scale_down_qd = stmf_i_max_nworkers; 6014 if (stmf_worker_scale_down_qd == stmf_nworkers_cur) 6015 return; 6016 workers_needed = stmf_worker_scale_down_qd; 6017 stmf_worker_scale_down_qd = 0; 6018 goto worker_mgmt_trigger_change; 6019 } 6020 stmf_worker_scale_down_qd = 0; 6021 stmf_worker_scale_down_timer = 0; 6022 if (qd > stmf_i_max_nworkers) 6023 qd = stmf_i_max_nworkers; 6024 if (qd < stmf_i_min_nworkers) 6025 qd = stmf_i_min_nworkers; 6026 if (qd == stmf_nworkers_cur) 6027 return; 6028 workers_needed = qd; 6029 goto worker_mgmt_trigger_change; 6030 6031 /* NOTREACHED */ 6032 return; 6033 6034 worker_mgmt_trigger_change: 6035 ASSERT(workers_needed != stmf_nworkers_cur); 6036 if (workers_needed > stmf_nworkers_cur) { 6037 stmf_nworkers_needed = workers_needed; 6038 for (i = stmf_nworkers_cur; i < workers_needed; i++) { 6039 w = &stmf_workers[i]; 6040 w->worker_tid = thread_create(NULL, 0, stmf_worker_task, 6041 (void *)&stmf_workers[i], 0, &p0, TS_RUN, 6042 minclsyspri); 6043 } 6044 return; 6045 } 6046 /* At this point we know that we are decreasing the # of workers */ 6047 stmf_nworkers_accepting_cmds = workers_needed; 6048 stmf_nworkers_needed = workers_needed; 6049 /* Signal the workers that its time to quit */ 6050 for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) { 6051 w = &stmf_workers[i]; 6052 ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED)); 6053 
mutex_enter(&w->worker_lock); 6054 w->worker_flags |= STMF_WORKER_TERMINATE; 6055 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 6056 cv_signal(&w->worker_cv); 6057 mutex_exit(&w->worker_lock); 6058 } 6059 } 6060 6061 /* 6062 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private). 6063 * If all the data has been filled out, frees the xd and makes 6064 * db_lu_private NULL. 6065 */ 6066 void 6067 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf) 6068 { 6069 stmf_xfer_data_t *xd; 6070 uint8_t *p; 6071 int i; 6072 uint32_t s; 6073 6074 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 6075 dbuf->db_data_size = 0; 6076 dbuf->db_relative_offset = xd->size_done; 6077 for (i = 0; i < dbuf->db_sglist_length; i++) { 6078 s = min(xd->size_left, dbuf->db_sglist[i].seg_length); 6079 p = &xd->buf[xd->size_done]; 6080 bcopy(p, dbuf->db_sglist[i].seg_addr, s); 6081 xd->size_left -= s; 6082 xd->size_done += s; 6083 dbuf->db_data_size += s; 6084 if (xd->size_left == 0) { 6085 kmem_free(xd, xd->alloc_size); 6086 dbuf->db_lu_private = NULL; 6087 return; 6088 } 6089 } 6090 } 6091 6092 /* ARGSUSED */ 6093 stmf_status_t 6094 stmf_dlun0_task_alloc(scsi_task_t *task) 6095 { 6096 return (STMF_SUCCESS); 6097 } 6098 6099 void 6100 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 6101 { 6102 uint8_t *cdbp = (uint8_t *)&task->task_cdb[0]; 6103 stmf_i_scsi_session_t *iss; 6104 uint32_t sz, minsz; 6105 uint8_t *p; 6106 stmf_xfer_data_t *xd; 6107 uint8_t inq_page_length = 31; 6108 6109 if (task->task_mgmt_function) { 6110 stmf_scsilib_handle_task_mgmt(task); 6111 return; 6112 } 6113 6114 switch (cdbp[0]) { 6115 case SCMD_INQUIRY: 6116 /* 6117 * Basic protocol checks. In addition, only reply to 6118 * standard inquiry. Otherwise, the LU provider needs 6119 * to respond. 
6120 */ 6121 6122 if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) { 6123 stmf_scsilib_send_status(task, STATUS_CHECK, 6124 STMF_SAA_INVALID_FIELD_IN_CDB); 6125 return; 6126 } 6127 6128 task->task_cmd_xfer_length = 6129 (((uint32_t)cdbp[3]) << 8) | cdbp[4]; 6130 6131 if (task->task_additional_flags & 6132 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 6133 task->task_expected_xfer_length = 6134 task->task_cmd_xfer_length; 6135 } 6136 6137 sz = min(task->task_expected_xfer_length, 6138 min(36, task->task_cmd_xfer_length)); 6139 minsz = 36; 6140 6141 if (sz == 0) { 6142 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6143 return; 6144 } 6145 6146 if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) { 6147 /* 6148 * Ignore any preallocated dbuf if the size is less 6149 * than 36. It will be freed during the task_free. 6150 */ 6151 dbuf = NULL; 6152 } 6153 if (dbuf == NULL) 6154 dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0); 6155 if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) { 6156 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6157 STMF_ALLOC_FAILURE, NULL); 6158 return; 6159 } 6160 dbuf->db_lu_private = NULL; 6161 6162 p = dbuf->db_sglist[0].seg_addr; 6163 6164 /* 6165 * Standard inquiry handling only. 
6166 */ 6167 6168 bzero(p, inq_page_length + 5); 6169 6170 p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN; 6171 p[2] = 5; 6172 p[3] = 0x12; 6173 p[4] = inq_page_length; 6174 p[6] = 0x80; 6175 6176 (void) strncpy((char *)p+8, "SUN ", 8); 6177 (void) strncpy((char *)p+16, "COMSTAR ", 16); 6178 (void) strncpy((char *)p+32, "1.0 ", 4); 6179 6180 dbuf->db_data_size = sz; 6181 dbuf->db_relative_offset = 0; 6182 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 6183 (void) stmf_xfer_data(task, dbuf, 0); 6184 6185 return; 6186 6187 case SCMD_REPORT_LUNS: 6188 task->task_cmd_xfer_length = 6189 ((((uint32_t)task->task_cdb[6]) << 24) | 6190 (((uint32_t)task->task_cdb[7]) << 16) | 6191 (((uint32_t)task->task_cdb[8]) << 8) | 6192 ((uint32_t)task->task_cdb[9])); 6193 6194 if (task->task_additional_flags & 6195 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 6196 task->task_expected_xfer_length = 6197 task->task_cmd_xfer_length; 6198 } 6199 6200 sz = min(task->task_expected_xfer_length, 6201 task->task_cmd_xfer_length); 6202 6203 if (sz < 16) { 6204 stmf_scsilib_send_status(task, STATUS_CHECK, 6205 STMF_SAA_INVALID_FIELD_IN_CDB); 6206 return; 6207 } 6208 6209 iss = (stmf_i_scsi_session_t *) 6210 task->task_session->ss_stmf_private; 6211 rw_enter(iss->iss_lockp, RW_WRITER); 6212 xd = stmf_session_prepare_report_lun_data(iss->iss_sm); 6213 rw_exit(iss->iss_lockp); 6214 6215 if (xd == NULL) { 6216 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6217 STMF_ALLOC_FAILURE, NULL); 6218 return; 6219 } 6220 6221 sz = min(sz, xd->size_left); 6222 xd->size_left = sz; 6223 minsz = min(512, sz); 6224 6225 if (dbuf == NULL) 6226 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 6227 if (dbuf == NULL) { 6228 kmem_free(xd, xd->alloc_size); 6229 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6230 STMF_ALLOC_FAILURE, NULL); 6231 return; 6232 } 6233 dbuf->db_lu_private = xd; 6234 stmf_xd_to_dbuf(dbuf); 6235 6236 atomic_and_32(&iss->iss_flags, 6237 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 6238 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 6239 
(void) stmf_xfer_data(task, dbuf, 0); 6240 return; 6241 } 6242 6243 stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE); 6244 } 6245 6246 void 6247 stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf) 6248 { 6249 stmf_i_scsi_task_t *itask = 6250 (stmf_i_scsi_task_t *)task->task_stmf_private; 6251 6252 if (dbuf->db_xfer_status != STMF_SUCCESS) { 6253 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6254 dbuf->db_xfer_status, NULL); 6255 return; 6256 } 6257 task->task_nbytes_transferred = dbuf->db_data_size; 6258 if (dbuf->db_lu_private) { 6259 /* There is more */ 6260 stmf_xd_to_dbuf(dbuf); 6261 (void) stmf_xfer_data(task, dbuf, 0); 6262 return; 6263 } 6264 /* 6265 * If this is a proxy task, it will need to be completed from the 6266 * proxy port provider. This message lets pppt know that the xfer 6267 * is complete. When we receive the status from pppt, we will 6268 * then relay that status back to the lport. 6269 */ 6270 if (itask->itask_flags & ITASK_PROXY_TASK) { 6271 stmf_ic_msg_t *ic_xfer_done_msg = NULL; 6272 stmf_status_t ic_ret = STMF_FAILURE; 6273 uint64_t session_msg_id; 6274 mutex_enter(&stmf_state.stmf_lock); 6275 session_msg_id = stmf_proxy_msg_id++; 6276 mutex_exit(&stmf_state.stmf_lock); 6277 /* send xfer done status to pppt */ 6278 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc( 6279 itask->itask_proxy_msg_id, 6280 task->task_session->ss_session_id, 6281 STMF_SUCCESS, session_msg_id); 6282 if (ic_xfer_done_msg) { 6283 ic_ret = ic_tx_msg(ic_xfer_done_msg); 6284 if (ic_ret != STMF_IC_MSG_SUCCESS) { 6285 cmn_err(CE_WARN, "unable to xmit session msg"); 6286 } 6287 } 6288 /* task will be completed from pppt */ 6289 return; 6290 } 6291 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6292 } 6293 6294 /* ARGSUSED */ 6295 void 6296 stmf_dlun0_status_done(scsi_task_t *task) 6297 { 6298 } 6299 6300 /* ARGSUSED */ 6301 void 6302 stmf_dlun0_task_free(scsi_task_t *task) 6303 { 6304 } 6305 6306 /* ARGSUSED */ 6307 stmf_status_t 6308 
stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags) 6309 { 6310 scsi_task_t *task = (scsi_task_t *)arg; 6311 stmf_i_scsi_task_t *itask = 6312 (stmf_i_scsi_task_t *)task->task_stmf_private; 6313 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 6314 int i; 6315 uint8_t map; 6316 6317 ASSERT(abort_cmd == STMF_LU_ABORT_TASK); 6318 if ((task->task_mgmt_function) && (itask->itask_flags & 6319 (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) { 6320 switch (task->task_mgmt_function) { 6321 case TM_ABORT_TASK: 6322 case TM_ABORT_TASK_SET: 6323 case TM_CLEAR_TASK_SET: 6324 case TM_LUN_RESET: 6325 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 6326 break; 6327 case TM_TARGET_RESET: 6328 case TM_TARGET_COLD_RESET: 6329 case TM_TARGET_WARM_RESET: 6330 stmf_abort_target_reset(task); 6331 break; 6332 } 6333 return (STMF_ABORT_SUCCESS); 6334 } 6335 6336 /* 6337 * OK so its not a task mgmt. Make sure we free any xd sitting 6338 * inside any dbuf. 6339 */ 6340 if ((map = itask->itask_allocated_buf_map) != 0) { 6341 for (i = 0; i < 4; i++) { 6342 if ((map & 1) && 6343 ((itask->itask_dbufs[i])->db_lu_private)) { 6344 stmf_xfer_data_t *xd; 6345 stmf_data_buf_t *dbuf; 6346 6347 dbuf = itask->itask_dbufs[i]; 6348 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 6349 dbuf->db_lu_private = NULL; 6350 kmem_free(xd, xd->alloc_size); 6351 } 6352 map >>= 1; 6353 } 6354 } 6355 return (STMF_ABORT_SUCCESS); 6356 } 6357 6358 void 6359 stmf_dlun0_task_poll(struct scsi_task *task) 6360 { 6361 /* Right now we only do this for handling task management functions */ 6362 ASSERT(task->task_mgmt_function); 6363 6364 switch (task->task_mgmt_function) { 6365 case TM_ABORT_TASK: 6366 case TM_ABORT_TASK_SET: 6367 case TM_CLEAR_TASK_SET: 6368 case TM_LUN_RESET: 6369 (void) stmf_lun_reset_poll(task->task_lu, task, 0); 6370 return; 6371 case TM_TARGET_RESET: 6372 case TM_TARGET_COLD_RESET: 6373 case TM_TARGET_WARM_RESET: 6374 stmf_target_reset_poll(task); 6375 
return; 6376 } 6377 } 6378 6379 /* ARGSUSED */ 6380 void 6381 stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg) 6382 { 6383 /* This function will never be called */ 6384 cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd); 6385 } 6386 6387 void 6388 stmf_dlun_init() 6389 { 6390 stmf_i_lu_t *ilu; 6391 6392 dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0); 6393 dlun0->lu_task_alloc = stmf_dlun0_task_alloc; 6394 dlun0->lu_new_task = stmf_dlun0_new_task; 6395 dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done; 6396 dlun0->lu_send_status_done = stmf_dlun0_status_done; 6397 dlun0->lu_task_free = stmf_dlun0_task_free; 6398 dlun0->lu_abort = stmf_dlun0_abort; 6399 dlun0->lu_task_poll = stmf_dlun0_task_poll; 6400 dlun0->lu_ctl = stmf_dlun0_ctl; 6401 6402 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 6403 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 6404 } 6405 6406 stmf_status_t 6407 stmf_dlun_fini() 6408 { 6409 stmf_i_lu_t *ilu; 6410 6411 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 6412 6413 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 6414 if (ilu->ilu_ntasks) { 6415 stmf_i_scsi_task_t *itask, *nitask; 6416 6417 nitask = ilu->ilu_tasks; 6418 do { 6419 itask = nitask; 6420 nitask = itask->itask_lu_next; 6421 dlun0->lu_task_free(itask->itask_task); 6422 stmf_free(itask->itask_task); 6423 } while (nitask != NULL); 6424 6425 } 6426 stmf_free(dlun0); 6427 return (STMF_SUCCESS); 6428 } 6429 6430 void 6431 stmf_abort_target_reset(scsi_task_t *task) 6432 { 6433 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 6434 task->task_session->ss_stmf_private; 6435 stmf_lun_map_t *lm; 6436 stmf_lun_map_ent_t *lm_ent; 6437 stmf_i_lu_t *ilu; 6438 int i; 6439 6440 ASSERT(iss->iss_flags & ISS_RESET_ACTIVE); 6441 6442 rw_enter(iss->iss_lockp, RW_READER); 6443 lm = iss->iss_sm; 6444 for (i = 0; i < lm->lm_nentries; i++) { 6445 if (lm->lm_plus[i] == NULL) 6446 continue; 6447 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6448 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 
6449 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 6450 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 6451 } 6452 } 6453 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6454 rw_exit(iss->iss_lockp); 6455 } 6456 6457 /* 6458 * The return value is only used by function managing target reset. 6459 */ 6460 stmf_status_t 6461 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset) 6462 { 6463 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 6464 int ntasks_pending; 6465 6466 ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free; 6467 /* 6468 * This function is also used during Target reset. The idea is that 6469 * once all the commands are aborted, call the LU's reset entry 6470 * point (abort entry point with a reset flag). But if this Task 6471 * mgmt is running on this LU then all the tasks cannot be aborted. 6472 * one task (this task) will still be running which is OK. 6473 */ 6474 if ((ntasks_pending == 0) || ((task->task_lu == lu) && 6475 (ntasks_pending == 1))) { 6476 stmf_status_t ret; 6477 6478 if ((task->task_mgmt_function == TM_LUN_RESET) || 6479 (task->task_mgmt_function == TM_TARGET_RESET) || 6480 (task->task_mgmt_function == TM_TARGET_WARM_RESET) || 6481 (task->task_mgmt_function == TM_TARGET_COLD_RESET)) { 6482 ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0); 6483 } else { 6484 ret = STMF_SUCCESS; 6485 } 6486 if (ret == STMF_SUCCESS) { 6487 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 6488 } 6489 if (target_reset) { 6490 return (ret); 6491 } 6492 if (ret == STMF_SUCCESS) { 6493 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6494 return (ret); 6495 } 6496 if (ret != STMF_BUSY) { 6497 stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL); 6498 return (ret); 6499 } 6500 } 6501 6502 if (target_reset) { 6503 /* Tell target reset polling code that we are not done */ 6504 return (STMF_BUSY); 6505 } 6506 6507 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 6508 != STMF_SUCCESS) { 6509 
stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6510 STMF_ALLOC_FAILURE, NULL); 6511 return (STMF_SUCCESS); 6512 } 6513 6514 return (STMF_SUCCESS); 6515 } 6516 6517 void 6518 stmf_target_reset_poll(struct scsi_task *task) 6519 { 6520 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 6521 task->task_session->ss_stmf_private; 6522 stmf_lun_map_t *lm; 6523 stmf_lun_map_ent_t *lm_ent; 6524 stmf_i_lu_t *ilu; 6525 stmf_status_t ret; 6526 int i; 6527 int not_done = 0; 6528 6529 ASSERT(iss->iss_flags & ISS_RESET_ACTIVE); 6530 6531 rw_enter(iss->iss_lockp, RW_READER); 6532 lm = iss->iss_sm; 6533 for (i = 0; i < lm->lm_nentries; i++) { 6534 if (lm->lm_plus[i] == NULL) 6535 continue; 6536 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6537 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 6538 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 6539 rw_exit(iss->iss_lockp); 6540 ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1); 6541 rw_enter(iss->iss_lockp, RW_READER); 6542 if (ret == STMF_SUCCESS) 6543 continue; 6544 not_done = 1; 6545 if (ret != STMF_BUSY) { 6546 rw_exit(iss->iss_lockp); 6547 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6548 STMF_ABORTED, NULL); 6549 return; 6550 } 6551 } 6552 } 6553 rw_exit(iss->iss_lockp); 6554 6555 if (not_done) { 6556 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 6557 != STMF_SUCCESS) { 6558 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6559 STMF_ALLOC_FAILURE, NULL); 6560 return; 6561 } 6562 return; 6563 } 6564 6565 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6566 6567 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6568 } 6569 6570 stmf_status_t 6571 stmf_lu_add_event(stmf_lu_t *lu, int eventid) 6572 { 6573 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 6574 6575 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 6576 return (STMF_INVALID_ARG); 6577 } 6578 6579 STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid); 6580 return (STMF_SUCCESS); 6581 } 6582 6583 stmf_status_t 6584 stmf_lu_remove_event(stmf_lu_t *lu, int eventid) 6585 { 
	/* Look up our per-LU private state hanging off the framework LU. */
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

	/* STMF_EVENT_ALL clears the entire event mask in one shot. */
	if (eventid == STMF_EVENT_ALL) {
		STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
		return (STMF_SUCCESS);
	}

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Enable delivery of a single event id to the given local port.
 * Returns STMF_INVALID_ARG for an out-of-range event id.
 */
stmf_status_t
stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Disable delivery of one event id (or all, if STMF_EVENT_ALL) for the
 * given local port.  Returns STMF_INVALID_ARG for an out-of-range id.
 */
stmf_status_t
stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;

	if (eventid == STMF_EVENT_ALL) {
		STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
		return (STMF_SUCCESS);
	}

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
	return (STMF_SUCCESS);
}

/*
 * Deliver an event to an LU's registered handler, but only if the event
 * is enabled in the LU's event mask and a handler is actually installed.
 */
void
stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
{
	if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
	    (ilu->ilu_lu->lu_event_handler != NULL)) {
		ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
	}
}

/*
 * Deliver an event to a local port's registered handler, subject to the
 * port's event mask and the presence of a handler.
 */
void
stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
    uint32_t flags)
{
	if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
	    (ilport->ilport_lport->lport_event_handler != NULL)) {
		ilport->ilport_lport->lport_event_handler(
		    ilport->ilport_lport, eventid, arg, flags);
	}
}

/*
 * Start the STMF service thread: create a single-threaded taskq and
 * dispatch stmf_svc() onto it.  Idempotent — a no-op if the service
 * has already been started (STMF_SVC_STARTED set by stmf_svc itself).
 * NOTE(review): the ddi_taskq_create/dispatch return values are not
 * checked here; with DDI_SLEEP the dispatch should not fail, but a
 * failed taskq create would be dereferenced — presumably acceptable
 * at attach time, verify against caller.
 */
void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}

/*
 * Stop the STMF service thread.  Sets STMF_SVC_TERMINATE, wakes the
 * thread, then polls (up to 500 x 10ms = 5s) for it to clear
 * STMF_SVC_STARTED.  Returns STMF_BUSY if the thread did not exit in
 * time (taskq is then NOT destroyed), STMF_SUCCESS otherwise.
 */
stmf_status_t
stmf_svc_fini()
{
	uint32_t i;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
		stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* Wait for 5 seconds */
	for (i = 0; i < 500; i++) {
		if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
			delay(drv_usectohz(10000));
		else
			break;
	}
	if (i == 500)
		return (STMF_BUSY);

	ddi_taskq_destroy(stmf_state.stmf_svc_taskq);

	return (STMF_SUCCESS);
}

/*
 * The STMF service thread.  Runs forever (until STMF_SVC_TERMINATE is
 * set) performing deferred work:
 *
 *  - drains stmf_svc_active: new online/offline requests queued by
 *    stmf_svc_queue().  LU-offline requests additionally unmap the LU
 *    from all sessions and abort its pending tasks.  Each processed
 *    request is moved to the wait queue.
 *  - walks stmf_svc_waiting and invokes the lu_ctl/lport_ctl entry
 *    points once a request is ready (an LU offline waits until all of
 *    the LU's tasks are free again); completed requests are freed.
 *  - periodic housekeeping while idle: ilu task-timing checks,
 *    free-task draining, worker-pool management, and generation of
 *    LPORT_EVENT_INITIAL_LUN_MAPPED events for sessions that just got
 *    their first LUN.
 *
 * Locking: runs with stmf_state.stmf_lock held at the loop label; the
 * lock is dropped around callbacks that may block or re-enter STMF
 * (lu_ctl, lport_ctl, stmf_task_lu_killall, event generation).  The
 * ilport list is re-scanned from the head after any such drop because
 * it may have changed.  When no work is pending the thread waits on
 * stmf_cv with a 20ms timeout (td).
 */
/* ARGSUSED */
void
stmf_svc(void *arg)
{
	stmf_svc_req_t *req, **preq;
	clock_t td;
	clock_t drain_start, drain_next = 0;
	clock_t timing_start, timing_next = 0;
	clock_t worker_delay = 0;
	int deq;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	/* Idle wait quantum: 20ms */
	td = drv_usectohz(20000);

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

stmf_svc_loop:
	/* stmf_lock is held here on every iteration */
	if (stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE) {
		stmf_state.stmf_svc_flags &=
		    ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
		mutex_exit(&stmf_state.stmf_lock);
		return;
	}

	if (stmf_state.stmf_svc_active) {
		int waitq_add = 0;
		/* Pop one request off the active (incoming) list. */
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LU_ONLINE:
			/* Nothing to do */
			waitq_add = 1;
			break;

		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			mutex_enter(&stmf_state.stmf_lock);
			waitq_add = 1;
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		if (waitq_add) {
			/* Put it in the wait queue */
			req->svc_next = stmf_state.stmf_svc_waiting;
			stmf_state.stmf_svc_waiting = req;
		}
	}

	/* The waiting list is not going to be modified by anybody else */
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Walk the wait queue without stmf_lock (only this thread touches
	 * it) and invoke the providers' ctl entry points.  deq marks
	 * whether the request completed and can be unlinked and freed.
	 */
	for (preq = &stmf_state.stmf_svc_waiting; (*preq) != NULL; ) {
		req = *preq;
		deq = 0;
		switch (req->svc_cmd) {
		case STMF_CMD_LU_ONLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LU_OFFLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			/* Hold the offline until all tasks are free again. */
			if (ilu->ilu_ntasks != ilu->ilu_ntasks_free)
				break;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_ONLINE:
			lport = (stmf_local_port_t *)req->svc_obj;
			deq = 1;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		}
		if (deq) {
			*preq = req->svc_next;
			kmem_free(req, req->svc_req_alloc_size);
		} else {
			preq = &req->svc_next;
		}
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_active == NULL) {
		/* Do timeouts */
		/*
		 * ILU timing checks: a full round over the ilu list is
		 * re-run every 5s; a partially-completed round continues
		 * after 1s.  timing_start is assigned only when a new
		 * round begins, before its use below.
		 */
		if (stmf_state.stmf_nlus &&
		    ((!timing_next) || (ddi_get_lbolt() >= timing_next))) {
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_timing =
				    stmf_state.stmf_ilulist;
				timing_start = ddi_get_lbolt();
			}
			stmf_check_ilu_timing();
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we finished a complete round */
				timing_next =
				    timing_start + drv_usectohz(5*1000*1000);
			} else {
				/* we still have some ilu items to check */
				timing_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			/* New requests take priority over housekeeping. */
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}
		/* Check if there are free tasks to clear */
		/* Same round structure as above: full round every 10s. */
		if (stmf_state.stmf_nlus &&
		    ((!drain_next) || (ddi_get_lbolt() >= drain_next))) {
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_draining =
				    stmf_state.stmf_ilulist;
				drain_start = ddi_get_lbolt();
			}
			stmf_check_freetask();
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we finished a complete round */
				drain_next =
				    drain_start + drv_usectohz(10*1000*1000);
			} else {
				/* we still have some ilu items to check */
				drain_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}

		/* Check if we need to run worker_mgmt */
		if (ddi_get_lbolt() > worker_delay) {
			stmf_worker_mgmt();
			worker_delay = ddi_get_lbolt() +
			    stmf_worker_mgmt_delay;
		}

		/* Check if any active session got its 1st LUN */
		if (stmf_state.stmf_process_initial_luns) {
			int stmf_level = 0;
			int port_level;
			for (ilport = stmf_state.stmf_ilportlist; ilport;
			    ilport = next_ilport) {
				next_ilport = ilport->ilport_next;
				if ((ilport->ilport_flags &
				    ILPORT_SS_GOT_INITIAL_LUNS) == 0) {
					continue;
				}
				port_level = 0;
				rw_enter(&ilport->ilport_lock, RW_READER);
				for (iss = ilport->ilport_ss_list; iss;
				    iss = iss->iss_next) {
					if ((iss->iss_flags &
					    ISS_GOT_INITIAL_LUNS) == 0) {
						continue;
					}
					port_level++;
					stmf_level++;
					/*
					 * Mark the session busy with event
					 * delivery and drop both locks so
					 * the handler may block / re-enter.
					 */
					atomic_and_32(&iss->iss_flags,
					    ~ISS_GOT_INITIAL_LUNS);
					atomic_or_32(&iss->iss_flags,
					    ISS_EVENT_ACTIVE);
					rw_exit(&ilport->ilport_lock);
					mutex_exit(&stmf_state.stmf_lock);
					stmf_generate_lport_event(ilport,
					    LPORT_EVENT_INITIAL_LUN_MAPPED,
					    iss->iss_ss, 0);
					atomic_and_32(&iss->iss_flags,
					    ~ISS_EVENT_ACTIVE);
					mutex_enter(&stmf_state.stmf_lock);
					/*
					 * scan all the ilports again as the
					 * ilport list might have changed.
					 */
					next_ilport =
					    stmf_state.stmf_ilportlist;
					break;
				}
				if (port_level == 0) {
					/* No flagged sessions left here. */
					atomic_and_32(&ilport->ilport_flags,
					    ~ILPORT_SS_GOT_INITIAL_LUNS);
				}
				/* drop the lock if we are holding it. */
				if (rw_lock_held(&ilport->ilport_lock))
					rw_exit(&ilport->ilport_lock);

				/* Max 4 session at a time */
				if (stmf_level >= 4) {
					break;
				}
			}
			if (stmf_level == 0) {
				/* Nothing flagged; stop scanning. */
				stmf_state.stmf_process_initial_luns = 0;
			}
		}

		/* Idle: clear ACTIVE so queuers know to cv_signal us. */
		stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
		(void) cv_reltimedwait(&stmf_state.stmf_cv,
		    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
		stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
	}
	goto stmf_svc_loop;
}

/*
 * Queue an online/offline request for the service thread.  The request
 * (plus a copy of any additional-info string, placed immediately after
 * the struct) is allocated in one kmem_zalloc so stmf_svc can free it
 * with a single kmem_free of svc_req_alloc_size.  Wakes the service
 * thread only if it is idle (STMF_SVC_ACTIVE clear).  Must be called
 * without stmf_lock held.
 */
void
stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
{
	stmf_svc_req_t *req;
	int s;

	ASSERT(!mutex_owned(&stmf_state.stmf_lock));
	s = sizeof (stmf_svc_req_t);
	if (info->st_additional_info) {
		s += strlen(info->st_additional_info) + 1;
	}
	req = kmem_zalloc(s, KM_SLEEP);

	req->svc_cmd = cmd;
	req->svc_obj = obj;
	req->svc_info.st_rflags = info->st_rflags;
	if (info->st_additional_info) {
		/* Point at the string area co-allocated after the struct. */
		req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
		    sizeof (stmf_svc_req_t)));
		(void) strcpy(req->svc_info.st_additional_info,
		    info->st_additional_info);
	}
	req->svc_req_alloc_size = s;

	mutex_enter(&stmf_state.stmf_lock);
	req->svc_next = stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_active = req;
	if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);
}

/*
 * Append a printf-style message, prefixed with "ident:lbolt: " and
 * terminated with a newline, to the global circular trace buffer.
 * Messages are clamped to 158 bytes before the newline; the write
 * index wraps to 0 once it passes trace_buf_size - 320.  No-op when
 * tracing is disabled.
 */
void
stmf_trace(caddr_t ident, const char *fmt, ...)
{
	va_list args;
	char tbuf[160];
	int len;

	if (!stmf_trace_on)
		return;
	len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
	    ddi_get_lbolt());
	va_start(args, fmt);
	len += vsnprintf(tbuf + len, 158 - len, fmt, args);
	va_end(args);

	/* vsnprintf returns the would-be length; clamp on truncation. */
	if (len > 158) {
		len = 158;
	}
	tbuf[len++] = '\n';
	tbuf[len] = 0;

	mutex_enter(&trace_buf_lock);
	bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
	trace_buf_curndx += len;
	/* Wrap well before the end so the next message always fits. */
	if (trace_buf_curndx > (trace_buf_size - 320))
		trace_buf_curndx = 0;
	mutex_exit(&trace_buf_lock);
}

/*
 * Reset the trace buffer to empty (index 0, NUL at the start).
 * No-op when tracing is disabled.
 */
void
stmf_trace_clear()
{
	if (!stmf_trace_on)
		return;
	mutex_enter(&trace_buf_lock);
	trace_buf_curndx = 0;
	if (trace_buf_size > 0)
		stmf_trace_buf[0] = 0;
	mutex_exit(&trace_buf_lock);
}

/*
 * Take the task's LU (offline_lu != 0) or local port offline after an
 * unrecoverable abort.  Builds a state-change-info with the RESET and
 * LU/LPORT abort reason flags, traces the action if the object is
 * currently online, and hands the offline request to stmf_ctl().
 * 'info' may be NULL; it is passed through as the additional info
 * string (NOTE(review): change_info.st_additional_info points at the
 * caller's buffer — stmf_ctl is expected to copy it; see
 * stmf_svc_queue above, which does).
 */
static void
stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
{
	stmf_state_change_info_t change_info;
	void *ctl_private;
	uint32_t ctl_cmd;
	int msg = 0;

	stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
	    offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
	change_info.st_additional_info = info;
	if (offline_lu) {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LU_ABORT;
		ctl_private = task->task_lu;
		if (((stmf_i_lu_t *)
		    task->task_lu->lu_stmf_private)->ilu_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LU_OFFLINE;
	} else {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LPORT_ABORT;
		ctl_private = task->task_lport;
		if (((stmf_i_local_port_t *)
		    task->task_lport->lport_stmf_private)->ilport_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LPORT_OFFLINE;
	}

	if (msg) {
		stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
		    offline_lu ? "LU" : "LPORT", info ? info :
		    "<no additional info>");
	}
	(void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
}