/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/disp.h>
#include <sys/byteorder.h>
#include <sys/atomic.h>
#include <sys/ethernet.h>
#include <sys/sdt.h>
#include <sys/nvpair.h>
#include <sys/zone.h>

#include <stmf.h>
#include <lpif.h>
#include <portif.h>
#include <stmf_ioctl.h>
#include <stmf_impl.h>
#include <lun_map.h>
#include <stmf_state.h>

static uint64_t stmf_session_counter = 0;
static uint16_t stmf_rtpid_counter = 0;

static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);
static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval);
static int stmf_get_stmf_state(stmf_state_desc_t *std);
static int stmf_set_stmf_state(stmf_state_desc_t *std);
static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
    char *info);
void stmf_svc_init();
stmf_status_t stmf_svc_fini();
void stmf_svc(void *arg);
void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
void stmf_check_freetask();
void stmf_abort_target_reset(scsi_task_t *task);
stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
    int target_reset);
void stmf_target_reset_poll(struct scsi_task *task);
void stmf_handle_lun_reset(scsi_task_t *task);
void stmf_handle_target_reset(scsi_task_t *task);
void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf);
int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret);
int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret);
void stmf_delete_ppd(stmf_pp_data_t *ppd);
void stmf_delete_all_ppds();
void stmf_trace_clear();
void stmf_worker_init();
stmf_status_t stmf_worker_fini();
void stmf_worker_mgmt();
void stmf_worker_task(void *arg);

static void stmf_update_kstat_lu_q(scsi_task_t *, void());
static void stmf_update_kstat_lport_q(scsi_task_t *, void());
static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
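/*
 * Note: the two *_q helpers above take an old-style, unprototyped
 * function-pointer parameter (`void()`); callers are expected to pass a
 * kstat queue accounting routine that accepts a kstat_io_t pointer
 * (presumably kstat_waitq_enter()/kstat_waitq_exit() and friends -- the
 * call sites are outside this section, so that is an assumption).
 */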
extern struct mod_ops mod_driverops;

/* =====[ Tunables ]===== */
/* Internal tracing */
volatile int stmf_trace_on = 1;
volatile int stmf_trace_buf_size = (1 * 1024 * 1024);
/*
 * The default task timeout is 75 seconds because we want the host to
 * time out first; host timeouts are typically 60 seconds.
 */
volatile int stmf_default_task_timeout = 75;
/*
 * Setting this to 1 means that you are responsible for loading the config
 * and for keeping things in sync with the persistent database.
 */
volatile int stmf_allow_modunload = 0;

volatile int stmf_max_nworkers = 256;
volatile int stmf_min_nworkers = 4;
volatile int stmf_worker_scale_down_delay = 20;

/* === [ Debugging and fault injection ] === */
#ifdef DEBUG
volatile int stmf_drop_task_counter = 0;
volatile int stmf_drop_buf_counter = 0;

#endif

stmf_state_t stmf_state;
static stmf_lu_t *dlun0;

static uint8_t stmf_first_zero[] =
	{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
	{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };

static kmutex_t trace_buf_lock;
static int trace_buf_size;
static int trace_buf_curndx;
caddr_t stmf_trace_buf;

static enum {
	STMF_WORKERS_DISABLED = 0,
	STMF_WORKERS_ENABLING,
	STMF_WORKERS_ENABLED
} stmf_workers_state = STMF_WORKERS_DISABLED;
static int stmf_i_max_nworkers;
static int stmf_i_min_nworkers;
static int stmf_nworkers_cur;	/* # of workers currently running */
static int stmf_nworkers_needed; /* # of workers that need to be running */
static int stmf_worker_sel_counter = 0;
static uint32_t stmf_cur_ntasks = 0;
static clock_t stmf_wm_last = 0;
/*
 * This is equal to stmf_nworkers_cur while we are increasing the number
 * of workers and to stmf_nworkers_needed while we are decreasing it.
 */
static int stmf_nworkers_accepting_cmds;
static stmf_worker_t *stmf_workers = NULL;
static clock_t stmf_worker_mgmt_delay = 2;
static clock_t stmf_worker_scale_down_timer = 0;
static int stmf_worker_scale_down_qd = 0;

static struct cb_ops stmf_cb_ops = {
	stmf_open,			/* open */
	stmf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	stmf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,
	stmf_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	stmf_attach,
	stmf_detach,
	nodev,			/* reset */
	&stmf_cb_ops,
	NULL,			/* bus_ops */
	NULL			/* power */
};

#define	STMF_NAME		"COMSTAR STMF"
#define	STMF_MODULE_NAME	"stmf"

static struct modldrv modldrv = {
	&mod_driverops,
	STMF_NAME,
	&stmf_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);
	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
	trace_buf_size = stmf_trace_buf_size;
	trace_buf_curndx = 0;
	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
	bzero(&stmf_state, sizeof (stmf_state_t));
	/* STMF service is off by default */
	stmf_state.stmf_service_running = 0;
	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	stmf_session_counter = (uint64_t)ddi_get_lbolt();
	stmf_view_init();
	stmf_svc_init();
	stmf_dlun_init();
	return (ret);
}

int
_fini(void)
{
	int ret;

	if (stmf_state.stmf_service_running)
		return (EBUSY);
	if ((!stmf_allow_modunload) &&
	    (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
		return (EBUSY);
	}
	if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
		return (EBUSY);
	}
	if (stmf_dlun_fini() != STMF_SUCCESS)
		return (EBUSY);
	if (stmf_worker_fini() != STMF_SUCCESS) {
		stmf_dlun_init();
		return (EBUSY);
	}
	if (stmf_svc_fini() != STMF_SUCCESS) {
		stmf_dlun_init();
		stmf_worker_init();
		return (EBUSY);
	}

	ret = mod_remove(&modlinkage);
	if (ret) {
		stmf_svc_init();
		stmf_dlun_init();
		stmf_worker_init();
		return (ret);
	}

	stmf_view_clear_config();
	kmem_free(stmf_trace_buf, stmf_trace_buf_size);
	mutex_destroy(&trace_buf_lock);
	mutex_destroy(&stmf_state.stmf_lock);
	cv_destroy(&stmf_state.stmf_cv);
	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = stmf_state.stmf_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result =
		    (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
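/*
 * Attach creates the single "admin" character minor node (minor 0, node
 * type DDI_NT_STMF) through which all of the ioctls below are issued;
 * detach removes it.  There is no per-instance soft state -- the driver
 * keeps everything in the global stmf_state.
 */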
static int
stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		stmf_state.stmf_dip = dip;

		if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
		    DDI_NT_STMF, 0) != DDI_SUCCESS) {
			break;
		}
		ddi_report_dev(dip);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

static int
stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		ddi_remove_minor_node(dip, 0);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

/* ARGSUSED */
static int
stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}
	if (flag & FEXCL) {
		if (stmf_state.stmf_opened) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EBUSY);
		}
		stmf_state.stmf_exclusive_open = 1;
	}
	stmf_state.stmf_opened = 1;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}

/* ARGSUSED */
static int
stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_opened = 0;
	if (stmf_state.stmf_exclusive_open &&
	    (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
		stmf_state.stmf_config_state = STMF_CONFIG_NONE;
		stmf_delete_all_ppds();
		stmf_view_clear_config();
		stmf_view_init();
	}
	stmf_state.stmf_exclusive_open = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}

int
stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
    void **ibuf, void **obuf)
{
	int ret;

	*ibuf = NULL;
	*obuf = NULL;
	*iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);

	ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
	if (ret) {
		/*
		 * Free the header we just allocated instead of returning
		 * directly; returning here used to leak *iocd.
		 */
		ret = EFAULT;
		goto copyin_iocdata_done;
	}
	if ((*iocd)->stmf_version != STMF_VERSION_1) {
		ret = EINVAL;
		goto copyin_iocdata_done;
	}
	if ((*iocd)->stmf_ibuf_size) {
		*ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
		ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
		    *ibuf, (*iocd)->stmf_ibuf_size, mode);
	}
	if ((*iocd)->stmf_obuf_size)
		*obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);

	if (ret == 0)
		return (0);
	ret = EFAULT;
copyin_iocdata_done:;
	if (*obuf) {
		kmem_free(*obuf, (*iocd)->stmf_obuf_size);
		*obuf = NULL;
	}
	if (*ibuf) {
		kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
		*ibuf = NULL;
	}
	kmem_free(*iocd, sizeof (stmf_iocdata_t));
	return (ret);
}

int
stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
{
	int ret;

	if (iocd->stmf_obuf_size) {
		ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
		    iocd->stmf_obuf_size, mode);
		if (ret)
			return (EFAULT);
	}
	ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
	if (ret)
		return (EFAULT);
	return (0);
}
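/*
 * ioctl marshalling convention: every STMF ioctl passes a fixed-size
 * stmf_iocdata_t header that describes an optional input buffer (ibuf)
 * and an optional output buffer (obuf), each copied separately.  For the
 * list ioctls, stmf_obuf_max_nentries reports how many entries exist
 * while stmf_obuf_nentries reports how many were actually copied out, so
 * a caller whose buffer was too small can retry with a bigger one.
 *
 * Device identifiers used below follow the SCSI device-ID descriptor
 * layout: byte 3 holds the identifier length and the identifier itself
 * starts at byte 4 (hence the recurring id[3] / id + 4 arithmetic).
 */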
/* ARGSUSED */
static int
stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	stmf_iocdata_t *iocd;
	void *ibuf = NULL, *obuf = NULL;
	slist_lu_t *luid_list;
	slist_target_port_t *lportid_list;
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	slist_scsi_session_t *iss_list;
	sioc_lu_props_t *lup;
	sioc_target_port_props_t *lportp;
	stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
	uint64_t *ppi_token = NULL;
	uint8_t *p_id, *id;
	stmf_state_desc_t *std;
	stmf_status_t ctl_ret;
	stmf_state_change_info_t ssi;
	int ret = 0;
	uint32_t n;
	int i;
	stmf_group_op_data_t *grp_entry;
	stmf_group_name_t *grpname;
	stmf_view_op_entry_t *ve;
	stmf_id_type_t idtype;
	stmf_id_data_t *id_entry;
	stmf_id_list_t *id_list;
	stmf_view_entry_t *view_entry;
	uint32_t veid;

	if ((cmd & 0xff000000) != STMF_IOCTL) {
		return (ENOTTY);
	}

	if (drv_priv(credp) != 0) {
		return (EPERM);
	}

	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
	if (ret)
		return (ret);
	iocd->stmf_error = 0;

	switch (cmd) {
	case STMF_IOCTL_LU_LIST:
		/* retrieves both registered/unregistered */
		mutex_enter(&stmf_state.stmf_lock);
		id_list = &stmf_state.stmf_luid_list;
		n = min(id_list->id_count,
		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
		iocd->stmf_obuf_max_nentries = id_list->id_count;
		luid_list = (slist_lu_t *)obuf;
		id_entry = id_list->idl_head;
		for (i = 0; i < n; i++) {
			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
			id_entry = id_entry->id_next;
		}

		/*
		 * Second pass: registered LUs that have no id record
		 * (no view entries) are appended after the known ids.
		 */
		n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
			id = (uint8_t *)ilu->ilu_lu->lu_id;
			if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
				iocd->stmf_obuf_max_nentries++;
				if (i < n) {
					bcopy(id + 4, luid_list[i].lu_guid,
					    sizeof (slist_lu_t));
					i++;
				}
			}
		}
		iocd->stmf_obuf_nentries = i;
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_REG_LU_LIST:
		mutex_enter(&stmf_state.stmf_lock);
		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
		n = min(stmf_state.stmf_nlus,
		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
		iocd->stmf_obuf_nentries = n;
		ilu = stmf_state.stmf_ilulist;
		luid_list = (slist_lu_t *)obuf;
		for (i = 0; i < n; i++) {
			uint8_t *id;
			id = (uint8_t *)ilu->ilu_lu->lu_id;
			bcopy(id + 4, luid_list[i].lu_guid, 16);
			ilu = ilu->ilu_next;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_VE_LU_LIST:
		mutex_enter(&stmf_state.stmf_lock);
		id_list = &stmf_state.stmf_luid_list;
		n = min(id_list->id_count,
		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
		iocd->stmf_obuf_max_nentries = id_list->id_count;
		iocd->stmf_obuf_nentries = n;
		luid_list = (slist_lu_t *)obuf;
		id_entry = id_list->idl_head;
		for (i = 0; i < n; i++) {
			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
			id_entry = id_entry->id_next;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_TARGET_PORT_LIST:
		mutex_enter(&stmf_state.stmf_lock);
		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
		n = min(stmf_state.stmf_nlports,
		    (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
		iocd->stmf_obuf_nentries = n;
		ilport = stmf_state.stmf_ilportlist;
		lportid_list = (slist_target_port_t *)obuf;
		for (i = 0; i < n; i++) {
			uint8_t *id;
			id = (uint8_t *)ilport->ilport_lport->lport_id;
			bcopy(id, lportid_list[i].target, id[3] + 4);
			ilport = ilport->ilport_next;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_SESSION_LIST:
		p_id = (uint8_t *)ibuf;
		if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
		    (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
			ret = EINVAL;
			break;
		}
		mutex_enter(&stmf_state.stmf_lock);
		for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
		    ilport->ilport_next) {
			uint8_t *id;
			id = (uint8_t *)ilport->ilport_lport->lport_id;
			if ((p_id[3] == id[3]) &&
			    (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
				break;
			}
		}
		if (ilport == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = ENOENT;
			break;
		}
		iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
		n = min(ilport->ilport_nsessions,
		    (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
		iocd->stmf_obuf_nentries = n;
		iss = ilport->ilport_ss_list;
		iss_list = (slist_scsi_session_t *)obuf;
		for (i = 0; i < n; i++) {
			uint8_t *id;
			id = (uint8_t *)iss->iss_ss->ss_rport_id;
			bcopy(id, iss_list[i].initiator, id[3] + 4);
			iss_list[i].creation_time = (uint32_t)
			    iss->iss_creation_time;
			if (iss->iss_ss->ss_rport_alias) {
				(void) strncpy(iss_list[i].alias,
				    iss->iss_ss->ss_rport_alias, 255);
				iss_list[i].alias[255] = 0;
			} else {
				iss_list[i].alias[0] = 0;
			}
			iss = iss->iss_next;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_GET_LU_PROPERTIES:
		p_id = (uint8_t *)ibuf;
		if ((iocd->stmf_ibuf_size < 16) ||
		    (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
		    (p_id[0] == 0)) {
			ret = EINVAL;
			break;
		}
		mutex_enter(&stmf_state.stmf_lock);
		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
				break;
		}
		if (ilu == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = ENOENT;
			break;
		}
		lup = (sioc_lu_props_t *)obuf;
		bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
		lup->lu_state = ilu->ilu_state & 0x0f;
		lup->lu_present = 1; /* XXX */
		(void) strncpy(lup->lu_provider_name,
		    ilu->ilu_lu->lu_lp->lp_name, 255);
		lup->lu_provider_name[254] = 0;
		if (ilu->ilu_lu->lu_alias) {
			(void) strncpy(lup->lu_alias,
			    ilu->ilu_lu->lu_alias, 255);
			lup->lu_alias[255] = 0;
		} else {
			lup->lu_alias[0] = 0;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
		p_id = (uint8_t *)ibuf;
		if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
		    (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
		    (iocd->stmf_obuf_size <
		    sizeof (sioc_target_port_props_t))) {
			ret = EINVAL;
			break;
		}
		mutex_enter(&stmf_state.stmf_lock);
		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = ilport->ilport_next) {
			uint8_t *id;
			id = (uint8_t *)ilport->ilport_lport->lport_id;
			if ((p_id[3] == id[3]) &&
			    (bcmp(p_id+4, id+4, id[3]) == 0))
				break;
		}
		if (ilport == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = ENOENT;
			break;
		}
		lportp = (sioc_target_port_props_t *)obuf;
		bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
		    ilport->ilport_lport->lport_id->ident_length + 4);
		lportp->tgt_state = ilport->ilport_state & 0x0f;
		lportp->tgt_present = 1; /* XXX */
		(void) strncpy(lportp->tgt_provider_name,
		    ilport->ilport_lport->lport_pp->pp_name, 255);
		lportp->tgt_provider_name[254] = 0;
		if (ilport->ilport_lport->lport_alias) {
			(void) strncpy(lportp->tgt_alias,
			    ilport->ilport_lport->lport_alias, 255);
			lportp->tgt_alias[255] = 0;
		} else {
			lportp->tgt_alias[0] = 0;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;
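	/*
	 * The state-changing ioctls below share a pattern: take
	 * stmf_inventory_locked to keep the LU/port lists stable, drop
	 * stmf_lock across the (potentially slow) stmf_ctl() call, and
	 * treat STMF_ALREADY as success since the object is already in
	 * the requested state.
	 */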
	case STMF_IOCTL_SET_STMF_STATE:
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
			ret = EINVAL;
			break;
		}
		ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
		break;

	case STMF_IOCTL_GET_STMF_STATE:
		if ((obuf == NULL) ||
		    (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
			ret = EINVAL;
			break;
		}
		ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
		break;

	case STMF_IOCTL_SET_LU_STATE:
		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
		ssi.st_additional_info = NULL;
		std = (stmf_state_desc_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
			ret = EINVAL;
			break;
		}
		p_id = std->ident;
		mutex_enter(&stmf_state.stmf_lock);
		if (stmf_state.stmf_inventory_locked) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = EBUSY;
			break;
		}
		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
				break;
		}
		if (ilu == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = ENOENT;
			break;
		}
		stmf_state.stmf_inventory_locked = 1;
		mutex_exit(&stmf_state.stmf_lock);
		cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
		    STMF_CMD_LU_OFFLINE;
		ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
		if (ctl_ret == STMF_ALREADY)
			ret = 0;
		else if (ctl_ret != STMF_SUCCESS)
			ret = EIO;
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_SET_TARGET_PORT_STATE:
		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
		ssi.st_additional_info = NULL;
		std = (stmf_state_desc_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
			ret = EINVAL;
			break;
		}
		p_id = std->ident;
		mutex_enter(&stmf_state.stmf_lock);
		if (stmf_state.stmf_inventory_locked) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = EBUSY;
			break;
		}
		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = ilport->ilport_next) {
			uint8_t *id;
			id = (uint8_t *)ilport->ilport_lport->lport_id;
			if ((id[3] == p_id[3]) &&
			    (bcmp(id+4, p_id+4, id[3]) == 0)) {
				break;
			}
		}
		if (ilport == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			ret = ENOENT;
			break;
		}
		stmf_state.stmf_inventory_locked = 1;
		mutex_exit(&stmf_state.stmf_lock);
		cmd = (std->state == STMF_STATE_ONLINE) ?
		    STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
		ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
		if (ctl_ret == STMF_ALREADY)
			ret = 0;
		else if (ctl_ret != STMF_SUCCESS)
			ret = EIO;
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_ADD_HG_ENTRY:
		idtype = STMF_ID_TYPE_HOST;
		/* FALLTHROUGH */
	case STMF_IOCTL_ADD_TG_ENTRY:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
			idtype = STMF_ID_TYPE_TARGET;
		}
		grp_entry = (stmf_group_op_data_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
			ret = EINVAL;
			break;
		}
		if (grp_entry->group.name[0] == '*') {
			ret = EINVAL;
			break; /* not allowed */
		}
		mutex_enter(&stmf_state.stmf_lock);
		ret = stmf_add_group_member(grp_entry->group.name,
		    grp_entry->group.name_size,
		    grp_entry->ident + 4,
		    grp_entry->ident[3],
		    idtype,
		    &iocd->stmf_error);
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_REMOVE_HG_ENTRY:
		idtype = STMF_ID_TYPE_HOST;
		/* FALLTHROUGH */
	case STMF_IOCTL_REMOVE_TG_ENTRY:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
			idtype = STMF_ID_TYPE_TARGET;
		}
		grp_entry = (stmf_group_op_data_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
			ret = EINVAL;
			break;
		}
		if (grp_entry->group.name[0] == '*') {
			ret = EINVAL;
			break; /* not allowed */
		}
		mutex_enter(&stmf_state.stmf_lock);
		ret = stmf_remove_group_member(grp_entry->group.name,
		    grp_entry->group.name_size,
		    grp_entry->ident + 4,
		    grp_entry->ident[3],
		    idtype,
		    &iocd->stmf_error);
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_CREATE_HOST_GROUP:
		idtype = STMF_ID_TYPE_HOST_GROUP;
		/* FALLTHROUGH */
	case STMF_IOCTL_CREATE_TARGET_GROUP:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		grpname = (stmf_group_name_t *)ibuf;

		if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
			idtype = STMF_ID_TYPE_TARGET_GROUP;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
			ret = EINVAL;
			break;
		}
		if (grpname->name[0] == '*') {
			ret = EINVAL;
			break; /* not allowed */
		}
		mutex_enter(&stmf_state.stmf_lock);
		ret = stmf_add_group(grpname->name,
		    grpname->name_size, idtype, &iocd->stmf_error);
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_REMOVE_HOST_GROUP:
		idtype = STMF_ID_TYPE_HOST_GROUP;
		/* FALLTHROUGH */
	case STMF_IOCTL_REMOVE_TARGET_GROUP:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		grpname = (stmf_group_name_t *)ibuf;
		if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
			idtype = STMF_ID_TYPE_TARGET_GROUP;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
			ret = EINVAL;
			break;
		}
		if (grpname->name[0] == '*') {
			ret = EINVAL;
			break; /* not allowed */
		}
		mutex_enter(&stmf_state.stmf_lock);
		ret = stmf_remove_group(grpname->name,
		    grpname->name_size, idtype, &iocd->stmf_error);
		mutex_exit(&stmf_state.stmf_lock);
		break;
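	/*
	 * A view entry maps one LU (by GUID) to a host-group/target-group
	 * pair under a specific LUN number.  A leading '*' names the
	 * implicit all-hosts/all-targets group, a veid of 0xffffffff asks
	 * stmf_add_ve() to pick the entry index, and when either the index
	 * or the LUN number was auto-assigned the chosen values are copied
	 * back to the caller through obuf.
	 */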
	case STMF_IOCTL_ADD_VIEW_ENTRY:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		ve = (stmf_view_op_entry_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
			ret = EINVAL;
			break;
		}
		if (!ve->ve_lu_number_valid)
			ve->ve_lu_nbr[2] = 0xFF;
		if (ve->ve_all_hosts) {
			ve->ve_host_group.name[0] = '*';
			ve->ve_host_group.name_size = 1;
		}
		if (ve->ve_all_targets) {
			ve->ve_target_group.name[0] = '*';
			ve->ve_target_group.name_size = 1;
		}
		if (ve->ve_ndx_valid)
			veid = ve->ve_ndx;
		else
			veid = 0xffffffff;
		mutex_enter(&stmf_state.stmf_lock);
		ret = stmf_add_ve(ve->ve_host_group.name,
		    ve->ve_host_group.name_size,
		    ve->ve_target_group.name,
		    ve->ve_target_group.name_size,
		    ve->ve_guid,
		    &veid,
		    ve->ve_lu_nbr,
		    &iocd->stmf_error);
		mutex_exit(&stmf_state.stmf_lock);
		if (ret == 0 &&
		    (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
		    iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
			stmf_view_op_entry_t *ve_ret =
			    (stmf_view_op_entry_t *)obuf;
			iocd->stmf_obuf_nentries = 1;
			iocd->stmf_obuf_max_nentries = 1;
			if (!ve->ve_ndx_valid) {
				ve_ret->ve_ndx = veid;
				ve_ret->ve_ndx_valid = 1;
			}
			if (!ve->ve_lu_number_valid) {
				ve_ret->ve_lu_number_valid = 1;
				bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
			}
		}
		break;
	case STMF_IOCTL_REMOVE_VIEW_ENTRY:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		ve = (stmf_view_op_entry_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
			ret = EINVAL;
			break;
		}
		if (!ve->ve_ndx_valid) {
			ret = EINVAL;
			break;
		}
		mutex_enter(&stmf_state.stmf_lock);
		ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
		    &iocd->stmf_error);
		mutex_exit(&stmf_state.stmf_lock);
		break;
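	/*
	 * The wildcard '*' groups created above are internal; the group
	 * listing code below filters them out and adjusts the reported
	 * entry counts accordingly.
	 */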
	case STMF_IOCTL_GET_HG_LIST:
		id_list = &stmf_state.stmf_hg_list;
		/* FALLTHROUGH */
	case STMF_IOCTL_GET_TG_LIST:
		if (cmd == STMF_IOCTL_GET_TG_LIST)
			id_list = &stmf_state.stmf_tg_list;
		mutex_enter(&stmf_state.stmf_lock);
		iocd->stmf_obuf_max_nentries = id_list->id_count;
		n = min(id_list->id_count,
		    (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
		iocd->stmf_obuf_nentries = n;
		id_entry = id_list->idl_head;
		grpname = (stmf_group_name_t *)obuf;
		for (i = 0; i < n; i++) {
			if (id_entry->id_data[0] == '*') {
				if (iocd->stmf_obuf_nentries > 0) {
					iocd->stmf_obuf_nentries--;
				}
				id_entry = id_entry->id_next;
				continue;
			}
			grpname->name_size = id_entry->id_data_size;
			bcopy(id_entry->id_data, grpname->name,
			    id_entry->id_data_size);
			grpname++;
			id_entry = id_entry->id_next;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_GET_HG_ENTRIES:
		id_list = &stmf_state.stmf_hg_list;
		/* FALLTHROUGH */
	case STMF_IOCTL_GET_TG_ENTRIES:
		grpname = (stmf_group_name_t *)ibuf;
		if ((ibuf == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
			ret = EINVAL;
			break;
		}
		if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
			id_list = &stmf_state.stmf_tg_list;
		}
		mutex_enter(&stmf_state.stmf_lock);
		id_entry = stmf_lookup_id(id_list, grpname->name_size,
		    grpname->name);
		if (!id_entry)
			ret = ENODEV;
		else {
			stmf_ge_ident_t *grp_entry;
			id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
			iocd->stmf_obuf_max_nentries = id_list->id_count;
			n = min(id_list->id_count,
			    iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
			iocd->stmf_obuf_nentries = n;
			id_entry = id_list->idl_head;
			grp_entry = (stmf_ge_ident_t *)obuf;
			for (i = 0; i < n; i++) {
				bcopy(id_entry->id_data, grp_entry->ident,
				    id_entry->id_data_size);
				grp_entry->ident_size = id_entry->id_data_size;
				id_entry = id_entry->id_next;
				grp_entry++;
			}
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_GET_VE_LIST:
		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
		mutex_enter(&stmf_state.stmf_lock);
		ve = (stmf_view_op_entry_t *)obuf;
		for (id_entry = stmf_state.stmf_luid_list.idl_head;
		    id_entry; id_entry = id_entry->id_next) {
			for (view_entry = (stmf_view_entry_t *)
			    id_entry->id_impl_specific; view_entry;
			    view_entry = view_entry->ve_next) {
				iocd->stmf_obuf_max_nentries++;
				if (iocd->stmf_obuf_nentries >= n)
					continue;
				ve->ve_ndx_valid = 1;
				ve->ve_ndx = view_entry->ve_id;
				ve->ve_lu_number_valid = 1;
				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
				bcopy(view_entry->ve_luid->id_data,
				    ve->ve_guid,
				    view_entry->ve_luid->id_data_size);
				if (view_entry->ve_hg->id_data[0] == '*') {
					ve->ve_all_hosts = 1;
				} else {
					bcopy(view_entry->ve_hg->id_data,
					    ve->ve_host_group.name,
					    view_entry->ve_hg->id_data_size);
					ve->ve_host_group.name_size =
					    view_entry->ve_hg->id_data_size;
				}

				if (view_entry->ve_tg->id_data[0] == '*') {
					ve->ve_all_targets = 1;
				} else {
					bcopy(view_entry->ve_tg->id_data,
					    ve->ve_target_group.name,
					    view_entry->ve_tg->id_data_size);
					ve->ve_target_group.name_size =
					    view_entry->ve_tg->id_data_size;
				}
				ve++;
				iocd->stmf_obuf_nentries++;
			}
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;
	case STMF_IOCTL_LU_VE_LIST:
		p_id = (uint8_t *)ibuf;
		if ((iocd->stmf_ibuf_size != 16) ||
		    (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
			ret = EINVAL;
			break;
		}

		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
		mutex_enter(&stmf_state.stmf_lock);
		ve = (stmf_view_op_entry_t *)obuf;
		for (id_entry = stmf_state.stmf_luid_list.idl_head;
		    id_entry; id_entry = id_entry->id_next) {
			if (bcmp(id_entry->id_data, p_id, 16) != 0)
				continue;
			for (view_entry = (stmf_view_entry_t *)
			    id_entry->id_impl_specific; view_entry;
			    view_entry = view_entry->ve_next) {
				iocd->stmf_obuf_max_nentries++;
				if (iocd->stmf_obuf_nentries >= n)
					continue;
				ve->ve_ndx_valid = 1;
				ve->ve_ndx = view_entry->ve_id;
				ve->ve_lu_number_valid = 1;
				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
				bcopy(view_entry->ve_luid->id_data,
				    ve->ve_guid,
				    view_entry->ve_luid->id_data_size);
				if (view_entry->ve_hg->id_data[0] == '*') {
					ve->ve_all_hosts = 1;
				} else {
					bcopy(view_entry->ve_hg->id_data,
					    ve->ve_host_group.name,
					    view_entry->ve_hg->id_data_size);
					ve->ve_host_group.name_size =
					    view_entry->ve_hg->id_data_size;
				}

				if (view_entry->ve_tg->id_data[0] == '*') {
					ve->ve_all_targets = 1;
				} else {
					bcopy(view_entry->ve_tg->id_data,
					    ve->ve_target_group.name,
					    view_entry->ve_tg->id_data_size);
					ve->ve_target_group.name_size =
					    view_entry->ve_tg->id_data_size;
				}
				ve++;
				iocd->stmf_obuf_nentries++;
			}
			break;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	case STMF_IOCTL_LOAD_PP_DATA:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		ppi = (stmf_ppioctl_data_t *)ibuf;
		if ((ppi == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
			ret = EINVAL;
			break;
		}
		/* returned token */
		ppi_token = (uint64_t *)obuf;
		if ((ppi_token == NULL) ||
		    (iocd->stmf_obuf_size < sizeof (uint64_t))) {
			ret = EINVAL;
			break;
		}
		ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
		break;

	case STMF_IOCTL_GET_PP_DATA:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		ppi = (stmf_ppioctl_data_t *)ibuf;
		if (ppi == NULL ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
			ret = EINVAL;
			break;
		}
		ppi_out = (stmf_ppioctl_data_t *)obuf;
		if ((ppi_out == NULL) ||
		    (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
			ret = EINVAL;
			break;
		}
		ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
		break;

	case STMF_IOCTL_CLEAR_PP_DATA:
		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
			ret = EACCES;
			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
			break;
		}
		ppi = (stmf_ppioctl_data_t *)ibuf;
		if ((ppi == NULL) ||
		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
			ret = EINVAL;
			break;
		}
		ret = stmf_delete_ppd_ioctl(ppi);
		break;

	case STMF_IOCTL_CLEAR_TRACE:
		stmf_trace_clear();
		break;

	case STMF_IOCTL_ADD_TRACE:
		if (iocd->stmf_ibuf_size && ibuf) {
			((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
			stmf_trace("\nstradm", "%s\n", ibuf);
		}
		break;

	case STMF_IOCTL_GET_TRACE_POSITION:
		if (obuf && (iocd->stmf_obuf_size > 3)) {
			mutex_enter(&trace_buf_lock);
			*((int *)obuf) = trace_buf_curndx;
			mutex_exit(&trace_buf_lock);
		} else {
			ret = EINVAL;
		}
		break;

	case STMF_IOCTL_GET_TRACE:
		if ((iocd->stmf_obuf_size == 0) ||
		    (iocd->stmf_ibuf_size < 4)) {
			ret = EINVAL;
			break;
		}
		i = *((int *)ibuf);
		/* reject negative offsets explicitly as well */
		if ((i < 0) || (i > trace_buf_size) ||
		    ((i + iocd->stmf_obuf_size) > trace_buf_size)) {
			ret = EINVAL;
			break;
		}
		mutex_enter(&trace_buf_lock);
		bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
		mutex_exit(&trace_buf_lock);
		break;

	default:
		ret = ENOTTY;
	}

	if (ret == 0) {
		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
	} else if (iocd->stmf_error) {
		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
	}
	if (obuf) {
		kmem_free(obuf, iocd->stmf_obuf_size);
		obuf = NULL;
	}
	if (ibuf) {
		kmem_free(ibuf, iocd->stmf_ibuf_size);
		ibuf = NULL;
	}
	kmem_free(iocd, sizeof (stmf_iocdata_t));
	return (ret);
}
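/*
 * The overall service state is derived from the union of all port and LU
 * states: anything still onlining makes the running service report
 * STMF_STATE_ONLINING, anything still offlining makes a stopped service
 * report STMF_STATE_OFFLINING, otherwise the service is simply ONLINE or
 * OFFLINE according to stmf_service_running.
 */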
static int
stmf_get_service_state()
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	int online = 0;
	int offline = 0;
	int onlining = 0;
	int offlining = 0;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state == STMF_STATE_OFFLINE)
			offline++;
		else if (ilport->ilport_state == STMF_STATE_ONLINE)
			online++;
		else if (ilport->ilport_state == STMF_STATE_ONLINING)
			onlining++;
		else if (ilport->ilport_state == STMF_STATE_OFFLINING)
			offlining++;
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state == STMF_STATE_OFFLINE)
			offline++;
		else if (ilu->ilu_state == STMF_STATE_ONLINE)
			online++;
		else if (ilu->ilu_state == STMF_STATE_ONLINING)
			onlining++;
		else if (ilu->ilu_state == STMF_STATE_OFFLINING)
			offlining++;
	}

	if (stmf_state.stmf_service_running) {
		if (onlining)
			return (STMF_STATE_ONLINING);
		else
			return (STMF_STATE_ONLINE);
	}

	if (offlining) {
		return (STMF_STATE_OFFLINING);
	}

	return (STMF_STATE_OFFLINE);
}
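/*
 * State transitions driven by STMF_IOCTL_SET_STMF_STATE.  The caller must
 * hold the exclusive open.  While offline, a request for STMF_CONFIG_INIT
 * wipes the config (ppds and views); going online replays ONLINE on every
 * port and LU that was previously online.  Going offline stops the
 * service, drops all provider data, and offlines everything currently
 * online.  No transition is allowed while any object is still onlining
 * or offlining.
 */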
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		if (std->config_state == STMF_CONFIG_INIT) {
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (ilport->ilport_prev_state != STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (ilu->ilu_prev_state != STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;
	stmf_delete_all_ppds();
	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}

static int
stmf_get_stmf_state(stmf_state_desc_t *std)
{
	mutex_enter(&stmf_state.stmf_lock);
	std->state = stmf_get_service_state();
	std->config_state = stmf_state.stmf_config_state;
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}

typedef struct {
	void *bp;	/* back pointer from internal struct to main struct */
	int alloc_size;
} __istmf_t;

typedef struct {
	__istmf_t *fp;	/* Framework private */
	void *cp;	/* Caller private */
	void *ss;	/* struct specific */
} __stmf_t;

static struct {
	int shared;
	int fw_private;
} stmf_sizes[] = { { 0, 0 },
	{ GET_STRUCT_SIZE(stmf_lu_provider_t),
		GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
		GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
		GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
		GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
		GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
		GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
		GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
		GET_STRUCT_SIZE(__istmf_t) }

};

void *
stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
{
	int stmf_size;
	int kmem_flag;
	__stmf_t *sh;

	if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
		return (NULL);

	if ((curthread->t_flag & T_INTR_THREAD) ||
	    (flags & AF_FORCE_NOSLEEP)) {
		kmem_flag = KM_NOSLEEP;
	} else {
		kmem_flag = KM_SLEEP;
	}

	additional_size = (additional_size + 7) & (~7);
	stmf_size = stmf_sizes[struct_id].shared +
	    stmf_sizes[struct_id].fw_private + additional_size;

	sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);

	if (sh == NULL)
		return (NULL);

	/*
	 * In principle, the layout used inside stmf_alloc should not
	 * change.  However, the original ordering of framework private
	 * data and caller private data does not allow an sglist in the
	 * caller private data, so the two memory segments are re-ordered
	 * here as a workaround.  A better solution would be a dedicated
	 * interface for allocating the sglist, which would make this
	 * workaround unnecessary; until such an interface exists, this
	 * ordering must be kept as is.
	 */
	sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
	sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
	    stmf_sizes[struct_id].shared + additional_size);
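	/*
	 * Resulting layout (a sketch; the offsets follow directly from
	 * the two assignments above):
	 *
	 *	sh ->	+---------------------------+
	 *		| shared (public) struct    | stmf_sizes[id].shared
	 *	cp ->	+---------------------------+
	 *		| caller private data       | additional_size (8-aligned)
	 *	fp ->	+---------------------------+
	 *		| framework private struct  | stmf_sizes[id].fw_private
	 *		+---------------------------+
	 */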
	sh->fp->bp = sh;
	/* Just store the total size instead of storing additional size */
	sh->fp->alloc_size = stmf_size;

	return (sh);
}

void
stmf_free(void *ptr)
{
	__stmf_t *sh = (__stmf_t *)ptr;

	/*
	 * So far we don't need any struct specific processing. If such
	 * a need ever arises, then store the struct id in the framework
	 * private section and get it here as sh->fp->struct_id.
	 */
	kmem_free(ptr, sh->fp->alloc_size);
}

/*
 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
 * framework and returns a pointer to framework private data for the lu.
 * Returns NULL if the lu was not found.
 */
stmf_i_lu_t *
stmf_lookup_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		if (ilu->ilu_lu == lu)
			return (ilu);
	}
	return (NULL);
}

/*
 * Given a pointer to stmf_local_port_t, verifies if this lport is registered
 * with the framework and returns a pointer to framework private data for
 * the lport.
 * Returns NULL if the lport was not found.
 */
stmf_i_local_port_t *
stmf_lookup_lport(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;
	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_lport == lport)
			return (ilport);
	}
	return (NULL);
}
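/*
 * Provider registration.  If persistent provider data (a ppd) was already
 * loaded under the provider's name, registration links the two and, when
 * the provider supplied a callback, immediately delivers the saved nvlist
 * via STMF_PROVIDER_DATA_UPDATED with STMF_PCB_PREG_COMPLETE set.  The
 * callback is issued with stmf_lock dropped; the *_cb_in_progress flag
 * keeps the provider from being deregistered underneath it.
 */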
stmf_status_t
stmf_register_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t *ilp =
	    (stmf_i_lu_provider_t *)lp->lp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (lp->lp_lpif_rev != LPIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_next = stmf_state.stmf_ilplist;
	stmf_state.stmf_ilplist = ilp;
	stmf_state.stmf_nlps++;

	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rlp_bail_out;
	}
	ilp->ilp_ppd = ppd;
	ppd->ppd_provider = ilp;
	if (lp->lp_cb == NULL)
		goto rlp_bail_out;
	ilp->ilp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_cb_in_progress = 0;

rlp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}

stmf_status_t
stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t **ppilp;
	stmf_i_lu_provider_t *ilp =
	    (stmf_i_lu_provider_t *)lp->lp_stmf_private;

	mutex_enter(&stmf_state.stmf_lock);
	if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
	    ppilp = &((*ppilp)->ilp_next)) {
		if (*ppilp == ilp) {
			*ppilp = ilp->ilp_next;
			stmf_state.stmf_nlps--;
			if (ilp->ilp_ppd) {
				ilp->ilp_ppd->ppd_provider = NULL;
				ilp->ilp_ppd = NULL;
			}
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_NOT_FOUND);
}
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}

stmf_status_t
stmf_deregister_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_i_port_provider_t **ppipp;

	mutex_enter(&stmf_state.stmf_lock);
	if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
	    ppipp = &((*ppipp)->ipp_next)) {
		if (*ppipp == ipp) {
			*ppipp = ipp->ipp_next;
			stmf_state.stmf_npps--;
			if (ipp->ipp_ppd) {
				ipp->ipp_ppd->ppd_provider = NULL;
				ipp->ipp_ppd = NULL;
			}
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_SUCCESS);
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_NOT_FOUND);
}
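/*
 * Persistent provider data (ppd) lifecycle: a ppd record is created the
 * first time data is loaded for a provider name, even if that provider
 * has not registered yet, and is linked to the provider when it does.
 * Exactly one of ppi_lu_provider/ppi_port_provider must be set.  Every
 * successful write bumps ppd_token, which gives callers a simple
 * compare-and-set scheme against concurrent updates.
 */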
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t *ipp;
	stmf_i_lu_provider_t *ilp;
	stmf_pp_data_t *ppd;
	nvlist_t *nv;
	int s;
	int ret;

	*err_ret = 0;

	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * The caller is asking us to validate the token. If another
	 * write happened after the caller's last get, the tokens no
	 * longer match and the update is rejected.
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			ilp->ilp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			ipp->ipp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}

void
stmf_delete_ppd(stmf_pp_data_t *ppd)
{
	stmf_pp_data_t **pppd;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	if (ppd->ppd_provider) {
		if (ppd->ppd_lu_provider) {
			((stmf_i_lu_provider_t *)
			    ppd->ppd_provider)->ilp_ppd = NULL;
		} else {
			((stmf_i_port_provider_t *)
			    ppd->ppd_provider)->ipp_ppd = NULL;
		}
		ppd->ppd_provider = NULL;
	}

	for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
	    pppd = &((*pppd)->ppd_next)) {
		if (*pppd == ppd)
			break;
	}

	if (*pppd == NULL)
		return;

	*pppd = ppd->ppd_next;
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);

	kmem_free(ppd, ppd->ppd_alloc_size);
}

int
stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
{
	stmf_pp_data_t *ppd;
	int ret = ENOENT;

	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd) {
		ret = 0;
		stmf_delete_ppd(ppd);
	}
	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}
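/*
 * GET_PP_DATA packs the saved nvlist with XDR encoding directly into the
 * caller-supplied data area.  The required size is always reported back
 * through ppi_data_size, so on STMF_IOCERR_INSUFFICIENT_BUF the caller
 * can allocate that much and retry.
 */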
int
stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret)
{
	stmf_pp_data_t *ppd;
	size_t req_size;
	int ret = ENOENT;
	char *bufp = (char *)ppi_out->ppi_data;

	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd && ppd->ppd_nv) {
		ppi_out->ppi_token = ppd->ppd_token;
		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
		    NV_ENCODE_XDR)) != 0) {
			goto done;
		}
		ppi_out->ppi_data_size = req_size;
		if (req_size > ppi->ppi_data_size) {
			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
			ret = EINVAL;
			goto done;
		}

		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
		    NV_ENCODE_XDR, 0)) != 0) {
			goto done;
		}
		ret = 0;
	}

done:
	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}

void
stmf_delete_all_ppds()
{
	stmf_pp_data_t *ppd, *nppd;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
		nppd = ppd->ppd_next;
		stmf_delete_ppd(ppd);
	}
}

/*
 * 16 is the max string length of a protocol_ident, increase
 * the size if needed.
 */
#define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
#define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)

typedef struct stmf_kstat_lu_info {
	kstat_named_t		i_lun_guid;
	kstat_named_t		i_lun_alias;
} stmf_kstat_lu_info_t;

typedef struct stmf_kstat_tgt_info {
	kstat_named_t		i_tgt_name;
	kstat_named_t		i_tgt_alias;
	kstat_named_t		i_protocol;
} stmf_kstat_tgt_info_t;

/*
 * This array matches the Protocol Identifier in stmf_ioctl.h
 */
char *protocol_ident[PROTOCOL_ANY] = {
	"Fibre Channel",
	"Parallel SCSI",
	"SSA",
	"IEEE_1394",
	"SRP",
	"iSCSI",
	"SAS",
	"ADT",
	"ATAPI",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
};
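/*
 * Note that valid indices into protocol_ident are 0..PROTOCOL_ANY-1;
 * stmf_create_kstat_lport() below clamps out-of-range protocol ids to
 * the last ("UNKNOWN") slot.
 */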
*)task->task_lport->lport_stmf_private; 2056 if (ilp != NULL && ilp->ilport_kstat_io != NULL) { 2057 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io); 2058 if (kip != NULL) { 2059 mutex_enter(ilp->ilport_kstat_io->ks_lock); 2060 STMF_UPDATE_KSTAT_IO(kip, dbuf); 2061 mutex_exit(ilp->ilport_kstat_io->ks_lock); 2062 } 2063 } 2064 } 2065 2066 static void 2067 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf) 2068 { 2069 stmf_i_lu_t *ilu; 2070 kstat_io_t *kip; 2071 2072 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 2073 if (ilu != NULL && ilu->ilu_kstat_io != NULL) { 2074 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io); 2075 if (kip != NULL) { 2076 mutex_enter(ilu->ilu_kstat_io->ks_lock); 2077 STMF_UPDATE_KSTAT_IO(kip, dbuf); 2078 mutex_exit(ilu->ilu_kstat_io->ks_lock); 2079 } 2080 } 2081 } 2082 2083 static void 2084 stmf_create_kstat_lu(stmf_i_lu_t *ilu) 2085 { 2086 char ks_nm[KSTAT_STRLEN]; 2087 stmf_kstat_lu_info_t *ks_lu; 2088 2089 /* create kstat lun info */ 2090 ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ, 2091 KM_NOSLEEP); 2092 if (ks_lu == NULL) { 2093 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2094 return; 2095 } 2096 2097 bzero(ks_nm, sizeof (ks_nm)); 2098 (void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu); 2099 if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0, 2100 ks_nm, "misc", KSTAT_TYPE_NAMED, 2101 sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t), 2102 KSTAT_FLAG_VIRTUAL)) == NULL) { 2103 kmem_free(ks_lu, STMF_KSTAT_LU_SZ); 2104 cmn_err(CE_WARN, "STMF: kstat_create lu failed"); 2105 return; 2106 } 2107 2108 ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ; 2109 ilu->ilu_kstat_info->ks_data = ks_lu; 2110 2111 kstat_named_init(&ks_lu->i_lun_guid, "lun-guid", 2112 KSTAT_DATA_STRING); 2113 kstat_named_init(&ks_lu->i_lun_alias, "lun-alias", 2114 KSTAT_DATA_STRING); 2115 2116 /* convert guid to hex string */ 2117 int i; 2118 uint8_t *p = ilu->ilu_lu->lu_id->ident; 2119 bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid)); 2120 for (i = 0; i < STMF_GUID_INPUT / 2; i++) { 2121 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]); 2122 } 2123 kstat_named_setstr(&ks_lu->i_lun_guid, 2124 (const char *)ilu->ilu_ascii_hex_guid); 2125 kstat_named_setstr(&ks_lu->i_lun_alias, 2126 (const char *)ilu->ilu_lu->lu_alias); 2127 kstat_install(ilu->ilu_kstat_info); 2128 2129 /* create kstat lun io */ 2130 bzero(ks_nm, sizeof (ks_nm)); 2131 (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu); 2132 if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 2133 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 2134 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed"); 2135 return; 2136 } 2137 mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0); 2138 ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock; 2139 kstat_install(ilu->ilu_kstat_io); 2140 } 2141 2142 static void 2143 stmf_create_kstat_lport(stmf_i_local_port_t *ilport) 2144 { 2145 char ks_nm[KSTAT_STRLEN]; 2146 stmf_kstat_tgt_info_t *ks_tgt; 2147 int id, len; 2148 2149 /* create kstat lport info */ 2150 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ, 2151 KM_NOSLEEP); 2152 if (ks_tgt == NULL) { 2153 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2154 return; 2155 } 2156 2157 bzero(ks_nm, sizeof (ks_nm)); 2158 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport); 2159 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME, 2160 0, ks_nm, "misc", KSTAT_TYPE_NAMED, 2161 sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t), 2162 
KSTAT_FLAG_VIRTUAL)) == NULL) {
2163 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2164 cmn_err(CE_WARN, "STMF: kstat_create target failed");
2165 return;
2166 }
2167
2168 ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2169 ilport->ilport_kstat_info->ks_data = ks_tgt;
2170
2171 kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2172 KSTAT_DATA_STRING);
2173 kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2174 KSTAT_DATA_STRING);
2175 kstat_named_init(&ks_tgt->i_protocol, "protocol",
2176 KSTAT_DATA_STRING);
2177
2178 /* ident might not be null terminated */
2179 len = ilport->ilport_lport->lport_id->ident_length;
2180 bcopy(ilport->ilport_lport->lport_id->ident,
2181 ilport->ilport_kstat_tgt_name, len);
2182 ilport->ilport_kstat_tgt_name[len] = '\0';
2183 kstat_named_setstr(&ks_tgt->i_tgt_name,
2184 (const char *)ilport->ilport_kstat_tgt_name);
2185 kstat_named_setstr(&ks_tgt->i_tgt_alias,
2186 (const char *)ilport->ilport_lport->lport_alias);
2187 /* protocol */
2188 if ((id = ilport->ilport_lport->lport_id->protocol_id) >= PROTOCOL_ANY) {
2189 cmn_err(CE_WARN, "STMF: protocol_id out of bounds");
2190 id = PROTOCOL_ANY - 1;
2191 }
2192 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2193 kstat_install(ilport->ilport_kstat_info);
2194
2195 /* create kstat lport io */
2196 bzero(ks_nm, sizeof (ks_nm));
2197 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2198 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2199 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2200 cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2201 return;
2202 }
2203 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2204 ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2205 kstat_install(ilport->ilport_kstat_io);
2206 }
2207
2208 stmf_status_t
2209 stmf_register_lu(stmf_lu_t *lu)
2210 {
2211 stmf_i_lu_t *ilu;
2212 uint8_t *p1, *p2;
2213 stmf_state_change_info_t ssci;
2214 stmf_id_data_t *luid;
2215
2216 if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
2217 (lu->lu_id->ident_length != 16) ||
2218 ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
2219 return (STMF_INVALID_ARG);
2220 }
2221 p1 = &lu->lu_id->ident[0];
2222 mutex_enter(&stmf_state.stmf_lock);
2223 if (stmf_state.stmf_inventory_locked) {
2224 mutex_exit(&stmf_state.stmf_lock);
2225 return (STMF_BUSY);
2226 }
2227
2228 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2229 p2 = &ilu->ilu_lu->lu_id->ident[0];
2230 if (bcmp(p1, p2, 16) == 0) {
2231 mutex_exit(&stmf_state.stmf_lock);
2232 return (STMF_ALREADY);
2233 }
2234 }
2235
2236 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
2237 luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
2238 lu->lu_id->ident_length, lu->lu_id->ident);
2239 if (luid) {
2240 luid->id_pt_to_object = (void *)ilu;
2241 ilu->ilu_luid = luid;
2242 }
2243 ilu->ilu_alias = NULL;
2244
2245 ilu->ilu_next = stmf_state.stmf_ilulist;
2246 ilu->ilu_prev = NULL;
2247 if (ilu->ilu_next)
2248 ilu->ilu_next->ilu_prev = ilu;
2249 stmf_state.stmf_ilulist = ilu;
2250 stmf_state.stmf_nlus++;
2251 if (lu->lu_lp) {
2252 ((stmf_i_lu_provider_t *)
2253 (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
2254 }
2255 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
2256 STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
2257 stmf_create_kstat_lu(ilu);
2258 mutex_exit(&stmf_state.stmf_lock);
2259
2260 /* XXX we should probably check if this lu can be brought online */
2261 ilu->ilu_prev_state = STMF_STATE_ONLINE;
2262 if (stmf_state.stmf_service_running) {
2263 ssci.st_rflags =
0; 2264 ssci.st_additional_info = NULL; 2265 (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci); 2266 } 2267 2268 /* XXX: Generate event */ 2269 return (STMF_SUCCESS); 2270 } 2271 2272 stmf_status_t 2273 stmf_deregister_lu(stmf_lu_t *lu) 2274 { 2275 stmf_i_lu_t *ilu; 2276 2277 mutex_enter(&stmf_state.stmf_lock); 2278 if (stmf_state.stmf_inventory_locked) { 2279 mutex_exit(&stmf_state.stmf_lock); 2280 return (STMF_BUSY); 2281 } 2282 ilu = stmf_lookup_lu(lu); 2283 if (ilu == NULL) { 2284 mutex_exit(&stmf_state.stmf_lock); 2285 return (STMF_INVALID_ARG); 2286 } 2287 if (ilu->ilu_state == STMF_STATE_OFFLINE) { 2288 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 2289 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) { 2290 cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock); 2291 } 2292 if (ilu->ilu_ntasks) { 2293 stmf_i_scsi_task_t *itask, *nitask; 2294 2295 nitask = ilu->ilu_tasks; 2296 do { 2297 itask = nitask; 2298 nitask = itask->itask_lu_next; 2299 lu->lu_task_free(itask->itask_task); 2300 stmf_free(itask->itask_task); 2301 } while (nitask != NULL); 2302 2303 ilu->ilu_tasks = ilu->ilu_free_tasks = NULL; 2304 ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0; 2305 } 2306 2307 if (ilu->ilu_next) 2308 ilu->ilu_next->ilu_prev = ilu->ilu_prev; 2309 if (ilu->ilu_prev) 2310 ilu->ilu_prev->ilu_next = ilu->ilu_next; 2311 else 2312 stmf_state.stmf_ilulist = ilu->ilu_next; 2313 stmf_state.stmf_nlus--; 2314 2315 if (ilu == stmf_state.stmf_svc_ilu_draining) { 2316 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 2317 } 2318 if (ilu == stmf_state.stmf_svc_ilu_timing) { 2319 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next; 2320 } 2321 if (lu->lu_lp) { 2322 ((stmf_i_lu_provider_t *) 2323 (lu->lu_lp->lp_stmf_private))->ilp_nlus--; 2324 } 2325 if (ilu->ilu_luid) { 2326 ((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object = 2327 NULL; 2328 ilu->ilu_luid = NULL; 2329 } 2330 STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl); 2331 } else { 2332 mutex_exit(&stmf_state.stmf_lock); 2333 return (STMF_BUSY); 2334 } 2335 if (ilu->ilu_kstat_info) { 2336 kmem_free(ilu->ilu_kstat_info->ks_data, 2337 ilu->ilu_kstat_info->ks_data_size); 2338 kstat_delete(ilu->ilu_kstat_info); 2339 } 2340 if (ilu->ilu_kstat_io) { 2341 kstat_delete(ilu->ilu_kstat_io); 2342 mutex_destroy(&ilu->ilu_kstat_lock); 2343 } 2344 mutex_exit(&stmf_state.stmf_lock); 2345 return (STMF_SUCCESS); 2346 } 2347 2348 stmf_status_t 2349 stmf_register_local_port(stmf_local_port_t *lport) 2350 { 2351 stmf_i_local_port_t *ilport; 2352 stmf_state_change_info_t ssci; 2353 int start_workers = 0; 2354 2355 mutex_enter(&stmf_state.stmf_lock); 2356 if (stmf_state.stmf_inventory_locked) { 2357 mutex_exit(&stmf_state.stmf_lock); 2358 return (STMF_BUSY); 2359 } 2360 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private; 2361 rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL); 2362 2363 ilport->ilport_next = stmf_state.stmf_ilportlist; 2364 ilport->ilport_prev = NULL; 2365 if (ilport->ilport_next) 2366 ilport->ilport_next->ilport_prev = ilport; 2367 stmf_state.stmf_ilportlist = ilport; 2368 stmf_state.stmf_nlports++; 2369 if (lport->lport_pp) { 2370 ((stmf_i_port_provider_t *) 2371 (lport->lport_pp->pp_stmf_private))->ipp_npps++; 2372 } 2373 ilport->ilport_tg = 2374 stmf_lookup_group_for_target(lport->lport_id->ident, 2375 lport->lport_id->ident_length); 2376 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1); 2377 STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl); 2378 stmf_create_kstat_lport(ilport); 2379 if (stmf_workers_state == STMF_WORKERS_DISABLED) { 2380 
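/*
 * This is the first local port coming up: mark the worker pool as
 * enabling while stmf_lock is still held, and remember to actually
 * spawn the worker threads via stmf_worker_init() once the lock is
 * dropped below.
 */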
stmf_workers_state = STMF_WORKERS_ENABLING;
2381 start_workers = 1;
2382 }
2383 mutex_exit(&stmf_state.stmf_lock);
2384
2385 if (start_workers)
2386 stmf_worker_init();
2387
2388 /* XXX we should probably check if this lport can be brought online */
2389 ilport->ilport_prev_state = STMF_STATE_ONLINE;
2390 if (stmf_state.stmf_service_running) {
2391 ssci.st_rflags = 0;
2392 ssci.st_additional_info = NULL;
2393 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
2394 }
2395
2396 /* XXX: Generate event */
2397 return (STMF_SUCCESS);
2398 }
2399
2400 stmf_status_t
2401 stmf_deregister_local_port(stmf_local_port_t *lport)
2402 {
2403 stmf_i_local_port_t *ilport;
2404
2405 mutex_enter(&stmf_state.stmf_lock);
2406 if (stmf_state.stmf_inventory_locked) {
2407 mutex_exit(&stmf_state.stmf_lock);
2408 return (STMF_BUSY);
2409 }
2410 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
2411 if (ilport->ilport_nsessions == 0) {
2412 if (ilport->ilport_next)
2413 ilport->ilport_next->ilport_prev = ilport->ilport_prev;
2414 if (ilport->ilport_prev)
2415 ilport->ilport_prev->ilport_next = ilport->ilport_next;
2416 else
2417 stmf_state.stmf_ilportlist = ilport->ilport_next;
2418 rw_destroy(&ilport->ilport_lock);
2419 stmf_state.stmf_nlports--;
2420 if (lport->lport_pp) {
2421 ((stmf_i_port_provider_t *)
2422 (lport->lport_pp->pp_stmf_private))->ipp_npps--;
2423 }
2424 ilport->ilport_tg = NULL;
2425 STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
2426 } else {
2427 mutex_exit(&stmf_state.stmf_lock);
2428 return (STMF_BUSY);
2429 }
2430 if (ilport->ilport_kstat_info) {
2431 kmem_free(ilport->ilport_kstat_info->ks_data,
2432 ilport->ilport_kstat_info->ks_data_size);
2433 kstat_delete(ilport->ilport_kstat_info);
2434 }
2435 if (ilport->ilport_kstat_io) {
2436 kstat_delete(ilport->ilport_kstat_io);
2437 mutex_destroy(&ilport->ilport_kstat_lock);
2438 }
2439 mutex_exit(&stmf_state.stmf_lock);
2440 return (STMF_SUCCESS);
2441 }
2442
2443 /*
2444 * Port provider has to make sure that register/deregister session and
2445 * port are serialized calls.
2446 */
2447 stmf_status_t
2448 stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
2449 {
2450 stmf_i_scsi_session_t *iss;
2451 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
2452 lport->lport_stmf_private;
2453 uint8_t lun[8];
2454
2455 /*
2456 * Port state has to be online to register a scsi session. It is
2457 * possible that we started an offline operation and a new SCSI
2458 * session started at the same time (in that case also we are going
2459 * to fail the registration). But any other state is simply
2460 * a bad port provider implementation.
2461 */
2462 if (ilport->ilport_state != STMF_STATE_ONLINE) {
2463 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
2464 stmf_trace(lport->lport_alias, "Port is trying to "
2465 "register a session while the state is neither "
2466 "online nor offlining");
2467 }
2468 return (STMF_FAILURE);
2469 }
2470 bzero(lun, 8);
2471 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
2472 iss->iss_flags |= ISS_BEING_CREATED;
2473
2474 /* sessions use the ilport_lock.
No separate lock is required */ 2475 iss->iss_lockp = &ilport->ilport_lock; 2476 (void) stmf_session_create_lun_map(ilport, iss); 2477 2478 rw_enter(&ilport->ilport_lock, RW_WRITER); 2479 ilport->ilport_nsessions++; 2480 iss->iss_next = ilport->ilport_ss_list; 2481 ilport->ilport_ss_list = iss; 2482 rw_exit(&ilport->ilport_lock); 2483 2484 iss->iss_creation_time = ddi_get_time(); 2485 ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1); 2486 iss->iss_flags &= ~ISS_BEING_CREATED; 2487 DTRACE_PROBE2(session__online, stmf_local_port_t *, lport, 2488 stmf_scsi_session_t *, ss); 2489 return (STMF_SUCCESS); 2490 } 2491 2492 void 2493 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss) 2494 { 2495 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *) 2496 lport->lport_stmf_private; 2497 stmf_i_scsi_session_t *iss, **ppss; 2498 int found = 0; 2499 2500 DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport, 2501 stmf_scsi_session_t *, ss); 2502 2503 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 2504 if (ss->ss_rport_alias) { 2505 ss->ss_rport_alias = NULL; 2506 } 2507 2508 try_dereg_ss_again: 2509 mutex_enter(&stmf_state.stmf_lock); 2510 atomic_and_32(&iss->iss_flags, 2511 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 2512 if (iss->iss_flags & ISS_EVENT_ACTIVE) { 2513 mutex_exit(&stmf_state.stmf_lock); 2514 delay(1); 2515 goto try_dereg_ss_again; 2516 } 2517 mutex_exit(&stmf_state.stmf_lock); 2518 rw_enter(&ilport->ilport_lock, RW_WRITER); 2519 for (ppss = &ilport->ilport_ss_list; *ppss != NULL; 2520 ppss = &((*ppss)->iss_next)) { 2521 if (iss == (*ppss)) { 2522 *ppss = (*ppss)->iss_next; 2523 found = 1; 2524 break; 2525 } 2526 } 2527 if (!found) { 2528 cmn_err(CE_PANIC, "Deregister session called for non existent" 2529 " session"); 2530 } 2531 ilport->ilport_nsessions--; 2532 rw_exit(&ilport->ilport_lock); 2533 2534 (void) stmf_session_destroy_lun_map(ilport, iss); 2535 } 2536 2537 stmf_i_scsi_session_t * 2538 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked) 2539 { 2540 stmf_i_local_port_t *ilport; 2541 stmf_i_scsi_session_t *iss; 2542 2543 mutex_enter(&stmf_state.stmf_lock); 2544 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2545 ilport = ilport->ilport_next) { 2546 rw_enter(&ilport->ilport_lock, RW_WRITER); 2547 for (iss = ilport->ilport_ss_list; iss != NULL; 2548 iss = iss->iss_next) { 2549 if (iss->iss_ss->ss_session_id == session_id) { 2550 if (!stay_locked) 2551 rw_exit(&ilport->ilport_lock); 2552 mutex_exit(&stmf_state.stmf_lock); 2553 return (iss); 2554 } 2555 } 2556 rw_exit(&ilport->ilport_lock); 2557 } 2558 mutex_exit(&stmf_state.stmf_lock); 2559 return (NULL); 2560 } 2561 2562 void 2563 stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl) 2564 { 2565 stmf_itl_data_t **itlpp; 2566 stmf_i_lu_t *ilu; 2567 2568 ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED); 2569 2570 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 2571 mutex_enter(&ilu->ilu_task_lock); 2572 for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL; 2573 itlpp = &(*itlpp)->itl_next) { 2574 if ((*itlpp) == itl) 2575 break; 2576 } 2577 ASSERT((*itlpp) != NULL); 2578 *itlpp = itl->itl_next; 2579 mutex_exit(&ilu->ilu_task_lock); 2580 lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle, 2581 (uint32_t)itl->itl_hdlrm_reason); 2582 kmem_free(itl, sizeof (*itl)); 2583 } 2584 2585 stmf_status_t 2586 stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun, 2587 stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle) 2588 { 2589 
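/*
 * An ITL handle lets an LU provider attach its own per-nexus data to
 * an (initiator session, target port, LUN) combination. The handle is
 * stored in the ent_itl_datap field of the session's LUN map entry
 * and is reference counted: stmf_task_alloc() takes a hold for every
 * task issued over the nexus, and the handle is released through
 * stmf_release_itl_handle() once the count drops to zero.
 *
 * Typical LU provider call (a sketch; my_itl stands in for whatever
 * per-nexus data the provider keeps):
 *	(void) stmf_register_itl_handle(lu, task->task_lun_no,
 *	    task->task_session, task->task_session->ss_session_id,
 *	    (void *)my_itl);
 */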
stmf_itl_data_t *itl; 2590 stmf_i_scsi_session_t *iss; 2591 stmf_lun_map_ent_t *lun_map_ent; 2592 stmf_i_lu_t *ilu; 2593 uint16_t n; 2594 2595 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 2596 if (ss == NULL) { 2597 iss = stmf_session_id_to_issptr(session_id, 1); 2598 if (iss == NULL) 2599 return (STMF_NOT_FOUND); 2600 } else { 2601 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 2602 rw_enter(iss->iss_lockp, RW_WRITER); 2603 } 2604 2605 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 2606 lun_map_ent = (stmf_lun_map_ent_t *) 2607 stmf_get_ent_from_map(iss->iss_sm, n); 2608 if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) { 2609 rw_exit(iss->iss_lockp); 2610 return (STMF_NOT_FOUND); 2611 } 2612 if (lun_map_ent->ent_itl_datap != NULL) { 2613 rw_exit(iss->iss_lockp); 2614 return (STMF_ALREADY); 2615 } 2616 2617 itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP); 2618 if (itl == NULL) { 2619 rw_exit(iss->iss_lockp); 2620 return (STMF_ALLOC_FAILURE); 2621 } 2622 2623 itl->itl_counter = 1; 2624 itl->itl_lun = n; 2625 itl->itl_handle = itl_handle; 2626 itl->itl_session = iss; 2627 mutex_enter(&ilu->ilu_task_lock); 2628 itl->itl_next = ilu->ilu_itl_list; 2629 ilu->ilu_itl_list = itl; 2630 mutex_exit(&ilu->ilu_task_lock); 2631 lun_map_ent->ent_itl_datap = itl; 2632 rw_exit(iss->iss_lockp); 2633 2634 return (STMF_SUCCESS); 2635 } 2636 2637 void 2638 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason) 2639 { 2640 uint8_t old, new; 2641 2642 do { 2643 old = new = itl->itl_flags; 2644 if (old & STMF_ITL_BEING_TERMINATED) 2645 return; 2646 new |= STMF_ITL_BEING_TERMINATED; 2647 } while (atomic_cas_8(&itl->itl_flags, old, new) != old); 2648 itl->itl_hdlrm_reason = hdlrm_reason; 2649 2650 ASSERT(itl->itl_counter); 2651 2652 if (atomic_add_32_nv(&itl->itl_counter, -1)) 2653 return; 2654 2655 drv_usecwait(10); 2656 if (itl->itl_counter) 2657 return; 2658 2659 stmf_release_itl_handle(lu, itl); 2660 } 2661 2662 stmf_status_t 2663 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu) 2664 { 2665 stmf_i_lu_t *ilu; 2666 stmf_i_local_port_t *ilport; 2667 stmf_i_scsi_session_t *iss; 2668 stmf_lun_map_t *lm; 2669 stmf_lun_map_ent_t *ent; 2670 uint32_t nmaps, nu; 2671 stmf_itl_data_t **itl_list; 2672 int i; 2673 2674 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 2675 2676 dereg_itl_start:; 2677 nmaps = ilu->ilu_ref_cnt; 2678 if (nmaps == 0) 2679 return (STMF_NOT_FOUND); 2680 itl_list = (stmf_itl_data_t **)kmem_zalloc( 2681 nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP); 2682 mutex_enter(&stmf_state.stmf_lock); 2683 if (nmaps != ilu->ilu_ref_cnt) { 2684 /* Something changed, start all over */ 2685 mutex_exit(&stmf_state.stmf_lock); 2686 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 2687 goto dereg_itl_start; 2688 } 2689 nu = 0; 2690 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2691 ilport = ilport->ilport_next) { 2692 rw_enter(&ilport->ilport_lock, RW_WRITER); 2693 for (iss = ilport->ilport_ss_list; iss != NULL; 2694 iss = iss->iss_next) { 2695 lm = iss->iss_sm; 2696 if (!lm) 2697 continue; 2698 for (i = 0; i < lm->lm_nentries; i++) { 2699 if (lm->lm_plus[i] == NULL) 2700 continue; 2701 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 2702 if ((ent->ent_lu == lu) && 2703 (ent->ent_itl_datap)) { 2704 itl_list[nu++] = ent->ent_itl_datap; 2705 ent->ent_itl_datap = NULL; 2706 if (nu == nmaps) { 2707 rw_exit(&ilport->ilport_lock); 2708 goto dai_scan_done; 2709 } 2710 } 2711 } /* lun table for a session */ 2712 } /* sessions */ 2713 
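/* every session on this port has been scanned; drop its lock */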
rw_exit(&ilport->ilport_lock); 2714 } /* ports */ 2715 2716 dai_scan_done: 2717 mutex_exit(&stmf_state.stmf_lock); 2718 2719 for (i = 0; i < nu; i++) { 2720 stmf_do_itl_dereg(lu, itl_list[i], 2721 STMF_ITL_REASON_DEREG_REQUEST); 2722 } 2723 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 2724 2725 return (STMF_SUCCESS); 2726 } 2727 2728 stmf_status_t 2729 stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun, 2730 stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle) 2731 { 2732 stmf_i_scsi_session_t *iss; 2733 stmf_itl_data_t *itl; 2734 stmf_lun_map_ent_t *ent; 2735 stmf_lun_map_t *lm; 2736 int i; 2737 uint16_t n; 2738 2739 if (ss == NULL) { 2740 if (session_id == STMF_SESSION_ID_NONE) 2741 return (STMF_INVALID_ARG); 2742 iss = stmf_session_id_to_issptr(session_id, 1); 2743 if (iss == NULL) 2744 return (STMF_NOT_FOUND); 2745 } else { 2746 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 2747 rw_enter(iss->iss_lockp, RW_WRITER); 2748 } 2749 lm = iss->iss_sm; 2750 if (lm == NULL) { 2751 rw_exit(iss->iss_lockp); 2752 return (STMF_NOT_FOUND); 2753 } 2754 2755 if (lun) { 2756 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 2757 ent = (stmf_lun_map_ent_t *) 2758 stmf_get_ent_from_map(iss->iss_sm, n); 2759 } else { 2760 if (itl_handle == NULL) { 2761 rw_exit(iss->iss_lockp); 2762 return (STMF_INVALID_ARG); 2763 } 2764 ent = NULL; 2765 for (i = 0; i < lm->lm_nentries; i++) { 2766 if (lm->lm_plus[i] == NULL) 2767 continue; 2768 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 2769 if (ent->ent_itl_datap && 2770 (ent->ent_itl_datap->itl_handle == itl_handle)) { 2771 break; 2772 } 2773 } 2774 } 2775 if ((ent == NULL) || (ent->ent_lu != lu) || 2776 (ent->ent_itl_datap == NULL)) { 2777 rw_exit(iss->iss_lockp); 2778 return (STMF_NOT_FOUND); 2779 } 2780 itl = ent->ent_itl_datap; 2781 ent->ent_itl_datap = NULL; 2782 rw_exit(iss->iss_lockp); 2783 stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST); 2784 2785 return (STMF_SUCCESS); 2786 } 2787 2788 stmf_status_t 2789 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss, 2790 uint64_t session_id, void **itl_handle_retp) 2791 { 2792 stmf_i_scsi_session_t *iss; 2793 stmf_lun_map_ent_t *ent; 2794 stmf_lun_map_t *lm; 2795 stmf_status_t ret; 2796 int i; 2797 uint16_t n; 2798 2799 if (ss == NULL) { 2800 iss = stmf_session_id_to_issptr(session_id, 1); 2801 if (iss == NULL) 2802 return (STMF_NOT_FOUND); 2803 } else { 2804 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 2805 rw_enter(iss->iss_lockp, RW_WRITER); 2806 } 2807 2808 ent = NULL; 2809 if (lun == NULL) { 2810 lm = iss->iss_sm; 2811 for (i = 0; i < lm->lm_nentries; i++) { 2812 if (lm->lm_plus[i] == NULL) 2813 continue; 2814 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 2815 if (ent->ent_lu == lu) 2816 break; 2817 } 2818 } else { 2819 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 2820 ent = (stmf_lun_map_ent_t *) 2821 stmf_get_ent_from_map(iss->iss_sm, n); 2822 if (lu && (ent->ent_lu != lu)) 2823 ent = NULL; 2824 } 2825 if (ent && ent->ent_itl_datap) { 2826 *itl_handle_retp = ent->ent_itl_datap->itl_handle; 2827 ret = STMF_SUCCESS; 2828 } else { 2829 ret = STMF_NOT_FOUND; 2830 } 2831 2832 rw_exit(iss->iss_lockp); 2833 return (ret); 2834 } 2835 2836 stmf_data_buf_t * 2837 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize, 2838 uint32_t flags) 2839 { 2840 stmf_i_scsi_task_t *itask = 2841 (stmf_i_scsi_task_t *)task->task_stmf_private; 2842 stmf_local_port_t *lport = task->task_lport; 2843 stmf_data_buf_t *dbuf; 2844 
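/*
 * Each task can have at most 4 data buffers outstanding (see
 * stmf_handle_to_buf() and the cap on task_max_nbufs in
 * stmf_post_task()). itask_allocated_buf_map is a bitmap of the slots
 * in use; the stmf_first_zero[] lookup below yields the first free
 * slot (0xff means all slots are taken) and the slot index doubles as
 * the dbuf's db_handle.
 */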
uint8_t ndx; 2845 2846 ndx = stmf_first_zero[itask->itask_allocated_buf_map]; 2847 if (ndx == 0xff) 2848 return (NULL); 2849 dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf( 2850 task, size, pminsize, flags); 2851 if (dbuf) { 2852 task->task_cur_nbufs++; 2853 itask->itask_allocated_buf_map |= (1 << ndx); 2854 dbuf->db_handle = ndx; 2855 return (dbuf); 2856 } 2857 2858 return (NULL); 2859 } 2860 2861 void 2862 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf) 2863 { 2864 stmf_i_scsi_task_t *itask = 2865 (stmf_i_scsi_task_t *)task->task_stmf_private; 2866 stmf_local_port_t *lport = task->task_lport; 2867 2868 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle); 2869 task->task_cur_nbufs--; 2870 lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf); 2871 } 2872 2873 stmf_data_buf_t * 2874 stmf_handle_to_buf(scsi_task_t *task, uint8_t h) 2875 { 2876 stmf_i_scsi_task_t *itask; 2877 2878 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 2879 if (h > 3) 2880 return (NULL); 2881 return (itask->itask_dbufs[h]); 2882 } 2883 2884 /* ARGSUSED */ 2885 struct scsi_task * 2886 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss, 2887 uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id) 2888 { 2889 stmf_lu_t *lu; 2890 stmf_i_scsi_session_t *iss; 2891 stmf_i_lu_t *ilu; 2892 stmf_i_scsi_task_t *itask; 2893 stmf_i_scsi_task_t **ppitask; 2894 scsi_task_t *task; 2895 uint64_t *p; 2896 uint8_t *l; 2897 stmf_lun_map_ent_t *lun_map_ent; 2898 uint16_t cdb_length; 2899 uint16_t luNbr; 2900 uint8_t new_task = 0; 2901 2902 /* 2903 * We allocate 7 extra bytes for CDB to provide a cdb pointer which 2904 * is guaranteed to be 8 byte aligned. Some LU providers like OSD 2905 * depend upon this alignment. 2906 */ 2907 if (cdb_length_in >= 16) 2908 cdb_length = cdb_length_in + 7; 2909 else 2910 cdb_length = 16 + 7; 2911 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 2912 luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 2913 rw_enter(iss->iss_lockp, RW_READER); 2914 lun_map_ent = 2915 (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr); 2916 if (!lun_map_ent) { 2917 lu = dlun0; 2918 } else { 2919 lu = lun_map_ent->ent_lu; 2920 } 2921 ilu = lu->lu_stmf_private; 2922 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 2923 rw_exit(iss->iss_lockp); 2924 return (NULL); 2925 } 2926 do { 2927 if (ilu->ilu_free_tasks == NULL) { 2928 new_task = 1; 2929 break; 2930 } 2931 mutex_enter(&ilu->ilu_task_lock); 2932 for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) && 2933 ((*ppitask)->itask_cdb_buf_size < cdb_length); 2934 ppitask = &((*ppitask)->itask_lu_free_next)) 2935 ; 2936 if (*ppitask) { 2937 itask = *ppitask; 2938 *ppitask = (*ppitask)->itask_lu_free_next; 2939 ilu->ilu_ntasks_free--; 2940 if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free) 2941 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 2942 } else { 2943 new_task = 1; 2944 } 2945 mutex_exit(&ilu->ilu_task_lock); 2946 /* CONSTCOND */ 2947 } while (0); 2948 2949 if (!new_task) { 2950 task = itask->itask_task; 2951 task->task_timeout = 0; 2952 p = (uint64_t *)&task->task_flags; 2953 *p++ = 0; *p++ = 0; p++; p++; *p++ = 0; *p++ = 0; *p = 0; 2954 itask->itask_ncmds = 0; 2955 } else { 2956 task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK, 2957 cdb_length, AF_FORCE_NOSLEEP); 2958 if (task == NULL) { 2959 rw_exit(iss->iss_lockp); 2960 return (NULL); 2961 } 2962 task->task_lu = lu; 2963 l = task->task_lun_no; 2964 l[0] = lun[0]; 2965 l[1] = lun[1]; 2966 l[2] = lun[2]; 2967 l[3] = 
lun[3]; 2968 l[4] = lun[4]; 2969 l[5] = lun[5]; 2970 l[6] = lun[6]; 2971 l[7] = lun[7]; 2972 task->task_cdb = (uint8_t *)task->task_port_private; 2973 if ((ulong_t)(task->task_cdb) & 7ul) { 2974 task->task_cdb = (uint8_t *)(((ulong_t) 2975 (task->task_cdb) + 7ul) & ~(7ul)); 2976 } 2977 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 2978 itask->itask_cdb_buf_size = cdb_length; 2979 } 2980 task->task_session = ss; 2981 task->task_lport = lport; 2982 task->task_cdb_length = cdb_length_in; 2983 itask->itask_flags = ITASK_IN_TRANSITION; 2984 2985 if (new_task) { 2986 if (lu->lu_task_alloc(task) != STMF_SUCCESS) { 2987 rw_exit(iss->iss_lockp); 2988 stmf_free(task); 2989 return (NULL); 2990 } 2991 mutex_enter(&ilu->ilu_task_lock); 2992 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 2993 mutex_exit(&ilu->ilu_task_lock); 2994 rw_exit(iss->iss_lockp); 2995 stmf_free(task); 2996 return (NULL); 2997 } 2998 itask->itask_lu_next = ilu->ilu_tasks; 2999 if (ilu->ilu_tasks) 3000 ilu->ilu_tasks->itask_lu_prev = itask; 3001 ilu->ilu_tasks = itask; 3002 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */ 3003 ilu->ilu_ntasks++; 3004 mutex_exit(&ilu->ilu_task_lock); 3005 } 3006 3007 itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr; 3008 atomic_add_32(itask->itask_ilu_task_cntr, 1); 3009 itask->itask_start_time = ddi_get_lbolt(); 3010 3011 if ((lun_map_ent != NULL) && ((itask->itask_itl_datap = 3012 lun_map_ent->ent_itl_datap) != NULL)) { 3013 atomic_add_32(&itask->itask_itl_datap->itl_counter, 1); 3014 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle; 3015 } else { 3016 itask->itask_itl_datap = NULL; 3017 task->task_lu_itl_handle = NULL; 3018 } 3019 3020 rw_exit(iss->iss_lockp); 3021 return (task); 3022 } 3023 3024 void 3025 stmf_task_lu_free(scsi_task_t *task) 3026 { 3027 stmf_i_scsi_task_t *itask = 3028 (stmf_i_scsi_task_t *)task->task_stmf_private; 3029 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 3030 task->task_session->ss_stmf_private; 3031 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 3032 3033 rw_enter(iss->iss_lockp, RW_READER); 3034 itask->itask_flags = ITASK_IN_FREE_LIST; 3035 mutex_enter(&ilu->ilu_task_lock); 3036 itask->itask_lu_free_next = ilu->ilu_free_tasks; 3037 ilu->ilu_free_tasks = itask; 3038 ilu->ilu_ntasks_free++; 3039 mutex_exit(&ilu->ilu_task_lock); 3040 atomic_add_32(itask->itask_ilu_task_cntr, -1); 3041 rw_exit(iss->iss_lockp); 3042 } 3043 3044 void 3045 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu) 3046 { 3047 uint32_t num_to_release, ndx; 3048 stmf_i_scsi_task_t *itask; 3049 stmf_lu_t *lu = ilu->ilu_lu; 3050 3051 ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free); 3052 3053 /* free half of the minimal free of the free tasks */ 3054 num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2; 3055 if (!num_to_release) { 3056 return; 3057 } 3058 for (ndx = 0; ndx < num_to_release; ndx++) { 3059 mutex_enter(&ilu->ilu_task_lock); 3060 itask = ilu->ilu_free_tasks; 3061 if (itask == NULL) { 3062 mutex_exit(&ilu->ilu_task_lock); 3063 break; 3064 } 3065 ilu->ilu_free_tasks = itask->itask_lu_free_next; 3066 ilu->ilu_ntasks_free--; 3067 mutex_exit(&ilu->ilu_task_lock); 3068 3069 lu->lu_task_free(itask->itask_task); 3070 mutex_enter(&ilu->ilu_task_lock); 3071 if (itask->itask_lu_next) 3072 itask->itask_lu_next->itask_lu_prev = 3073 itask->itask_lu_prev; 3074 if (itask->itask_lu_prev) 3075 itask->itask_lu_prev->itask_lu_next = 3076 itask->itask_lu_next; 3077 else 3078 ilu->ilu_tasks = itask->itask_lu_next; 3079 3080 ilu->ilu_ntasks--; 
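/*
 * The task is now off both the free list and the LU's main task
 * list; the STMF task itself is freed after the lock is dropped.
 */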
3081 mutex_exit(&ilu->ilu_task_lock); 3082 stmf_free(itask->itask_task); 3083 } 3084 } 3085 3086 /* 3087 * Called with stmf_lock held 3088 */ 3089 void 3090 stmf_check_freetask() 3091 { 3092 stmf_i_lu_t *ilu; 3093 clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000); 3094 3095 /* stmf_svc_ilu_draining may get changed after stmf_lock is released */ 3096 while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) { 3097 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 3098 if (!ilu->ilu_ntasks_min_free) { 3099 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 3100 continue; 3101 } 3102 ilu->ilu_flags |= ILU_STALL_DEREGISTER; 3103 mutex_exit(&stmf_state.stmf_lock); 3104 stmf_task_lu_check_freelist(ilu); 3105 /* 3106 * we do not care about the accuracy of 3107 * ilu_ntasks_min_free, so we don't lock here 3108 */ 3109 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 3110 mutex_enter(&stmf_state.stmf_lock); 3111 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER; 3112 cv_broadcast(&stmf_state.stmf_cv); 3113 if (ddi_get_lbolt() >= endtime) 3114 break; 3115 } 3116 } 3117 3118 void 3119 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu) 3120 { 3121 clock_t l = ddi_get_lbolt(); 3122 clock_t ps = drv_usectohz(1000000); 3123 stmf_i_scsi_task_t *itask; 3124 scsi_task_t *task; 3125 uint32_t to; 3126 3127 mutex_enter(&ilu->ilu_task_lock); 3128 for (itask = ilu->ilu_tasks; itask != NULL; 3129 itask = itask->itask_lu_next) { 3130 if (itask->itask_flags & (ITASK_IN_FREE_LIST | 3131 ITASK_BEING_ABORTED)) { 3132 continue; 3133 } 3134 task = itask->itask_task; 3135 if (task->task_timeout == 0) 3136 to = stmf_default_task_timeout; 3137 else 3138 to = task->task_timeout; 3139 if ((itask->itask_start_time + (to * ps)) > l) 3140 continue; 3141 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 3142 STMF_TIMEOUT, NULL); 3143 } 3144 mutex_exit(&ilu->ilu_task_lock); 3145 } 3146 3147 /* 3148 * Called with stmf_lock held 3149 */ 3150 void 3151 stmf_check_ilu_timing() 3152 { 3153 stmf_i_lu_t *ilu; 3154 clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000); 3155 3156 /* stmf_svc_ilu_timing may get changed after stmf_lock is released */ 3157 while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) { 3158 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next; 3159 if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) { 3160 if (ilu->ilu_task_cntr2 == 0) { 3161 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2; 3162 continue; 3163 } 3164 } else { 3165 if (ilu->ilu_task_cntr1 == 0) { 3166 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 3167 continue; 3168 } 3169 } 3170 /* 3171 * If we are here then it means that there is some slowdown 3172 * in tasks on this lu. We need to check. 
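 * (The per-LU task counters alternate between intervals: if the
 * counter that is not currently accumulating is still non-zero,
 * tasks from the previous interval have not drained, so scan this
 * LU for timed-out tasks.)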
3173 */ 3174 ilu->ilu_flags |= ILU_STALL_DEREGISTER; 3175 mutex_exit(&stmf_state.stmf_lock); 3176 stmf_do_ilu_timeouts(ilu); 3177 mutex_enter(&stmf_state.stmf_lock); 3178 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER; 3179 cv_broadcast(&stmf_state.stmf_cv); 3180 if (ddi_get_lbolt() >= endtime) 3181 break; 3182 } 3183 } 3184 3185 /* 3186 * Kills all tasks on a lu except tm_task 3187 */ 3188 void 3189 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s) 3190 { 3191 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3192 stmf_i_scsi_task_t *itask; 3193 3194 mutex_enter(&ilu->ilu_task_lock); 3195 3196 for (itask = ilu->ilu_tasks; itask != NULL; 3197 itask = itask->itask_lu_next) { 3198 if (itask->itask_flags & ITASK_IN_FREE_LIST) 3199 continue; 3200 if (itask->itask_task == tm_task) 3201 continue; 3202 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL); 3203 } 3204 mutex_exit(&ilu->ilu_task_lock); 3205 } 3206 3207 void 3208 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport) 3209 { 3210 int i; 3211 uint8_t map; 3212 3213 if ((map = itask->itask_allocated_buf_map) != 0) { 3214 for (i = 0; i < 4; i++) { 3215 if (map & 1) { 3216 stmf_data_buf_t *dbuf; 3217 3218 dbuf = itask->itask_dbufs[i]; 3219 if (dbuf->db_lu_private) { 3220 dbuf->db_lu_private = NULL; 3221 } 3222 lport->lport_ds->ds_free_data_buf( 3223 lport->lport_ds, dbuf); 3224 } 3225 map >>= 1; 3226 } 3227 itask->itask_allocated_buf_map = 0; 3228 } 3229 } 3230 3231 void 3232 stmf_task_free(scsi_task_t *task) 3233 { 3234 stmf_local_port_t *lport = task->task_lport; 3235 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 3236 task->task_stmf_private; 3237 3238 DTRACE_PROBE1(stmf__task__end, scsi_task_t *, task); 3239 stmf_free_task_bufs(itask, lport); 3240 if (itask->itask_itl_datap) { 3241 if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter, 3242 -1) == 0) { 3243 stmf_release_itl_handle(task->task_lu, 3244 itask->itask_itl_datap); 3245 } 3246 } 3247 lport->lport_task_free(task); 3248 if (itask->itask_worker) { 3249 atomic_add_32(&stmf_cur_ntasks, -1); 3250 atomic_add_32(&itask->itask_worker->worker_ref_count, -1); 3251 } 3252 /* 3253 * After calling stmf_task_lu_free, the task pointer can no longer 3254 * be trusted. 3255 */ 3256 stmf_task_lu_free(task); 3257 } 3258 3259 void 3260 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 3261 { 3262 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 3263 task->task_stmf_private; 3264 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 3265 int nv; 3266 uint32_t old, new; 3267 uint32_t ct; 3268 stmf_worker_t *w, *w1; 3269 uint8_t tm; 3270 3271 if (task->task_max_nbufs > 4) 3272 task->task_max_nbufs = 4; 3273 task->task_cur_nbufs = 0; 3274 /* Latest value of currently running tasks */ 3275 ct = atomic_add_32_nv(&stmf_cur_ntasks, 1); 3276 3277 /* Select the next worker using round robin */ 3278 nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1); 3279 if (nv >= stmf_nworkers_accepting_cmds) { 3280 int s = nv; 3281 do { 3282 nv -= stmf_nworkers_accepting_cmds; 3283 } while (nv >= stmf_nworkers_accepting_cmds); 3284 if (nv < 0) 3285 nv = 0; 3286 /* Its ok if this cas fails */ 3287 (void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter, 3288 s, nv); 3289 } 3290 w = &stmf_workers[nv]; 3291 3292 /* 3293 * A worker can be pinned by interrupt. So select the next one 3294 * if it has lower load. 
3295 */ 3296 if ((nv + 1) >= stmf_nworkers_accepting_cmds) { 3297 w1 = stmf_workers; 3298 } else { 3299 w1 = &stmf_workers[nv + 1]; 3300 } 3301 if (w1->worker_queue_depth < w->worker_queue_depth) 3302 w = w1; 3303 3304 mutex_enter(&w->worker_lock); 3305 if (((w->worker_flags & STMF_WORKER_STARTED) == 0) || 3306 (w->worker_flags & STMF_WORKER_TERMINATE)) { 3307 /* 3308 * Maybe we are in the middle of a change. Just go to 3309 * the 1st worker. 3310 */ 3311 mutex_exit(&w->worker_lock); 3312 w = stmf_workers; 3313 mutex_enter(&w->worker_lock); 3314 } 3315 itask->itask_worker = w; 3316 /* 3317 * Track max system load inside the worker as we already have the 3318 * worker lock (no point implementing another lock). The service 3319 * thread will do the comparisons and figure out the max overall 3320 * system load. 3321 */ 3322 if (w->worker_max_sys_qdepth_pu < ct) 3323 w->worker_max_sys_qdepth_pu = ct; 3324 3325 do { 3326 old = new = itask->itask_flags; 3327 new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE; 3328 if (task->task_mgmt_function) { 3329 tm = task->task_mgmt_function; 3330 if ((tm == TM_TARGET_RESET) || 3331 (tm == TM_TARGET_COLD_RESET) || 3332 (tm == TM_TARGET_WARM_RESET)) { 3333 new |= ITASK_DEFAULT_HANDLING; 3334 } 3335 } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 3336 new |= ITASK_DEFAULT_HANDLING; 3337 } 3338 new &= ~ITASK_IN_TRANSITION; 3339 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3340 itask->itask_worker_next = NULL; 3341 if (w->worker_task_tail) { 3342 w->worker_task_tail->itask_worker_next = itask; 3343 } else { 3344 w->worker_task_head = itask; 3345 } 3346 w->worker_task_tail = itask; 3347 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 3348 w->worker_max_qdepth_pu = w->worker_queue_depth; 3349 } 3350 atomic_add_32(&w->worker_ref_count, 1); 3351 itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK; 3352 itask->itask_ncmds = 1; 3353 if (dbuf) { 3354 itask->itask_allocated_buf_map = 1; 3355 itask->itask_dbufs[0] = dbuf; 3356 dbuf->db_handle = 0; 3357 } else { 3358 itask->itask_allocated_buf_map = 0; 3359 itask->itask_dbufs[0] = NULL; 3360 } 3361 3362 stmf_update_kstat_lu_q(task, kstat_waitq_enter); 3363 stmf_update_kstat_lport_q(task, kstat_waitq_enter); 3364 3365 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 3366 cv_signal(&w->worker_cv); 3367 mutex_exit(&w->worker_lock); 3368 3369 /* 3370 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE 3371 * was set between checking of ILU_RESET_ACTIVE and clearing of the 3372 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here. 3373 */ 3374 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 3375 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL); 3376 } 3377 } 3378 3379 /* 3380 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++ 3381 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already 3382 * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot 3383 * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course 3384 * the LU will make this call only if we call the LU's abort entry point. 3385 * we will only call that entry point if ITASK_KNOWN_TO_LU was set. 3386 * 3387 * Same logic applies for the port. 3388 * 3389 * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU 3390 * and KNOWN_TO_TGT_PORT are reset. 
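 *
 * In short: stmf_queue_task_for_abort() marks the task with
 * ITASK_BEING_ABORTED and queues it to its worker; the worker keeps
 * calling stmf_do_task_abort(), which fires the LU and lport abort
 * entry points at most once each (tracked via ITASK_LU_ABORT_CALLED
 * and ITASK_TGT_PORT_ABORT_CALLED). The completion callbacks,
 * stmf_task_lu_aborted() and stmf_task_lport_aborted(), clear the
 * corresponding KNOWN_TO flags; the task can only be freed once both
 * are clear.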
3391 * 3392 * +++++++++++++++++++++++++++++++++++++++++++++++ 3393 */ 3394 3395 stmf_status_t 3396 stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags) 3397 { 3398 stmf_status_t ret; 3399 3400 stmf_i_scsi_task_t *itask = 3401 (stmf_i_scsi_task_t *)task->task_stmf_private; 3402 3403 if (ioflags & STMF_IOF_LU_DONE) { 3404 uint32_t new, old; 3405 do { 3406 new = old = itask->itask_flags; 3407 if (new & ITASK_BEING_ABORTED) 3408 return (STMF_ABORTED); 3409 new &= ~ITASK_KNOWN_TO_LU; 3410 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3411 } 3412 if (itask->itask_flags & ITASK_BEING_ABORTED) 3413 return (STMF_ABORTED); 3414 #ifdef DEBUG 3415 if (stmf_drop_buf_counter > 0) { 3416 if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) == 3417 1) 3418 return (STMF_SUCCESS); 3419 } 3420 #endif 3421 3422 stmf_update_kstat_lu_io(task, dbuf); 3423 stmf_update_kstat_lport_io(task, dbuf); 3424 3425 DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, task, 3426 stmf_data_buf_t *, dbuf); 3427 ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags); 3428 DTRACE_PROBE2(scsi__xfer__end, scsi_task_t *, task, 3429 stmf_data_buf_t *, dbuf); 3430 return (ret); 3431 } 3432 3433 void 3434 stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof) 3435 { 3436 stmf_i_scsi_task_t *itask = 3437 (stmf_i_scsi_task_t *)task->task_stmf_private; 3438 stmf_worker_t *w = itask->itask_worker; 3439 uint32_t new, old; 3440 uint8_t update_queue_flags, free_it, queue_it, kstat_it; 3441 3442 mutex_enter(&w->worker_lock); 3443 do { 3444 new = old = itask->itask_flags; 3445 if (old & ITASK_BEING_ABORTED) { 3446 mutex_exit(&w->worker_lock); 3447 return; 3448 } 3449 free_it = 0; 3450 kstat_it = 0; 3451 if (iof & STMF_IOF_LPORT_DONE) { 3452 new &= ~ITASK_KNOWN_TO_TGT_PORT; 3453 task->task_completion_status = dbuf->db_xfer_status; 3454 free_it = 1; 3455 kstat_it = 1; 3456 } 3457 /* 3458 * If the task is known to LU then queue it. But if 3459 * it is already queued (multiple completions) then 3460 * just update the buffer information by grabbing the 3461 * worker lock. If the task is not known to LU, 3462 * completed/aborted, then see if we need to 3463 * free this task. 
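 * The whole decision is made inside the atomic_cas_32() retry loop
 * because an LU completion, an lport completion and an abort can all
 * race on itask_flags.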
3464 */ 3465 if (old & ITASK_KNOWN_TO_LU) { 3466 free_it = 0; 3467 update_queue_flags = 1; 3468 if (old & ITASK_IN_WORKER_QUEUE) { 3469 queue_it = 0; 3470 } else { 3471 queue_it = 1; 3472 new |= ITASK_IN_WORKER_QUEUE; 3473 } 3474 } else { 3475 update_queue_flags = 0; 3476 queue_it = 0; 3477 } 3478 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3479 3480 if (kstat_it) { 3481 stmf_update_kstat_lu_q(task, kstat_runq_exit); 3482 stmf_update_kstat_lport_q(task, kstat_runq_exit); 3483 } 3484 if (update_queue_flags) { 3485 uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE; 3486 3487 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS); 3488 itask->itask_cmd_stack[itask->itask_ncmds++] = cmd; 3489 if (queue_it) { 3490 itask->itask_worker_next = NULL; 3491 if (w->worker_task_tail) { 3492 w->worker_task_tail->itask_worker_next = itask; 3493 } else { 3494 w->worker_task_head = itask; 3495 } 3496 w->worker_task_tail = itask; 3497 if (++(w->worker_queue_depth) > 3498 w->worker_max_qdepth_pu) { 3499 w->worker_max_qdepth_pu = w->worker_queue_depth; 3500 } 3501 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 3502 cv_signal(&w->worker_cv); 3503 } 3504 } 3505 mutex_exit(&w->worker_lock); 3506 3507 if (free_it) { 3508 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 3509 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 3510 ITASK_BEING_ABORTED)) == 0) { 3511 stmf_task_free(task); 3512 } 3513 } 3514 } 3515 3516 stmf_status_t 3517 stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags) 3518 { 3519 DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task); 3520 3521 stmf_i_scsi_task_t *itask = 3522 (stmf_i_scsi_task_t *)task->task_stmf_private; 3523 if (ioflags & STMF_IOF_LU_DONE) { 3524 uint32_t new, old; 3525 do { 3526 new = old = itask->itask_flags; 3527 if (new & ITASK_BEING_ABORTED) 3528 return (STMF_ABORTED); 3529 new &= ~ITASK_KNOWN_TO_LU; 3530 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3531 } 3532 3533 if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) { 3534 return (STMF_SUCCESS); 3535 } 3536 3537 if (itask->itask_flags & ITASK_BEING_ABORTED) 3538 return (STMF_ABORTED); 3539 3540 if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) { 3541 task->task_status_ctrl = 0; 3542 task->task_resid = 0; 3543 } else if (task->task_cmd_xfer_length > 3544 task->task_expected_xfer_length) { 3545 task->task_status_ctrl = TASK_SCTRL_OVER; 3546 task->task_resid = task->task_cmd_xfer_length - 3547 task->task_expected_xfer_length; 3548 } else if (task->task_nbytes_transferred < 3549 task->task_expected_xfer_length) { 3550 task->task_status_ctrl = TASK_SCTRL_UNDER; 3551 task->task_resid = task->task_expected_xfer_length - 3552 task->task_nbytes_transferred; 3553 } else { 3554 task->task_status_ctrl = 0; 3555 task->task_resid = 0; 3556 } 3557 return (task->task_lport->lport_send_status(task, ioflags)); 3558 } 3559 3560 void 3561 stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof) 3562 { 3563 stmf_i_scsi_task_t *itask = 3564 (stmf_i_scsi_task_t *)task->task_stmf_private; 3565 stmf_worker_t *w = itask->itask_worker; 3566 uint32_t new, old; 3567 uint8_t free_it, queue_it, kstat_it; 3568 3569 mutex_enter(&w->worker_lock); 3570 do { 3571 new = old = itask->itask_flags; 3572 if (old & ITASK_BEING_ABORTED) { 3573 mutex_exit(&w->worker_lock); 3574 return; 3575 } 3576 free_it = 0; 3577 kstat_it = 0; 3578 if (iof & STMF_IOF_LPORT_DONE) { 3579 new &= ~ITASK_KNOWN_TO_TGT_PORT; 3580 free_it = 1; 3581 kstat_it = 1; 3582 } 3583 /* 3584 * If the task is known to 
LU then queue it. But if 3585 * it is already queued (multiple completions) then 3586 * just update the buffer information by grabbing the 3587 * worker lock. If the task is not known to LU, 3588 * completed/aborted, then see if we need to 3589 * free this task. 3590 */ 3591 if (old & ITASK_KNOWN_TO_LU) { 3592 free_it = 0; 3593 queue_it = 1; 3594 if (old & ITASK_IN_WORKER_QUEUE) { 3595 cmn_err(CE_PANIC, "status completion received" 3596 " when task is already in worker queue " 3597 " task = %p", (void *)task); 3598 } 3599 new |= ITASK_IN_WORKER_QUEUE; 3600 } else { 3601 queue_it = 0; 3602 } 3603 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3604 task->task_completion_status = s; 3605 3606 if (kstat_it) { 3607 stmf_update_kstat_lu_q(task, kstat_runq_exit); 3608 stmf_update_kstat_lport_q(task, kstat_runq_exit); 3609 } 3610 3611 if (queue_it) { 3612 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS); 3613 itask->itask_cmd_stack[itask->itask_ncmds++] = 3614 ITASK_CMD_STATUS_DONE; 3615 itask->itask_worker_next = NULL; 3616 if (w->worker_task_tail) { 3617 w->worker_task_tail->itask_worker_next = itask; 3618 } else { 3619 w->worker_task_head = itask; 3620 } 3621 w->worker_task_tail = itask; 3622 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 3623 w->worker_max_qdepth_pu = w->worker_queue_depth; 3624 } 3625 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 3626 cv_signal(&w->worker_cv); 3627 } 3628 mutex_exit(&w->worker_lock); 3629 3630 if (free_it) { 3631 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 3632 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 3633 ITASK_BEING_ABORTED)) == 0) { 3634 stmf_task_free(task); 3635 } else { 3636 cmn_err(CE_PANIC, "LU is done with the task but LPORT " 3637 " is not done, itask %p", (void *)itask); 3638 } 3639 } 3640 } 3641 3642 void 3643 stmf_task_lu_done(scsi_task_t *task) 3644 { 3645 stmf_i_scsi_task_t *itask = 3646 (stmf_i_scsi_task_t *)task->task_stmf_private; 3647 stmf_worker_t *w = itask->itask_worker; 3648 uint32_t new, old; 3649 3650 mutex_enter(&w->worker_lock); 3651 do { 3652 new = old = itask->itask_flags; 3653 if (old & ITASK_BEING_ABORTED) { 3654 mutex_exit(&w->worker_lock); 3655 return; 3656 } 3657 if (old & ITASK_IN_WORKER_QUEUE) { 3658 cmn_err(CE_PANIC, "task_lu_done received" 3659 " when task is in worker queue " 3660 " task = %p", (void *)task); 3661 } 3662 new &= ~ITASK_KNOWN_TO_LU; 3663 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3664 3665 mutex_exit(&w->worker_lock); 3666 3667 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 3668 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 3669 ITASK_BEING_ABORTED)) == 0) { 3670 stmf_task_free(task); 3671 } else { 3672 cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but " 3673 " the task is still not done, task = %p", (void *)task); 3674 } 3675 } 3676 3677 void 3678 stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s) 3679 { 3680 stmf_i_scsi_task_t *itask = 3681 (stmf_i_scsi_task_t *)task->task_stmf_private; 3682 stmf_worker_t *w; 3683 uint32_t old, new; 3684 3685 do { 3686 old = new = itask->itask_flags; 3687 if ((old & ITASK_BEING_ABORTED) || 3688 ((old & (ITASK_KNOWN_TO_TGT_PORT | 3689 ITASK_KNOWN_TO_LU)) == 0)) { 3690 return; 3691 } 3692 new |= ITASK_BEING_ABORTED; 3693 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3694 task->task_completion_status = s; 3695 itask->itask_start_time = ddi_get_lbolt(); 3696 3697 if (((w = itask->itask_worker) == NULL) || 3698 (itask->itask_flags & ITASK_IN_TRANSITION)) { 3699 return; 3700 } 
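/*
 * No worker is associated with the task yet (or it is still in
 * transition), so there is nothing to queue; the ITASK_BEING_ABORTED
 * flag set above will be acted upon once the task reaches a worker.
 */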
3701
3702 /* Queue it and get out */
3703 mutex_enter(&w->worker_lock);
3704 if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
3705 mutex_exit(&w->worker_lock);
3706 return;
3707 }
3708 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
3709 itask->itask_worker_next = NULL;
3710 if (w->worker_task_tail) {
3711 w->worker_task_tail->itask_worker_next = itask;
3712 } else {
3713 w->worker_task_head = itask;
3714 }
3715 w->worker_task_tail = itask;
3716 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
3717 w->worker_max_qdepth_pu = w->worker_queue_depth;
3718 }
3719 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
3720 cv_signal(&w->worker_cv);
3721 mutex_exit(&w->worker_lock);
3722 }
3723
3724 void
3725 stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
3726 {
3727 stmf_i_scsi_task_t *itask = NULL;
3728 uint32_t old, new, f, rf;
3729
3730 DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
3731 stmf_status_t, s);
3732
3733 switch (abort_cmd) {
3734 case STMF_QUEUE_ABORT_LU:
3735 stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
3736 return;
3737 case STMF_QUEUE_TASK_ABORT:
3738 stmf_queue_task_for_abort(task, s);
3739 return;
3740 case STMF_REQUEUE_TASK_ABORT_LPORT:
3741 rf = ITASK_TGT_PORT_ABORT_CALLED;
3742 f = ITASK_KNOWN_TO_TGT_PORT;
3743 break;
3744 case STMF_REQUEUE_TASK_ABORT_LU:
3745 rf = ITASK_LU_ABORT_CALLED;
3746 f = ITASK_KNOWN_TO_LU;
3747 break;
3748 default:
3749 return;
3750 }
3751 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
3752 f |= ITASK_BEING_ABORTED | rf;
3753 do {
3754 old = new = itask->itask_flags;
3755 if ((old & f) != f) {
3756 return;
3757 }
3758 new &= ~rf;
3759 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
3760 }
3761
3762 void
3763 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
3764 {
3765 char info[STMF_CHANGE_INFO_LEN];
3766 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
3767 unsigned long long st;
3768
3769 st = s; /* gcc fix */
3770 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
3771 (void) snprintf(info, STMF_CHANGE_INFO_LEN,
3772 "task %p, lu failed to abort ret=%llx", (void *)task, st);
3773 } else if ((iof & STMF_IOF_LU_DONE) == 0) {
3774 (void) snprintf(info, STMF_CHANGE_INFO_LEN,
3775 "Task aborted but LU is not finished, task ="
3776 "%p, s=%llx, iof=%x", (void *)task, st, iof);
3777 } else {
3778 /*
3779 * LU abort completed successfully
3780 */
3781 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
3782 return;
3783 }
3784
3785 info[STMF_CHANGE_INFO_LEN - 1] = 0;
3786 stmf_abort_task_offline(task, 1, info);
3787 }
3788
3789 void
3790 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
3791 {
3792 char info[STMF_CHANGE_INFO_LEN];
3793 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
3794 unsigned long long st;
3795 uint32_t old, new;
3796
3797 st = s;
3798 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
3799 (void) snprintf(info, STMF_CHANGE_INFO_LEN,
3800 "task %p, tgt port failed to abort ret=%llx", (void *)task,
3801 st);
3802 } else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
3803 (void) snprintf(info, STMF_CHANGE_INFO_LEN,
3804 "Task aborted but tgt port is not finished, "
3805 "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
3806 } else {
3807 /*
3808 * LPORT abort completed successfully
3809 */
3810 do {
3811 old = new = itask->itask_flags;
3812 if (!(old & ITASK_KNOWN_TO_TGT_PORT))
3813 return;
3814 new &= ~ITASK_KNOWN_TO_TGT_PORT;
3815 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
3816
3817 if
(!(itask->itask_flags & ITASK_KSTAT_IN_RUNQ)) { 3818 stmf_update_kstat_lu_q(task, kstat_waitq_exit); 3819 stmf_update_kstat_lport_q(task, kstat_waitq_exit); 3820 } else { 3821 stmf_update_kstat_lu_q(task, kstat_runq_exit); 3822 stmf_update_kstat_lport_q(task, kstat_runq_exit); 3823 } 3824 return; 3825 } 3826 3827 info[STMF_CHANGE_INFO_LEN - 1] = 0; 3828 stmf_abort_task_offline(task, 0, info); 3829 } 3830 3831 stmf_status_t 3832 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout) 3833 { 3834 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 3835 task->task_stmf_private; 3836 stmf_worker_t *w = itask->itask_worker; 3837 int i; 3838 3839 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU); 3840 mutex_enter(&w->worker_lock); 3841 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 3842 mutex_exit(&w->worker_lock); 3843 return (STMF_BUSY); 3844 } 3845 for (i = 0; i < itask->itask_ncmds; i++) { 3846 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) { 3847 mutex_exit(&w->worker_lock); 3848 return (STMF_SUCCESS); 3849 } 3850 } 3851 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU; 3852 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 3853 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 3854 } else { 3855 clock_t t = drv_usectohz(timeout * 1000); 3856 if (t == 0) 3857 t = 1; 3858 itask->itask_poll_timeout = ddi_get_lbolt() + t; 3859 } 3860 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 3861 itask->itask_worker_next = NULL; 3862 if (w->worker_task_tail) { 3863 w->worker_task_tail->itask_worker_next = itask; 3864 } else { 3865 w->worker_task_head = itask; 3866 } 3867 w->worker_task_tail = itask; 3868 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 3869 w->worker_max_qdepth_pu = w->worker_queue_depth; 3870 } 3871 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 3872 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 3873 cv_signal(&w->worker_cv); 3874 } 3875 mutex_exit(&w->worker_lock); 3876 return (STMF_SUCCESS); 3877 } 3878 3879 stmf_status_t 3880 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout) 3881 { 3882 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 3883 task->task_stmf_private; 3884 stmf_worker_t *w = itask->itask_worker; 3885 int i; 3886 3887 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT); 3888 mutex_enter(&w->worker_lock); 3889 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 3890 mutex_exit(&w->worker_lock); 3891 return (STMF_BUSY); 3892 } 3893 for (i = 0; i < itask->itask_ncmds; i++) { 3894 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) { 3895 mutex_exit(&w->worker_lock); 3896 return (STMF_SUCCESS); 3897 } 3898 } 3899 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT; 3900 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 3901 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 3902 } else { 3903 clock_t t = drv_usectohz(timeout * 1000); 3904 if (t == 0) 3905 t = 1; 3906 itask->itask_poll_timeout = ddi_get_lbolt() + t; 3907 } 3908 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 3909 itask->itask_worker_next = NULL; 3910 if (w->worker_task_tail) { 3911 w->worker_task_tail->itask_worker_next = itask; 3912 } else { 3913 w->worker_task_head = itask; 3914 } 3915 w->worker_task_tail = itask; 3916 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 3917 w->worker_max_qdepth_pu = w->worker_queue_depth; 3918 } 3919 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 3920 cv_signal(&w->worker_cv); 3921 } 3922 mutex_exit(&w->worker_lock); 3923 return (STMF_SUCCESS); 3924 } 3925 3926 void 3927 
stmf_do_task_abort(scsi_task_t *task) 3928 { 3929 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 3930 stmf_lu_t *lu; 3931 stmf_local_port_t *lport; 3932 unsigned long long ret; 3933 uint32_t old, new; 3934 uint8_t call_lu_abort, call_port_abort; 3935 char info[STMF_CHANGE_INFO_LEN]; 3936 3937 lu = task->task_lu; 3938 lport = task->task_lport; 3939 do { 3940 old = new = itask->itask_flags; 3941 if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) == 3942 ITASK_KNOWN_TO_LU) { 3943 new |= ITASK_LU_ABORT_CALLED; 3944 call_lu_abort = 1; 3945 } else { 3946 call_lu_abort = 0; 3947 } 3948 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3949 3950 if (call_lu_abort) { 3951 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) { 3952 ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 3953 } else { 3954 ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 3955 } 3956 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 3957 stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE); 3958 } else if (ret == STMF_BUSY) { 3959 atomic_and_32(&itask->itask_flags, 3960 ~ITASK_LU_ABORT_CALLED); 3961 } else if (ret != STMF_SUCCESS) { 3962 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 3963 "Abort failed by LU %p, ret %llx", (void *)lu, ret); 3964 info[STMF_CHANGE_INFO_LEN - 1] = 0; 3965 stmf_abort_task_offline(task, 1, info); 3966 } 3967 } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) { 3968 if (ddi_get_lbolt() > (itask->itask_start_time + 3969 STMF_SEC2TICK(lu->lu_abort_timeout? 3970 lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) { 3971 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 3972 "lu abort timed out"); 3973 info[STMF_CHANGE_INFO_LEN - 1] = 0; 3974 stmf_abort_task_offline(itask->itask_task, 1, info); 3975 } 3976 } 3977 3978 do { 3979 old = new = itask->itask_flags; 3980 if ((old & (ITASK_KNOWN_TO_TGT_PORT | 3981 ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) { 3982 new |= ITASK_TGT_PORT_ABORT_CALLED; 3983 call_port_abort = 1; 3984 } else { 3985 call_port_abort = 0; 3986 } 3987 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 3988 if (call_port_abort) { 3989 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0); 3990 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 3991 stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE); 3992 } else if (ret == STMF_BUSY) { 3993 atomic_and_32(&itask->itask_flags, 3994 ~ITASK_TGT_PORT_ABORT_CALLED); 3995 } else if (ret != STMF_SUCCESS) { 3996 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 3997 "Abort failed by tgt port %p ret %llx", 3998 (void *)lport, ret); 3999 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4000 stmf_abort_task_offline(task, 0, info); 4001 } 4002 } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) { 4003 if (ddi_get_lbolt() > (itask->itask_start_time + 4004 STMF_SEC2TICK(lport->lport_abort_timeout? 
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, STMF_CHANGE_INFO_LEN,
			    "lport abort timed out");
			info[STMF_CHANGE_INFO_LEN - 1] = 0;
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
}

stmf_status_t
stmf_ctl(int cmd, void *obj, void *arg)
{
	stmf_status_t ret;
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;

	mutex_enter(&stmf_state.stmf_lock);
	ret = STMF_INVALID_ARG;
	if (cmd & STMF_CMD_LU_OP) {
		ilu = stmf_lookup_lu((stmf_lu_t *)obj);
		if (ilu == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lu__state__change,
		    stmf_lu_t *, ilu->ilu_lu,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else if (cmd & STMF_CMD_LPORT_OP) {
		ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
		if (ilport == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lport__state__change,
		    stmf_local_port_t *, ilport->ilport_lport,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else {
		goto stmf_ctl_lock_exit;
	}

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		if (ilu->ilu_state == STMF_STATE_ONLINE) {
			ret = STMF_ALREADY;
			goto stmf_ctl_lock_exit;
		}
		if (ilu->ilu_state != STMF_STATE_OFFLINE) {
			ret = STMF_INVALID_ARG;
			goto stmf_ctl_lock_exit;
		}
		ilu->ilu_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_ONLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_ONLINING) {
			ret = STMF_INVALID_ARG;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_ONLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		} else {
			/* XXX: should throw a message and record more data */
			ilu->ilu_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LU_OFFLINE:
		if (ilu->ilu_state == STMF_STATE_OFFLINE) {
			ret = STMF_ALREADY;
			goto stmf_ctl_lock_exit;
		}
		if (ilu->ilu_state != STMF_STATE_ONLINE) {
			ret = STMF_INVALID_ARG;
			goto stmf_ctl_lock_exit;
		}
		ilu->ilu_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_OFFLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_OFFLINING) {
			ret = STMF_INVALID_ARG;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilu->ilu_state = STMF_STATE_ONLINE;
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	/*
	 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
	 * It's related to hardware disable/enable.
4116 */ 4117 case STMF_CMD_LPORT_ONLINE: 4118 if (ilport->ilport_state == STMF_STATE_ONLINE) { 4119 ret = STMF_ALREADY; 4120 goto stmf_ctl_lock_exit; 4121 } 4122 if (ilport->ilport_state != STMF_STATE_OFFLINE) { 4123 ret = STMF_INVALID_ARG; 4124 goto stmf_ctl_lock_exit; 4125 } 4126 4127 /* 4128 * Only user request can recover the port from the 4129 * FORCED_OFFLINE state 4130 */ 4131 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) { 4132 if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) { 4133 ret = STMF_FAILURE; 4134 goto stmf_ctl_lock_exit; 4135 } 4136 } 4137 4138 /* 4139 * Avoid too frequent request to online 4140 */ 4141 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) { 4142 ilport->ilport_online_times = 0; 4143 ilport->ilport_avg_interval = 0; 4144 } 4145 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) && 4146 (ilport->ilport_online_times >= 4)) { 4147 ret = STMF_FAILURE; 4148 ilport->ilport_flags |= ILPORT_FORCED_OFFLINE; 4149 stmf_trace(NULL, "stmf_ctl: too frequent request to " 4150 "online the port"); 4151 cmn_err(CE_WARN, "stmf_ctl: too frequent request to " 4152 "online the port, set FORCED_OFFLINE now"); 4153 goto stmf_ctl_lock_exit; 4154 } 4155 if (ilport->ilport_online_times > 0) { 4156 if (ilport->ilport_online_times == 1) { 4157 ilport->ilport_avg_interval = ddi_get_lbolt() - 4158 ilport->ilport_last_online_clock; 4159 } else { 4160 ilport->ilport_avg_interval = 4161 (ilport->ilport_avg_interval + 4162 ddi_get_lbolt() - 4163 ilport->ilport_last_online_clock) >> 1; 4164 } 4165 } 4166 ilport->ilport_last_online_clock = ddi_get_lbolt(); 4167 ilport->ilport_online_times++; 4168 4169 /* 4170 * Submit online service request 4171 */ 4172 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE; 4173 ilport->ilport_state = STMF_STATE_ONLINING; 4174 mutex_exit(&stmf_state.stmf_lock); 4175 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4176 break; 4177 4178 case STMF_CMD_LPORT_ONLINE_COMPLETE: 4179 if (ilport->ilport_state != STMF_STATE_ONLINING) { 4180 ret = STMF_INVALID_ARG; 4181 goto stmf_ctl_lock_exit; 4182 } 4183 if (((stmf_change_status_t *)arg)->st_completion_status == 4184 STMF_SUCCESS) { 4185 ilport->ilport_state = STMF_STATE_ONLINE; 4186 mutex_exit(&stmf_state.stmf_lock); 4187 ((stmf_local_port_t *)obj)->lport_ctl( 4188 (stmf_local_port_t *)obj, 4189 STMF_ACK_LPORT_ONLINE_COMPLETE, arg); 4190 mutex_enter(&stmf_state.stmf_lock); 4191 } else { 4192 ilport->ilport_state = STMF_STATE_OFFLINE; 4193 } 4194 ret = STMF_SUCCESS; 4195 goto stmf_ctl_lock_exit; 4196 4197 case STMF_CMD_LPORT_OFFLINE: 4198 if (ilport->ilport_state == STMF_STATE_OFFLINE) { 4199 ret = STMF_ALREADY; 4200 goto stmf_ctl_lock_exit; 4201 } 4202 if (ilport->ilport_state != STMF_STATE_ONLINE) { 4203 ret = STMF_INVALID_ARG; 4204 goto stmf_ctl_lock_exit; 4205 } 4206 ilport->ilport_state = STMF_STATE_OFFLINING; 4207 mutex_exit(&stmf_state.stmf_lock); 4208 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4209 break; 4210 4211 case STMF_CMD_LPORT_OFFLINE_COMPLETE: 4212 if (ilport->ilport_state != STMF_STATE_OFFLINING) { 4213 ret = STMF_INVALID_ARG; 4214 goto stmf_ctl_lock_exit; 4215 } 4216 if (((stmf_change_status_t *)arg)->st_completion_status == 4217 STMF_SUCCESS) { 4218 ilport->ilport_state = STMF_STATE_OFFLINE; 4219 mutex_exit(&stmf_state.stmf_lock); 4220 ((stmf_local_port_t *)obj)->lport_ctl( 4221 (stmf_local_port_t *)obj, 4222 STMF_ACK_LPORT_OFFLINE_COMPLETE, arg); 4223 mutex_enter(&stmf_state.stmf_lock); 4224 } else { 4225 ilport->ilport_state = STMF_STATE_ONLINE; 4226 } 4227 
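		/*
		 * Sketch of the state machine this switch implements; the
		 * LU and LPORT command pairs follow the same shape:
		 *
		 *	OFFLINE   --_ONLINE-->           ONLINING (svc request
		 *	                                 queued)
		 *	ONLINING  --_ONLINE_COMPLETE-->  ONLINE on success,
		 *	                                 else back to OFFLINE
		 *	ONLINE    --_OFFLINE-->          OFFLINING (svc request
		 *	                                 queued)
		 *	OFFLINING --_OFFLINE_COMPLETE--> OFFLINE on success,
		 *	                                 else back to ONLINE
		 *
		 * Any other starting state gets STMF_INVALID_ARG, and a
		 * repeated request gets STMF_ALREADY.
		 */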
mutex_exit(&stmf_state.stmf_lock); 4228 break; 4229 4230 default: 4231 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd); 4232 ret = STMF_INVALID_ARG; 4233 goto stmf_ctl_lock_exit; 4234 } 4235 4236 return (STMF_SUCCESS); 4237 4238 stmf_ctl_lock_exit:; 4239 mutex_exit(&stmf_state.stmf_lock); 4240 return (ret); 4241 } 4242 4243 /* ARGSUSED */ 4244 stmf_status_t 4245 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 4246 uint32_t *bufsizep) 4247 { 4248 return (STMF_NOT_SUPPORTED); 4249 } 4250 4251 /* ARGSUSED */ 4252 stmf_status_t 4253 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 4254 uint32_t *bufsizep) 4255 { 4256 uint32_t cl = SI_GET_CLASS(cmd); 4257 4258 if (cl == SI_STMF) { 4259 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep)); 4260 } 4261 if (cl == SI_LPORT) { 4262 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1, 4263 arg2, buf, bufsizep)); 4264 } else if (cl == SI_LU) { 4265 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf, 4266 bufsizep)); 4267 } 4268 4269 return (STMF_NOT_SUPPORTED); 4270 } 4271 4272 /* 4273 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by 4274 * stmf to register local ports. The ident should have 20 bytes in buffer 4275 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string. 4276 */ 4277 void 4278 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn, 4279 uint8_t protocol_id) 4280 { 4281 char wwn_str[20+1]; 4282 4283 sdid->protocol_id = protocol_id; 4284 sdid->piv = 1; 4285 sdid->code_set = CODE_SET_ASCII; 4286 sdid->association = ID_IS_TARGET_PORT; 4287 sdid->ident_length = 20; 4288 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */ 4289 (void) snprintf(wwn_str, sizeof (wwn_str), 4290 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X", 4291 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]); 4292 bcopy(wwn_str, (char *)sdid->ident, 20); 4293 } 4294 4295 4296 stmf_xfer_data_t * 4297 stmf_prepare_tpgs_data() 4298 { 4299 stmf_xfer_data_t *xd; 4300 stmf_i_local_port_t *ilport; 4301 uint8_t *p; 4302 uint32_t sz, asz, nports; 4303 4304 mutex_enter(&stmf_state.stmf_lock); 4305 /* The spec only allows for 255 ports to be reported */ 4306 nports = min(stmf_state.stmf_nlports, 255); 4307 sz = (nports * 4) + 12; 4308 asz = sz + sizeof (*xd) - 4; 4309 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP); 4310 if (xd == NULL) { 4311 mutex_exit(&stmf_state.stmf_lock); 4312 return (NULL); 4313 } 4314 xd->alloc_size = asz; 4315 xd->size_left = sz; 4316 4317 p = xd->buf; 4318 4319 *((uint32_t *)p) = BE_32(sz - 4); 4320 p += 4; 4321 p[0] = 0x80; /* PREF */ 4322 p[1] = 1; /* AO_SUP */ 4323 p[7] = nports & 0xff; 4324 p += 8; 4325 for (ilport = stmf_state.stmf_ilportlist; ilport && nports; 4326 nports++, ilport = ilport->ilport_next, p += 4) { 4327 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 4328 } 4329 mutex_exit(&stmf_state.stmf_lock); 4330 4331 return (xd); 4332 } 4333 4334 struct scsi_devid_desc * 4335 stmf_scsilib_get_devid_desc(uint16_t rtpid) 4336 { 4337 scsi_devid_desc_t *devid = NULL; 4338 stmf_i_local_port_t *ilport; 4339 4340 mutex_enter(&stmf_state.stmf_lock); 4341 4342 for (ilport = stmf_state.stmf_ilportlist; ilport; 4343 ilport = ilport->ilport_next) { 4344 if (ilport->ilport_rtpid == rtpid) { 4345 scsi_devid_desc_t *id = ilport->ilport_lport->lport_id; 4346 uint32_t id_sz = sizeof (scsi_devid_desc_t) - 1 + 4347 id->ident_length; 4348 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz, 4349 KM_NOSLEEP); 4350 if (devid != NULL) { 4351 bcopy(id, devid, id_sz); 4352 
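			/*
			 * Sizing note: scsi_devid_desc_t ends in a one-byte
			 * ident[] field, so a complete descriptor occupies
			 * sizeof (scsi_devid_desc_t) - 1 + ident_length
			 * bytes. That is why id_sz, not sizeof, drives both
			 * the kmem_zalloc() and the bcopy() above.
			 */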
			}
			break;
		}
	}

	mutex_exit(&stmf_state.stmf_lock);
	return (devid);
}

uint16_t
stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
{
	stmf_i_local_port_t *ilport;
	scsi_devid_desc_t *id;
	uint16_t rtpid = 0;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		id = ilport->ilport_lport->lport_id;
		if ((devid->ident_length == id->ident_length) &&
		    (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
			rtpid = ilport->ilport_rtpid;
			break;
		}
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (rtpid);
}

static uint16_t stmf_lu_id_gen_number = 0;

stmf_status_t
stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
{
	uint8_t *p;
	struct timeval32 timestamp32;
	uint32_t *t = (uint32_t *)&timestamp32;
	struct ether_addr mac;
	uint8_t *e = (uint8_t *)&mac;

	if (company_id == COMPANY_ID_NONE)
		company_id = COMPANY_ID_SUN;

	if (lu_id->ident_length != 0x10)
		return (STMF_INVALID_ARG);

	p = (uint8_t *)lu_id;

	atomic_add_16(&stmf_lu_id_gen_number, 1);

	p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
	p[4] = ((company_id >> 20) & 0xf) | 0x60;
	p[5] = (company_id >> 12) & 0xff;
	p[6] = (company_id >> 4) & 0xff;
	p[7] = (company_id << 4) & 0xf0;
	if (!localetheraddr((struct ether_addr *)NULL, &mac)) {
		int hid = BE_32((int)zone_get_hostid(NULL));
		e[0] = (hid >> 24) & 0xff;
		e[1] = (hid >> 16) & 0xff;
		e[2] = (hid >> 8) & 0xff;
		e[3] = hid & 0xff;
		e[4] = e[5] = 0;
	}
	bcopy(e, p+8, 6);
	uniqtime32(&timestamp32);
	*t = BE_32(*t);
	bcopy(t, p+14, 4);
	p[18] = (stmf_lu_id_gen_number >> 8) & 0xff;
	p[19] = stmf_lu_id_gen_number & 0xff;

	return (STMF_SUCCESS);
}

/*
 * saa is sense key, ASC, ASCQ
 */
void
stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
{
	uint8_t sd[18];
	task->task_scsi_status = st;
	if (st == 2) {
		bzero(sd, 18);
		sd[0] = 0x70;
		sd[2] = (saa >> 16) & 0xf;
		sd[7] = 10;
		sd[12] = (saa >> 8) & 0xff;
		sd[13] = saa & 0xff;
		task->task_sense_data = sd;
		task->task_sense_length = 18;
	} else {
		task->task_sense_data = NULL;
		task->task_sense_length = 0;
	}
	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
}

uint32_t
stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
    uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
{
	uint8_t *p = NULL;
	uint8_t small_buf[32];
	uint32_t sz = 0;
	uint32_t n = 4;
	uint32_t m = 0;
	uint32_t last_bit = 0;

	if (page_len < 4)
		return (0);
	if (page_len > 65535)
		page_len = 65535;

	page[0] = byte0;
	page[1] = 0x83;

	/* CONSTCOND */
	while (1) {
		m += sz;
		if (sz && (page_len > n)) {
			uint32_t copysz;
			copysz = page_len > (n + sz) ?
sz : page_len - n; 4475 bcopy(p, page + n, copysz); 4476 n += copysz; 4477 } 4478 vpd_mask &= ~last_bit; 4479 if (vpd_mask == 0) 4480 break; 4481 4482 if (vpd_mask & STMF_VPD_LU_ID) { 4483 last_bit = STMF_VPD_LU_ID; 4484 sz = task->task_lu->lu_id->ident_length + 4; 4485 p = (uint8_t *)task->task_lu->lu_id; 4486 continue; 4487 } else if (vpd_mask & STMF_VPD_TARGET_ID) { 4488 last_bit = STMF_VPD_TARGET_ID; 4489 sz = task->task_lport->lport_id->ident_length + 4; 4490 p = (uint8_t *)task->task_lport->lport_id; 4491 continue; 4492 } else if (vpd_mask & STMF_VPD_TP_GROUP) { 4493 last_bit = STMF_VPD_TP_GROUP; 4494 p = small_buf; 4495 bzero(p, 8); 4496 p[0] = 1; 4497 p[1] = 0x15; 4498 p[3] = 4; 4499 /* Group ID is always 0 */ 4500 sz = 8; 4501 continue; 4502 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) { 4503 stmf_i_local_port_t *ilport; 4504 4505 last_bit = STMF_VPD_RELATIVE_TP_ID; 4506 p = small_buf; 4507 bzero(p, 8); 4508 p[0] = 1; 4509 p[1] = 0x14; 4510 p[3] = 4; 4511 ilport = (stmf_i_local_port_t *) 4512 task->task_lport->lport_stmf_private; 4513 p[6] = (ilport->ilport_rtpid >> 8) & 0xff; 4514 p[7] = ilport->ilport_rtpid & 0xff; 4515 sz = 8; 4516 continue; 4517 } else { 4518 cmn_err(CE_WARN, "Invalid vpd_mask"); 4519 break; 4520 } 4521 } 4522 4523 page[2] = (m >> 8) & 0xff; 4524 page[3] = m & 0xff; 4525 4526 return (n); 4527 } 4528 4529 void 4530 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf) 4531 { 4532 stmf_i_scsi_task_t *itask = 4533 (stmf_i_scsi_task_t *)task->task_stmf_private; 4534 stmf_xfer_data_t *xd; 4535 uint32_t sz, minsz; 4536 4537 itask->itask_flags |= ITASK_DEFAULT_HANDLING; 4538 task->task_cmd_xfer_length = 4539 ((((uint32_t)task->task_cdb[6]) << 24) | 4540 (((uint32_t)task->task_cdb[7]) << 16) | 4541 (((uint32_t)task->task_cdb[8]) << 8) | 4542 ((uint32_t)task->task_cdb[9])); 4543 4544 if (task->task_additional_flags & 4545 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 4546 task->task_expected_xfer_length = 4547 task->task_cmd_xfer_length; 4548 } 4549 4550 if (task->task_cmd_xfer_length == 0) { 4551 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 4552 return; 4553 } 4554 if (task->task_cmd_xfer_length < 4) { 4555 stmf_scsilib_send_status(task, STATUS_CHECK, 4556 STMF_SAA_INVALID_FIELD_IN_CDB); 4557 return; 4558 } 4559 4560 sz = min(task->task_expected_xfer_length, 4561 task->task_cmd_xfer_length); 4562 4563 xd = stmf_prepare_tpgs_data(); 4564 4565 if (xd == NULL) { 4566 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 4567 STMF_ALLOC_FAILURE, NULL); 4568 return; 4569 } 4570 4571 sz = min(sz, xd->size_left); 4572 xd->size_left = sz; 4573 minsz = min(512, sz); 4574 4575 if (dbuf == NULL) 4576 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 4577 if (dbuf == NULL) { 4578 kmem_free(xd, xd->alloc_size); 4579 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 4580 STMF_ALLOC_FAILURE, NULL); 4581 return; 4582 } 4583 dbuf->db_lu_private = xd; 4584 stmf_xd_to_dbuf(dbuf); 4585 4586 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 4587 (void) stmf_xfer_data(task, dbuf, 0); 4588 4589 } 4590 4591 void 4592 stmf_scsilib_handle_task_mgmt(scsi_task_t *task) 4593 { 4594 switch (task->task_mgmt_function) { 4595 /* 4596 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET 4597 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state 4598 * in these cases. This needs to be changed to abort only the required 4599 * set. 
 */
	case TM_ABORT_TASK:
	case TM_ABORT_TASK_SET:
	case TM_CLEAR_TASK_SET:
	case TM_LUN_RESET:
		stmf_handle_lun_reset(task);
		return;
	case TM_TARGET_RESET:
	case TM_TARGET_COLD_RESET:
	case TM_TARGET_WARM_RESET:
		stmf_handle_target_reset(task);
		return;
	default:
		/* We don't support this task mgmt function */
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CMD_IU);
		return;
	}
}

void
stmf_handle_lun_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with target reset, grab this lock. The LU is not going
	 * anywhere as there is at least one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Mark this task as the one causing LU reset so that we know who
	 * was responsible for setting the ILU_RESET_ACTIVE. In case this
	 * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;

	/* Initiate abort on all commands on this LU except this one */
	stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}

void
stmf_handle_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	int i, lf;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with LUN reset, grab this lock. The session is not going
	 * anywhere as there is at least one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Grab the session lock as a writer to prevent any changes in it */
	rw_enter(iss->iss_lockp, RW_WRITER);

	if (iss->iss_flags & ISS_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);

	/*
	 * Now go through each LUN in this session and make sure all of them
	 * can be reset.
 */
	lm = iss->iss_sm;
	for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lf++;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
			rw_exit(iss->iss_lockp);
			mutex_exit(&stmf_state.stmf_lock);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_OPERATION_IN_PROGRESS);
			return;
		}
	}
	if (lf == 0) {
		/* No LUNs in this session */
		atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* ok, start the damage */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING |
	    ITASK_CAUSING_TARGET_RESET;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	}
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
		    lm_ent->ent_lu);
	}

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}

int
stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
{
	scsi_task_t *task = itask->itask_task;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	rw_enter(iss->iss_lockp, RW_WRITER);
	if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
	    (task->task_cdb[0] == SCMD_INQUIRY)) {
		rw_exit(iss->iss_lockp);
		return (0);
	}
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	rw_exit(iss->iss_lockp);

	if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
		return (0);
	}
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
	return (1);
}

void
stmf_worker_init()
{
	uint32_t i;

	/* Make local copy of global tunables */
	stmf_i_max_nworkers = stmf_max_nworkers;
	stmf_i_min_nworkers = stmf_min_nworkers;

	ASSERT(stmf_workers == NULL);
	if (stmf_i_min_nworkers < 4) {
		stmf_i_min_nworkers = 4;
	}
	if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
		stmf_i_max_nworkers = stmf_i_min_nworkers;
	}
	stmf_workers = (stmf_worker_t *)kmem_zalloc(
	    sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
	for (i = 0; i < stmf_i_max_nworkers; i++) {
		stmf_worker_t *w = &stmf_workers[i];
		mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
		cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
	}
	stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
	stmf_workers_state = STMF_WORKERS_ENABLED;

	/* Workers will be started by stmf_worker_mgmt() */

	/* Let's wait for at least one worker to start */
	while (stmf_nworkers_cur == 0)
		delay(drv_usectohz(20 * 1000));
	stmf_worker_mgmt_delay
= drv_usectohz(3 * 1000 * 1000); 4811 } 4812 4813 stmf_status_t 4814 stmf_worker_fini() 4815 { 4816 int i; 4817 clock_t sb; 4818 4819 if (stmf_workers_state == STMF_WORKERS_DISABLED) 4820 return (STMF_SUCCESS); 4821 ASSERT(stmf_workers); 4822 stmf_workers_state = STMF_WORKERS_DISABLED; 4823 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 4824 cv_signal(&stmf_state.stmf_cv); 4825 4826 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000); 4827 /* Wait for all the threads to die */ 4828 while (stmf_nworkers_cur != 0) { 4829 if (ddi_get_lbolt() > sb) { 4830 stmf_workers_state = STMF_WORKERS_ENABLED; 4831 return (STMF_BUSY); 4832 } 4833 delay(drv_usectohz(100 * 1000)); 4834 } 4835 for (i = 0; i < stmf_i_max_nworkers; i++) { 4836 stmf_worker_t *w = &stmf_workers[i]; 4837 mutex_destroy(&w->worker_lock); 4838 cv_destroy(&w->worker_cv); 4839 } 4840 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers); 4841 stmf_workers = NULL; 4842 4843 return (STMF_SUCCESS); 4844 } 4845 4846 void 4847 stmf_worker_task(void *arg) 4848 { 4849 stmf_worker_t *w; 4850 stmf_i_scsi_session_t *iss; 4851 scsi_task_t *task; 4852 stmf_i_scsi_task_t *itask; 4853 stmf_data_buf_t *dbuf; 4854 stmf_lu_t *lu; 4855 clock_t wait_timer = 0; 4856 clock_t wait_ticks; 4857 uint32_t old, new; 4858 uint8_t curcmd; 4859 uint8_t abort_free; 4860 uint8_t wait_queue; 4861 uint8_t dec_qdepth; 4862 4863 w = (stmf_worker_t *)arg; 4864 wait_ticks = drv_usectohz(10000); 4865 4866 mutex_enter(&w->worker_lock); 4867 w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE; 4868 stmf_worker_loop:; 4869 if ((w->worker_ref_count == 0) && 4870 (w->worker_flags & STMF_WORKER_TERMINATE)) { 4871 w->worker_flags &= ~(STMF_WORKER_STARTED | 4872 STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE); 4873 w->worker_tid = NULL; 4874 mutex_exit(&w->worker_lock); 4875 thread_exit(); 4876 } 4877 /* CONSTCOND */ 4878 while (1) { 4879 dec_qdepth = 0; 4880 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) { 4881 wait_timer = 0; 4882 if (w->worker_wait_head) { 4883 ASSERT(w->worker_wait_tail); 4884 if (w->worker_task_head == NULL) 4885 w->worker_task_head = 4886 w->worker_wait_head; 4887 else 4888 w->worker_task_tail->itask_worker_next = 4889 w->worker_wait_head; 4890 w->worker_task_tail = w->worker_wait_tail; 4891 w->worker_wait_head = w->worker_wait_tail = 4892 NULL; 4893 } 4894 } 4895 if ((itask = w->worker_task_head) == NULL) { 4896 break; 4897 } 4898 task = itask->itask_task; 4899 w->worker_task_head = itask->itask_worker_next; 4900 if (w->worker_task_head == NULL) 4901 w->worker_task_tail = NULL; 4902 4903 wait_queue = 0; 4904 abort_free = 0; 4905 if (itask->itask_ncmds > 0) { 4906 curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1]; 4907 } else { 4908 ASSERT(itask->itask_flags & ITASK_BEING_ABORTED); 4909 } 4910 do { 4911 old = itask->itask_flags; 4912 if (old & ITASK_BEING_ABORTED) { 4913 itask->itask_ncmds = 1; 4914 curcmd = itask->itask_cmd_stack[0] = 4915 ITASK_CMD_ABORT; 4916 goto out_itask_flag_loop; 4917 } else if ((curcmd & ITASK_CMD_MASK) == 4918 ITASK_CMD_NEW_TASK) { 4919 /* 4920 * set ITASK_KSTAT_IN_RUNQ, this flag 4921 * will not reset until task completed 4922 */ 4923 new = old | ITASK_KNOWN_TO_LU | 4924 ITASK_KSTAT_IN_RUNQ; 4925 } else { 4926 goto out_itask_flag_loop; 4927 } 4928 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4929 4930 out_itask_flag_loop: 4931 4932 /* 4933 * Decide if this task needs to go to a queue and/or if 4934 * we can decrement the itask_cmd_stack. 
4935 */ 4936 if (curcmd == ITASK_CMD_ABORT) { 4937 if (itask->itask_flags & (ITASK_KNOWN_TO_LU | 4938 ITASK_KNOWN_TO_TGT_PORT)) { 4939 wait_queue = 1; 4940 } else { 4941 abort_free = 1; 4942 } 4943 } else if ((curcmd & ITASK_CMD_POLL) && 4944 (itask->itask_poll_timeout > ddi_get_lbolt())) { 4945 wait_queue = 1; 4946 } 4947 4948 if (wait_queue) { 4949 itask->itask_worker_next = NULL; 4950 if (w->worker_wait_tail) { 4951 w->worker_wait_tail->itask_worker_next = itask; 4952 } else { 4953 w->worker_wait_head = itask; 4954 } 4955 w->worker_wait_tail = itask; 4956 if (wait_timer == 0) { 4957 wait_timer = ddi_get_lbolt() + wait_ticks; 4958 } 4959 } else if ((--(itask->itask_ncmds)) != 0) { 4960 itask->itask_worker_next = NULL; 4961 if (w->worker_task_tail) { 4962 w->worker_task_tail->itask_worker_next = itask; 4963 } else { 4964 w->worker_task_head = itask; 4965 } 4966 w->worker_task_tail = itask; 4967 } else { 4968 atomic_and_32(&itask->itask_flags, 4969 ~ITASK_IN_WORKER_QUEUE); 4970 /* 4971 * This is where the queue depth should go down by 4972 * one but we delay that on purpose to account for 4973 * the call into the provider. The actual decrement 4974 * happens after the worker has done its job. 4975 */ 4976 dec_qdepth = 1; 4977 } 4978 4979 /* We made it here means we are going to call LU */ 4980 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) 4981 lu = task->task_lu; 4982 else 4983 lu = dlun0; 4984 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)]; 4985 mutex_exit(&w->worker_lock); 4986 curcmd &= ITASK_CMD_MASK; 4987 switch (curcmd) { 4988 case ITASK_CMD_NEW_TASK: 4989 iss = (stmf_i_scsi_session_t *) 4990 task->task_session->ss_stmf_private; 4991 stmf_update_kstat_lu_q(task, kstat_waitq_to_runq); 4992 stmf_update_kstat_lport_q(task, kstat_waitq_to_runq); 4993 if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) { 4994 if (stmf_handle_cmd_during_ic(itask)) 4995 break; 4996 } 4997 #ifdef DEBUG 4998 if (stmf_drop_task_counter > 0) { 4999 if (atomic_add_32_nv( 5000 (uint32_t *)&stmf_drop_task_counter, 5001 -1) == 1) { 5002 break; 5003 } 5004 } 5005 #endif 5006 DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task); 5007 lu->lu_new_task(task, dbuf); 5008 break; 5009 case ITASK_CMD_DATA_XFER_DONE: 5010 lu->lu_dbuf_xfer_done(task, dbuf); 5011 break; 5012 case ITASK_CMD_STATUS_DONE: 5013 lu->lu_send_status_done(task); 5014 break; 5015 case ITASK_CMD_ABORT: 5016 if (abort_free) { 5017 stmf_task_free(task); 5018 } else { 5019 stmf_do_task_abort(task); 5020 } 5021 break; 5022 case ITASK_CMD_POLL_LU: 5023 if (!wait_queue) { 5024 lu->lu_task_poll(task); 5025 } 5026 break; 5027 case ITASK_CMD_POLL_LPORT: 5028 if (!wait_queue) 5029 task->task_lport->lport_task_poll(task); 5030 break; 5031 case ITASK_CMD_SEND_STATUS: 5032 /* case ITASK_CMD_XFER_DATA: */ 5033 break; 5034 } 5035 mutex_enter(&w->worker_lock); 5036 if (dec_qdepth) { 5037 w->worker_queue_depth--; 5038 } 5039 } 5040 if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) { 5041 if (w->worker_ref_count == 0) 5042 goto stmf_worker_loop; 5043 else 5044 wait_timer = ddi_get_lbolt() + 1; 5045 } 5046 w->worker_flags &= ~STMF_WORKER_ACTIVE; 5047 if (wait_timer) { 5048 (void) cv_timedwait(&w->worker_cv, &w->worker_lock, wait_timer); 5049 } else { 5050 cv_wait(&w->worker_cv, &w->worker_lock); 5051 } 5052 w->worker_flags |= STMF_WORKER_ACTIVE; 5053 goto stmf_worker_loop; 5054 } 5055 5056 void 5057 stmf_worker_mgmt() 5058 { 5059 int i; 5060 int workers_needed; 5061 uint32_t qd; 5062 clock_t tps, d = 0; 5063 uint32_t cur_max_ntasks = 0; 5064 
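	/*
	 * A short note on the policy below: roughly once a second (tps)
	 * the manager sums each worker's peak queue depth since the last
	 * sample into qd, capped by the largest system-wide task count
	 * seen (cur_max_ntasks). That sum approximates how many workers
	 * the recent load could keep busy. Scaling up takes effect
	 * immediately; scaling down is armed for
	 * stmf_worker_scale_down_delay seconds and uses the highest qd
	 * seen while the timer runs, so a brief lull does not shed
	 * threads.
	 */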
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers than the load needs */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we don't reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* It's time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that it's time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}

/*
 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
 * If all the data has been filled out, frees the xd and makes
 * db_lu_private NULL.
5200 */ 5201 void 5202 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf) 5203 { 5204 stmf_xfer_data_t *xd; 5205 uint8_t *p; 5206 int i; 5207 uint32_t s; 5208 5209 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 5210 dbuf->db_data_size = 0; 5211 dbuf->db_relative_offset = xd->size_done; 5212 for (i = 0; i < dbuf->db_sglist_length; i++) { 5213 s = min(xd->size_left, dbuf->db_sglist[i].seg_length); 5214 p = &xd->buf[xd->size_done]; 5215 bcopy(p, dbuf->db_sglist[i].seg_addr, s); 5216 xd->size_left -= s; 5217 xd->size_done += s; 5218 dbuf->db_data_size += s; 5219 if (xd->size_left == 0) { 5220 kmem_free(xd, xd->alloc_size); 5221 dbuf->db_lu_private = NULL; 5222 return; 5223 } 5224 } 5225 } 5226 5227 /* ARGSUSED */ 5228 stmf_status_t 5229 stmf_dlun0_task_alloc(scsi_task_t *task) 5230 { 5231 return (STMF_SUCCESS); 5232 } 5233 5234 void 5235 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 5236 { 5237 uint8_t *cdbp = (uint8_t *)&task->task_cdb[0]; 5238 stmf_i_scsi_session_t *iss; 5239 uint32_t sz, minsz; 5240 uint8_t *p; 5241 stmf_xfer_data_t *xd; 5242 uint8_t inq_page_length = 31; 5243 5244 if (task->task_mgmt_function) { 5245 stmf_scsilib_handle_task_mgmt(task); 5246 return; 5247 } 5248 5249 switch (cdbp[0]) { 5250 case SCMD_INQUIRY: 5251 /* 5252 * Basic protocol checks. In addition, only reply to 5253 * standard inquiry. Otherwise, the LU provider needs 5254 * to respond. 5255 */ 5256 5257 if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) { 5258 stmf_scsilib_send_status(task, STATUS_CHECK, 5259 STMF_SAA_INVALID_FIELD_IN_CDB); 5260 return; 5261 } 5262 5263 task->task_cmd_xfer_length = 5264 (((uint32_t)cdbp[3]) << 8) | cdbp[4]; 5265 5266 if (task->task_additional_flags & 5267 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 5268 task->task_expected_xfer_length = 5269 task->task_cmd_xfer_length; 5270 } 5271 5272 sz = min(task->task_expected_xfer_length, 5273 min(36, task->task_cmd_xfer_length)); 5274 minsz = 36; 5275 5276 if (sz == 0) { 5277 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5278 return; 5279 } 5280 5281 if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) { 5282 /* 5283 * Ignore any preallocated dbuf if the size is less 5284 * than 36. It will be freed during the task_free. 5285 */ 5286 dbuf = NULL; 5287 } 5288 if (dbuf == NULL) 5289 dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0); 5290 if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) { 5291 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5292 STMF_ALLOC_FAILURE, NULL); 5293 return; 5294 } 5295 dbuf->db_lu_private = NULL; 5296 5297 p = dbuf->db_sglist[0].seg_addr; 5298 5299 /* 5300 * Standard inquiry handling only. 
5301 */ 5302 5303 bzero(p, inq_page_length + 5); 5304 5305 p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN; 5306 p[2] = 5; 5307 p[3] = 0x12; 5308 p[4] = inq_page_length; 5309 p[6] = 0x80; 5310 5311 (void) strncpy((char *)p+8, "SUN ", 8); 5312 (void) strncpy((char *)p+16, "COMSTAR ", 16); 5313 (void) strncpy((char *)p+32, "1.0 ", 4); 5314 5315 dbuf->db_data_size = sz; 5316 dbuf->db_relative_offset = 0; 5317 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 5318 (void) stmf_xfer_data(task, dbuf, 0); 5319 5320 return; 5321 5322 case SCMD_REPORT_LUNS: 5323 task->task_cmd_xfer_length = 5324 ((((uint32_t)task->task_cdb[6]) << 24) | 5325 (((uint32_t)task->task_cdb[7]) << 16) | 5326 (((uint32_t)task->task_cdb[8]) << 8) | 5327 ((uint32_t)task->task_cdb[9])); 5328 5329 if (task->task_additional_flags & 5330 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 5331 task->task_expected_xfer_length = 5332 task->task_cmd_xfer_length; 5333 } 5334 5335 sz = min(task->task_expected_xfer_length, 5336 task->task_cmd_xfer_length); 5337 5338 if (sz < 16) { 5339 stmf_scsilib_send_status(task, STATUS_CHECK, 5340 STMF_SAA_INVALID_FIELD_IN_CDB); 5341 return; 5342 } 5343 5344 iss = (stmf_i_scsi_session_t *) 5345 task->task_session->ss_stmf_private; 5346 rw_enter(iss->iss_lockp, RW_WRITER); 5347 xd = stmf_session_prepare_report_lun_data(iss->iss_sm); 5348 rw_exit(iss->iss_lockp); 5349 5350 if (xd == NULL) { 5351 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5352 STMF_ALLOC_FAILURE, NULL); 5353 return; 5354 } 5355 5356 sz = min(sz, xd->size_left); 5357 xd->size_left = sz; 5358 minsz = min(512, sz); 5359 5360 if (dbuf == NULL) 5361 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 5362 if (dbuf == NULL) { 5363 kmem_free(xd, xd->alloc_size); 5364 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5365 STMF_ALLOC_FAILURE, NULL); 5366 return; 5367 } 5368 dbuf->db_lu_private = xd; 5369 stmf_xd_to_dbuf(dbuf); 5370 5371 atomic_and_32(&iss->iss_flags, 5372 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 5373 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 5374 (void) stmf_xfer_data(task, dbuf, 0); 5375 return; 5376 } 5377 5378 stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE); 5379 } 5380 5381 void 5382 stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf) 5383 { 5384 if (dbuf->db_xfer_status != STMF_SUCCESS) { 5385 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5386 dbuf->db_xfer_status, NULL); 5387 return; 5388 } 5389 task->task_nbytes_transferred = dbuf->db_data_size; 5390 if (dbuf->db_lu_private) { 5391 /* There is more */ 5392 stmf_xd_to_dbuf(dbuf); 5393 (void) stmf_xfer_data(task, dbuf, 0); 5394 return; 5395 } 5396 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5397 } 5398 5399 /* ARGSUSED */ 5400 void 5401 stmf_dlun0_status_done(scsi_task_t *task) 5402 { 5403 } 5404 5405 /* ARGSUSED */ 5406 void 5407 stmf_dlun0_task_free(scsi_task_t *task) 5408 { 5409 } 5410 5411 /* ARGSUSED */ 5412 stmf_status_t 5413 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags) 5414 { 5415 scsi_task_t *task = (scsi_task_t *)arg; 5416 stmf_i_scsi_task_t *itask = 5417 (stmf_i_scsi_task_t *)task->task_stmf_private; 5418 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5419 int i; 5420 uint8_t map; 5421 5422 ASSERT(abort_cmd == STMF_LU_ABORT_TASK); 5423 if ((task->task_mgmt_function) && (itask->itask_flags & 5424 (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) { 5425 switch (task->task_mgmt_function) { 5426 case TM_ABORT_TASK: 5427 case TM_ABORT_TASK_SET: 5428 case TM_CLEAR_TASK_SET: 5429 case TM_LUN_RESET: 5430 
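		/*
		 * This aborted task is the one that set ILU_RESET_ACTIVE
		 * in stmf_handle_lun_reset(), so drop that reset gate on
		 * its way out.
		 */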
atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 5431 break; 5432 case TM_TARGET_RESET: 5433 case TM_TARGET_COLD_RESET: 5434 case TM_TARGET_WARM_RESET: 5435 stmf_abort_target_reset(task); 5436 break; 5437 } 5438 return (STMF_ABORT_SUCCESS); 5439 } 5440 5441 /* 5442 * OK so its not a task mgmt. Make sure we free any xd sitting 5443 * inside any dbuf. 5444 */ 5445 if ((map = itask->itask_allocated_buf_map) != 0) { 5446 for (i = 0; i < 4; i++) { 5447 if ((map & 1) && 5448 ((itask->itask_dbufs[i])->db_lu_private)) { 5449 stmf_xfer_data_t *xd; 5450 stmf_data_buf_t *dbuf; 5451 5452 dbuf = itask->itask_dbufs[i]; 5453 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 5454 dbuf->db_lu_private = NULL; 5455 kmem_free(xd, xd->alloc_size); 5456 } 5457 map >>= 1; 5458 } 5459 } 5460 return (STMF_ABORT_SUCCESS); 5461 } 5462 5463 void 5464 stmf_dlun0_task_poll(struct scsi_task *task) 5465 { 5466 /* Right now we only do this for handling task management functions */ 5467 ASSERT(task->task_mgmt_function); 5468 5469 switch (task->task_mgmt_function) { 5470 case TM_ABORT_TASK: 5471 case TM_ABORT_TASK_SET: 5472 case TM_CLEAR_TASK_SET: 5473 case TM_LUN_RESET: 5474 (void) stmf_lun_reset_poll(task->task_lu, task, 0); 5475 return; 5476 case TM_TARGET_RESET: 5477 case TM_TARGET_COLD_RESET: 5478 case TM_TARGET_WARM_RESET: 5479 stmf_target_reset_poll(task); 5480 return; 5481 } 5482 } 5483 5484 /* ARGSUSED */ 5485 void 5486 stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg) 5487 { 5488 /* This function will never be called */ 5489 cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd); 5490 } 5491 5492 void 5493 stmf_dlun_init() 5494 { 5495 stmf_i_lu_t *ilu; 5496 5497 dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0); 5498 dlun0->lu_task_alloc = stmf_dlun0_task_alloc; 5499 dlun0->lu_new_task = stmf_dlun0_new_task; 5500 dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done; 5501 dlun0->lu_send_status_done = stmf_dlun0_status_done; 5502 dlun0->lu_task_free = stmf_dlun0_task_free; 5503 dlun0->lu_abort = stmf_dlun0_abort; 5504 dlun0->lu_task_poll = stmf_dlun0_task_poll; 5505 dlun0->lu_ctl = stmf_dlun0_ctl; 5506 5507 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 5508 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 5509 } 5510 5511 stmf_status_t 5512 stmf_dlun_fini() 5513 { 5514 stmf_i_lu_t *ilu; 5515 5516 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 5517 5518 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 5519 if (ilu->ilu_ntasks) { 5520 stmf_i_scsi_task_t *itask, *nitask; 5521 5522 nitask = ilu->ilu_tasks; 5523 do { 5524 itask = nitask; 5525 nitask = itask->itask_lu_next; 5526 dlun0->lu_task_free(itask->itask_task); 5527 stmf_free(itask->itask_task); 5528 } while (nitask != NULL); 5529 5530 } 5531 stmf_free(dlun0); 5532 return (STMF_SUCCESS); 5533 } 5534 5535 void 5536 stmf_abort_target_reset(scsi_task_t *task) 5537 { 5538 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 5539 task->task_session->ss_stmf_private; 5540 stmf_lun_map_t *lm; 5541 stmf_lun_map_ent_t *lm_ent; 5542 stmf_i_lu_t *ilu; 5543 int i; 5544 5545 ASSERT(iss->iss_flags & ISS_RESET_ACTIVE); 5546 5547 rw_enter(iss->iss_lockp, RW_READER); 5548 lm = iss->iss_sm; 5549 for (i = 0; i < lm->lm_nentries; i++) { 5550 if (lm->lm_plus[i] == NULL) 5551 continue; 5552 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5553 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 5554 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5555 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 5556 } 5557 } 5558 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 5559 
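	/*
	 * Teardown mirrors stmf_handle_target_reset(): each mapped LU had
	 * ILU_RESET_ACTIVE set under the session lock, so the per-LU flags
	 * and the session's ISS_RESET_ACTIVE are all cleared before the
	 * lock is dropped.
	 */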
rw_exit(iss->iss_lockp); 5560 } 5561 5562 /* 5563 * The return value is only used by function managing target reset. 5564 */ 5565 stmf_status_t 5566 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset) 5567 { 5568 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 5569 int ntasks_pending; 5570 5571 ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free; 5572 /* 5573 * This function is also used during Target reset. The idea is that 5574 * once all the commands are aborted, call the LU's reset entry 5575 * point (abort entry point with a reset flag). But if this Task 5576 * mgmt is running on this LU then all the tasks cannot be aborted. 5577 * one task (this task) will still be running which is OK. 5578 */ 5579 if ((ntasks_pending == 0) || ((task->task_lu == lu) && 5580 (ntasks_pending == 1))) { 5581 stmf_status_t ret; 5582 5583 if ((task->task_mgmt_function == TM_LUN_RESET) || 5584 (task->task_mgmt_function == TM_TARGET_RESET) || 5585 (task->task_mgmt_function == TM_TARGET_WARM_RESET) || 5586 (task->task_mgmt_function == TM_TARGET_COLD_RESET)) { 5587 ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0); 5588 } else { 5589 ret = STMF_SUCCESS; 5590 } 5591 if (ret == STMF_SUCCESS) { 5592 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 5593 } 5594 if (target_reset) { 5595 return (ret); 5596 } 5597 if (ret == STMF_SUCCESS) { 5598 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5599 return (ret); 5600 } 5601 if (ret != STMF_BUSY) { 5602 stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL); 5603 return (ret); 5604 } 5605 } 5606 5607 if (target_reset) { 5608 /* Tell target reset polling code that we are not done */ 5609 return (STMF_BUSY); 5610 } 5611 5612 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5613 != STMF_SUCCESS) { 5614 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5615 STMF_ALLOC_FAILURE, NULL); 5616 return (STMF_SUCCESS); 5617 } 5618 5619 return (STMF_SUCCESS); 5620 } 5621 5622 void 5623 stmf_target_reset_poll(struct scsi_task *task) 5624 { 5625 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 5626 task->task_session->ss_stmf_private; 5627 stmf_lun_map_t *lm; 5628 stmf_lun_map_ent_t *lm_ent; 5629 stmf_i_lu_t *ilu; 5630 stmf_status_t ret; 5631 int i; 5632 int not_done = 0; 5633 5634 ASSERT(iss->iss_flags & ISS_RESET_ACTIVE); 5635 5636 rw_enter(iss->iss_lockp, RW_READER); 5637 lm = iss->iss_sm; 5638 for (i = 0; i < lm->lm_nentries; i++) { 5639 if (lm->lm_plus[i] == NULL) 5640 continue; 5641 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5642 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 5643 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5644 rw_exit(iss->iss_lockp); 5645 ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1); 5646 rw_enter(iss->iss_lockp, RW_READER); 5647 if (ret == STMF_SUCCESS) 5648 continue; 5649 not_done = 1; 5650 if (ret != STMF_BUSY) { 5651 rw_exit(iss->iss_lockp); 5652 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5653 STMF_ABORTED, NULL); 5654 return; 5655 } 5656 } 5657 } 5658 rw_exit(iss->iss_lockp); 5659 5660 if (not_done) { 5661 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5662 != STMF_SUCCESS) { 5663 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5664 STMF_ALLOC_FAILURE, NULL); 5665 return; 5666 } 5667 return; 5668 } 5669 5670 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 5671 5672 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5673 } 5674 5675 stmf_status_t 5676 stmf_lu_add_event(stmf_lu_t *lu, int eventid) 5677 { 5678 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 5679 5680 if ((eventid < 0) || (eventid >= 
STMF_MAX_NUM_EVENTS)) { 5681 return (STMF_INVALID_ARG); 5682 } 5683 5684 STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid); 5685 return (STMF_SUCCESS); 5686 } 5687 5688 stmf_status_t 5689 stmf_lu_remove_event(stmf_lu_t *lu, int eventid) 5690 { 5691 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 5692 5693 if (eventid == STMF_EVENT_ALL) { 5694 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl); 5695 return (STMF_SUCCESS); 5696 } 5697 5698 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 5699 return (STMF_INVALID_ARG); 5700 } 5701 5702 STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid); 5703 return (STMF_SUCCESS); 5704 } 5705 5706 stmf_status_t 5707 stmf_lport_add_event(stmf_local_port_t *lport, int eventid) 5708 { 5709 stmf_i_local_port_t *ilport = 5710 (stmf_i_local_port_t *)lport->lport_stmf_private; 5711 5712 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 5713 return (STMF_INVALID_ARG); 5714 } 5715 5716 STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid); 5717 return (STMF_SUCCESS); 5718 } 5719 5720 stmf_status_t 5721 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid) 5722 { 5723 stmf_i_local_port_t *ilport = 5724 (stmf_i_local_port_t *)lport->lport_stmf_private; 5725 5726 if (eventid == STMF_EVENT_ALL) { 5727 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl); 5728 return (STMF_SUCCESS); 5729 } 5730 5731 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 5732 return (STMF_INVALID_ARG); 5733 } 5734 5735 STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid); 5736 return (STMF_SUCCESS); 5737 } 5738 5739 void 5740 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags) 5741 { 5742 if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) && 5743 (ilu->ilu_lu->lu_event_handler != NULL)) { 5744 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags); 5745 } 5746 } 5747 5748 void 5749 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg, 5750 uint32_t flags) 5751 { 5752 if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) && 5753 (ilport->ilport_lport->lport_event_handler != NULL)) { 5754 ilport->ilport_lport->lport_event_handler( 5755 ilport->ilport_lport, eventid, arg, flags); 5756 } 5757 } 5758 5759 void 5760 stmf_svc_init() 5761 { 5762 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) 5763 return; 5764 stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1, 5765 TASKQ_DEFAULTPRI, 0); 5766 (void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq, 5767 stmf_svc, 0, DDI_SLEEP); 5768 } 5769 5770 stmf_status_t 5771 stmf_svc_fini() 5772 { 5773 uint32_t i; 5774 5775 mutex_enter(&stmf_state.stmf_lock); 5776 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) { 5777 stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE; 5778 cv_signal(&stmf_state.stmf_cv); 5779 } 5780 mutex_exit(&stmf_state.stmf_lock); 5781 5782 /* Wait for 5 seconds */ 5783 for (i = 0; i < 500; i++) { 5784 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) 5785 delay(drv_usectohz(10000)); 5786 else 5787 break; 5788 } 5789 if (i == 500) 5790 return (STMF_BUSY); 5791 5792 ddi_taskq_destroy(stmf_state.stmf_svc_taskq); 5793 5794 return (STMF_SUCCESS); 5795 } 5796 5797 /* ARGSUSED */ 5798 void 5799 stmf_svc(void *arg) 5800 { 5801 stmf_svc_req_t *req, **preq; 5802 clock_t td; 5803 clock_t drain_start, drain_next = 0; 5804 clock_t timing_start, timing_next = 0; 5805 clock_t worker_delay = 0; 5806 int deq; 5807 stmf_lu_t *lu; 5808 stmf_i_lu_t *ilu; 5809 stmf_local_port_t *lport; 5810 stmf_i_local_port_t *ilport, *next_ilport; 5811 stmf_i_scsi_session_t *iss; 5812 5813 
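	/*
	 * Overview (sketch): this service thread loops forever. Each pass
	 * drains the stmf_svc_active list (state-change work queued by
	 * stmf_svc_queue()), then walks the waiting list and calls each
	 * object's ctl entry point; an LU offline is held back until all
	 * of that LU's tasks are free. When idle it runs the periodic
	 * chores below: ILU timing checks, free-task draining,
	 * stmf_worker_mgmt(), and delivery of initial-LUN-mapped events,
	 * then naps in cv_timedwait().
	 */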
td = drv_usectohz(20000); 5814 5815 mutex_enter(&stmf_state.stmf_lock); 5816 stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE; 5817 5818 stmf_svc_loop: 5819 if (stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE) { 5820 stmf_state.stmf_svc_flags &= 5821 ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE); 5822 mutex_exit(&stmf_state.stmf_lock); 5823 return; 5824 } 5825 5826 if (stmf_state.stmf_svc_active) { 5827 int waitq_add = 0; 5828 req = stmf_state.stmf_svc_active; 5829 stmf_state.stmf_svc_active = req->svc_next; 5830 5831 switch (req->svc_cmd) { 5832 case STMF_CMD_LPORT_ONLINE: 5833 /* Fallthrough */ 5834 case STMF_CMD_LPORT_OFFLINE: 5835 /* Fallthrough */ 5836 case STMF_CMD_LU_ONLINE: 5837 /* Nothing to do */ 5838 waitq_add = 1; 5839 break; 5840 5841 case STMF_CMD_LU_OFFLINE: 5842 /* Remove all mappings of this LU */ 5843 stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj); 5844 /* Kill all the pending I/Os for this LU */ 5845 mutex_exit(&stmf_state.stmf_lock); 5846 stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL, 5847 STMF_ABORTED); 5848 mutex_enter(&stmf_state.stmf_lock); 5849 waitq_add = 1; 5850 break; 5851 default: 5852 cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d", 5853 req->svc_cmd); 5854 } 5855 5856 if (waitq_add) { 5857 /* Put it in the wait queue */ 5858 req->svc_next = stmf_state.stmf_svc_waiting; 5859 stmf_state.stmf_svc_waiting = req; 5860 } 5861 } 5862 5863 /* The waiting list is not going to be modified by anybody else */ 5864 mutex_exit(&stmf_state.stmf_lock); 5865 5866 for (preq = &stmf_state.stmf_svc_waiting; (*preq) != NULL; ) { 5867 req = *preq; 5868 deq = 0; 5869 switch (req->svc_cmd) { 5870 case STMF_CMD_LU_ONLINE: 5871 lu = (stmf_lu_t *)req->svc_obj; 5872 deq = 1; 5873 lu->lu_ctl(lu, req->svc_cmd, &req->svc_info); 5874 break; 5875 5876 case STMF_CMD_LU_OFFLINE: 5877 lu = (stmf_lu_t *)req->svc_obj; 5878 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 5879 if (ilu->ilu_ntasks != ilu->ilu_ntasks_free) 5880 break; 5881 deq = 1; 5882 lu->lu_ctl(lu, req->svc_cmd, &req->svc_info); 5883 break; 5884 5885 case STMF_CMD_LPORT_OFFLINE: 5886 /* Fallthrough */ 5887 case STMF_CMD_LPORT_ONLINE: 5888 lport = (stmf_local_port_t *)req->svc_obj; 5889 deq = 1; 5890 lport->lport_ctl(lport, req->svc_cmd, &req->svc_info); 5891 break; 5892 } 5893 if (deq) { 5894 *preq = req->svc_next; 5895 kmem_free(req, req->svc_req_alloc_size); 5896 } else { 5897 preq = &req->svc_next; 5898 } 5899 } 5900 5901 mutex_enter(&stmf_state.stmf_lock); 5902 if (stmf_state.stmf_svc_active == NULL) { 5903 /* Do timeouts */ 5904 if (stmf_state.stmf_nlus && 5905 ((!timing_next) || (ddi_get_lbolt() >= timing_next))) { 5906 if (!stmf_state.stmf_svc_ilu_timing) { 5907 /* we are starting a new round */ 5908 stmf_state.stmf_svc_ilu_timing = 5909 stmf_state.stmf_ilulist; 5910 timing_start = ddi_get_lbolt(); 5911 } 5912 stmf_check_ilu_timing(); 5913 if (!stmf_state.stmf_svc_ilu_timing) { 5914 /* we finished a complete round */ 5915 timing_next = 5916 timing_start + drv_usectohz(5*1000*1000); 5917 } else { 5918 /* we still have some ilu items to check */ 5919 timing_next = 5920 ddi_get_lbolt() + drv_usectohz(1*1000*1000); 5921 } 5922 if (stmf_state.stmf_svc_active) 5923 goto stmf_svc_loop; 5924 } 5925 /* Check if there are free tasks to clear */ 5926 if (stmf_state.stmf_nlus && 5927 ((!drain_next) || (ddi_get_lbolt() >= drain_next))) { 5928 if (!stmf_state.stmf_svc_ilu_draining) { 5929 /* we are starting a new round */ 5930 stmf_state.stmf_svc_ilu_draining = 5931 stmf_state.stmf_ilulist; 5932 drain_start = ddi_get_lbolt(); 
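			/*
			 * Cadence note: a full drain round is rescheduled
			 * 10 seconds from drain_start; while a round is
			 * still in progress the remaining ilu entries are
			 * revisited every second (see drain_next below).
			 */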
		/* Check if there are free tasks to clear */
		if (stmf_state.stmf_nlus &&
		    ((!drain_next) || (ddi_get_lbolt() >= drain_next))) {
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_draining =
				    stmf_state.stmf_ilulist;
				drain_start = ddi_get_lbolt();
			}
			stmf_check_freetask();
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we finished a complete round */
				drain_next =
				    drain_start + drv_usectohz(10*1000*1000);
			} else {
				/* we still have some ilu items to check */
				drain_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}

		/* Check if we need to run worker_mgmt */
		if (ddi_get_lbolt() > worker_delay) {
			stmf_worker_mgmt();
			worker_delay = ddi_get_lbolt() +
			    stmf_worker_mgmt_delay;
		}

		/* Check if any active session got its 1st LUN */
		if (stmf_state.stmf_process_initial_luns) {
			int stmf_level = 0;
			int port_level;
			for (ilport = stmf_state.stmf_ilportlist; ilport;
			    ilport = next_ilport) {
				next_ilport = ilport->ilport_next;
				if ((ilport->ilport_flags &
				    ILPORT_SS_GOT_INITIAL_LUNS) == 0) {
					continue;
				}
				port_level = 0;
				rw_enter(&ilport->ilport_lock, RW_READER);
				for (iss = ilport->ilport_ss_list; iss;
				    iss = iss->iss_next) {
					if ((iss->iss_flags &
					    ISS_GOT_INITIAL_LUNS) == 0) {
						continue;
					}
					port_level++;
					stmf_level++;
					atomic_and_32(&iss->iss_flags,
					    ~ISS_GOT_INITIAL_LUNS);
					atomic_or_32(&iss->iss_flags,
					    ISS_EVENT_ACTIVE);
					rw_exit(&ilport->ilport_lock);
					mutex_exit(&stmf_state.stmf_lock);
					stmf_generate_lport_event(ilport,
					    LPORT_EVENT_INITIAL_LUN_MAPPED,
					    iss->iss_ss, 0);
					atomic_and_32(&iss->iss_flags,
					    ~ISS_EVENT_ACTIVE);
					mutex_enter(&stmf_state.stmf_lock);
					/*
					 * scan all the ilports again as the
					 * ilport list might have changed.
					 */
					next_ilport =
					    stmf_state.stmf_ilportlist;
					break;
				}
				if (port_level == 0) {
					atomic_and_32(&ilport->ilport_flags,
					    ~ILPORT_SS_GOT_INITIAL_LUNS);
				}
				/* drop the lock if we are holding it. */
				if (rw_lock_held(&ilport->ilport_lock))
					rw_exit(&ilport->ilport_lock);

				/* Max 4 sessions at a time */
				if (stmf_level >= 4) {
					break;
				}
			}
			if (stmf_level == 0) {
				stmf_state.stmf_process_initial_luns = 0;
			}
		}

		stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
		(void) cv_timedwait(&stmf_state.stmf_cv, &stmf_state.stmf_lock,
		    ddi_get_lbolt() + td);
		stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
	}
	goto stmf_svc_loop;
}

void
stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
{
	stmf_svc_req_t *req;
	int s;

	ASSERT(!mutex_owned(&stmf_state.stmf_lock));
	s = sizeof (stmf_svc_req_t);
	if (info->st_additional_info) {
		s += strlen(info->st_additional_info) + 1;
	}
	req = kmem_zalloc(s, KM_SLEEP);

	req->svc_cmd = cmd;
	req->svc_obj = obj;
	req->svc_info.st_rflags = info->st_rflags;
	if (info->st_additional_info) {
		req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
		    sizeof (stmf_svc_req_t)));
		(void) strcpy(req->svc_info.st_additional_info,
		    info->st_additional_info);
	}
	req->svc_req_alloc_size = s;

	mutex_enter(&stmf_state.stmf_lock);
	req->svc_next = stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_active = req;
	if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);
}
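
/*
 * Append one formatted line to the global trace ring buffer.  The
 * entry is built in a local buffer (capped so that the trailing
 * newline and NUL always fit) and copied into stmf_trace_buf under
 * trace_buf_lock; the write index wraps back to the start once it
 * comes within 320 bytes of the end of the buffer.
 */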
6057 { 6058 va_list args; 6059 char tbuf[160]; 6060 int len; 6061 6062 if (!stmf_trace_on) 6063 return; 6064 len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "", 6065 ddi_get_lbolt()); 6066 va_start(args, fmt); 6067 len += vsnprintf(tbuf + len, 158 - len, fmt, args); 6068 va_end(args); 6069 6070 if (len > 158) { 6071 len = 158; 6072 } 6073 tbuf[len++] = '\n'; 6074 tbuf[len] = 0; 6075 6076 mutex_enter(&trace_buf_lock); 6077 bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1); 6078 trace_buf_curndx += len; 6079 if (trace_buf_curndx > (trace_buf_size - 320)) 6080 trace_buf_curndx = 0; 6081 mutex_exit(&trace_buf_lock); 6082 } 6083 6084 void 6085 stmf_trace_clear() 6086 { 6087 if (!stmf_trace_on) 6088 return; 6089 mutex_enter(&trace_buf_lock); 6090 trace_buf_curndx = 0; 6091 if (trace_buf_size > 0) 6092 stmf_trace_buf[0] = 0; 6093 mutex_exit(&trace_buf_lock); 6094 } 6095 6096 static void 6097 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info) 6098 { 6099 stmf_state_change_info_t change_info; 6100 void *ctl_private; 6101 uint32_t ctl_cmd; 6102 int msg = 0; 6103 6104 stmf_trace("FROM STMF", "abort_task_offline called for %s: %s", 6105 offline_lu ? "LU" : "LPORT", info ? info : "no additional info"); 6106 change_info.st_additional_info = info; 6107 if (offline_lu) { 6108 change_info.st_rflags = STMF_RFLAG_RESET | 6109 STMF_RFLAG_LU_ABORT; 6110 ctl_private = task->task_lu; 6111 if (((stmf_i_lu_t *) 6112 task->task_lu->lu_stmf_private)->ilu_state == 6113 STMF_STATE_ONLINE) { 6114 msg = 1; 6115 } 6116 ctl_cmd = STMF_CMD_LU_OFFLINE; 6117 } else { 6118 change_info.st_rflags = STMF_RFLAG_RESET | 6119 STMF_RFLAG_LPORT_ABORT; 6120 ctl_private = task->task_lport; 6121 if (((stmf_i_local_port_t *) 6122 task->task_lport->lport_stmf_private)->ilport_state == 6123 STMF_STATE_ONLINE) { 6124 msg = 1; 6125 } 6126 ctl_cmd = STMF_CMD_LPORT_OFFLINE; 6127 } 6128 6129 if (msg) { 6130 stmf_trace(0, "Calling stmf_ctl to offline %s : %s", 6131 offline_lu ? "LU" : "LPORT", info ? info : 6132 "<no additional info>"); 6133 } 6134 (void) stmf_ctl(ctl_cmd, ctl_private, &change_info); 6135 } 6136