1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Md - is the meta-disk driver. It sits below the UFS file system 31 * but above the 'real' disk drivers, xy, id, sd etc. 32 * 33 * To the UFS software, md looks like a normal driver, since it has 34 * the normal kinds of entries in the bdevsw and cdevsw arrays. So 35 * UFS accesses md in the usual ways. In particular, the strategy 36 * routine, mdstrategy(), gets called by fbiwrite(), ufs_getapage(), 37 * and ufs_writelbn(). 38 * 39 * Md maintains an array of minor devices (meta-partitions). Each 40 * meta partition stands for a matrix of real partitions, in rows 41 * which are not necessarily of equal length. Md maintains a table, 42 * with one entry for each meta-partition, which lists the rows and 43 * columns of actual partitions, and the job of the strategy routine 44 * is to translate from the meta-partition device and block numbers 45 * known to UFS into the actual partitions' device and block numbers. 46 * 47 * See below, in mdstrategy(), mdreal(), and mddone() for details of 48 * this translation. 49 */ 50 51 /* 52 * Driver for Virtual Disk. 53 */ 54 55 #include <sys/user.h> 56 #include <sys/sysmacros.h> 57 #include <sys/conf.h> 58 #include <sys/stat.h> 59 #include <sys/errno.h> 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/file.h> 63 #include <sys/open.h> 64 #include <sys/dkio.h> 65 #include <sys/vtoc.h> 66 #include <sys/cmn_err.h> 67 #include <sys/ddi.h> 68 #include <sys/sunddi.h> 69 #include <sys/debug.h> 70 #include <sys/utsname.h> 71 #include <sys/lvm/mdvar.h> 72 #include <sys/lvm/md_names.h> 73 #include <sys/lvm/md_mddb.h> 74 #include <sys/lvm/md_sp.h> 75 #include <sys/types.h> 76 #include <sys/kmem.h> 77 #include <sys/cladm.h> 78 #include <sys/priv_names.h> 79 80 #ifndef lint 81 static char _depends_on[] = "strmod/rpcmod"; 82 #endif /* lint */ 83 int md_init_debug = 0; /* module binding debug */ 84 85 /* 86 * Tunable to turn off the failfast behavior. 87 */ 88 int md_ff_disable = 0; 89 90 md_krwlock_t md_unit_array_rw; /* protects all unit arrays */ 91 md_krwlock_t nm_lock; /* protects all the name spaces */ 92 93 md_resync_t md_cpr_resync; 94 95 extern char svm_bootpath[]; 96 #define SVM_PSEUDO_STR "/pseudo/md@0:" 97 98 #define VERSION_LENGTH 6 99 #define VERSION "1.0" 100 101 /* 102 * Keep track of possible 'orphan' entries in the name space 103 */ 104 int *md_nm_snarfed = NULL; 105 106 /* 107 * Global tunable giving the percentage of free space left in replica during 108 * conversion of non-devid style replica to devid style replica. 109 */ 110 int md_conv_perc = MDDB_DEVID_CONV_PERC; 111 112 #ifdef DEBUG 113 /* debug code to verify framework exclusion guarantees */ 114 int md_in; 115 kmutex_t md_in_mx; /* used to md global stuff */ 116 #define IN_INIT 0x01 117 #define IN_FINI 0x02 118 #define IN_ATTACH 0x04 119 #define IN_DETACH 0x08 120 #define IN_OPEN 0x10 121 #define MD_SET_IN(x) { \ 122 mutex_enter(&md_in_mx); \ 123 if (md_in) \ 124 debug_enter("MD_SET_IN exclusion lost"); \ 125 if (md_in & x) \ 126 debug_enter("MD_SET_IN already set"); \ 127 md_in |= x; \ 128 mutex_exit(&md_in_mx); \ 129 } 130 131 #define MD_CLR_IN(x) { \ 132 mutex_enter(&md_in_mx); \ 133 if (md_in & ~(x)) \ 134 debug_enter("MD_CLR_IN exclusion lost"); \ 135 if (!(md_in & x)) \ 136 debug_enter("MD_CLR_IN already clr"); \ 137 md_in &= ~x; \ 138 mutex_exit(&md_in_mx); \ 139 } 140 #else /* DEBUG */ 141 #define MD_SET_IN(x) 142 #define MD_CLR_IN(x) 143 #endif /* DEBUG */ 144 hrtime_t savetime1, savetime2; 145 146 147 /* 148 * list things protected by md_mx even if they aren't 149 * used in this file. 150 */ 151 kmutex_t md_mx; /* used to md global stuff */ 152 kcondvar_t md_cv; /* md_status events */ 153 int md_status = 0; /* global status for the meta-driver */ 154 int md_num_daemons = 0; 155 int md_ioctl_cnt = 0; 156 int md_mtioctl_cnt = 0; /* multithreaded ioctl cnt */ 157 uint_t md_mdelay = 10; /* variable so can be patched */ 158 159 int (*mdv_strategy_tstpnt)(buf_t *, int, void*); 160 161 major_t md_major, md_major_targ; 162 163 unit_t md_nunits = MD_MAXUNITS; 164 set_t md_nsets = MD_MAXSETS; 165 int md_nmedh = 0; 166 char *md_med_trans_lst = NULL; 167 md_set_t md_set[MD_MAXSETS]; 168 md_set_io_t md_set_io[MD_MAXSETS]; 169 170 md_krwlock_t hsp_rwlp; /* protects hot_spare_interface */ 171 md_krwlock_t ni_rwlp; /* protects notify_interface */ 172 md_ops_t **md_ops; 173 ddi_modhandle_t *md_mods; 174 md_ops_t *md_opslist; 175 clock_t md_hz; 176 md_event_queue_t *md_event_queue = NULL; 177 178 int md_in_upgrade; 179 int md_keep_repl_state; 180 int md_devid_destroy; 181 182 /* for sending messages thru a door to userland */ 183 door_handle_t mdmn_door_handle = NULL; 184 int mdmn_door_did = -1; 185 186 dev_info_t *md_devinfo = NULL; 187 188 md_mn_nodeid_t md_mn_mynode_id = ~0u; /* My node id (for multi-node sets) */ 189 190 static uint_t md_ocnt[OTYPCNT]; 191 192 static int mdinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 193 static int mdattach(dev_info_t *, ddi_attach_cmd_t); 194 static int mddetach(dev_info_t *, ddi_detach_cmd_t); 195 static int mdopen(dev_t *, int, int, cred_t *); 196 static int mdclose(dev_t, int, int, cred_t *); 197 static int mddump(dev_t, caddr_t, daddr_t, int); 198 static int mdread(dev_t, struct uio *, cred_t *); 199 static int mdwrite(dev_t, struct uio *, cred_t *); 200 static int mdaread(dev_t, struct aio_req *, cred_t *); 201 static int mdawrite(dev_t, struct aio_req *, cred_t *); 202 static int mdioctl(dev_t, int, intptr_t, int, cred_t *, int *); 203 static int mdprop_op(dev_t, dev_info_t *, 204 ddi_prop_op_t, int, char *, caddr_t, int *); 205 206 static struct cb_ops md_cb_ops = { 207 mdopen, /* open */ 208 mdclose, /* close */ 209 mdstrategy, /* strategy */ 210 /* print routine -- none yet */ 211 (int(*)(dev_t, char *))nulldev, 212 mddump, /* dump */ 213 mdread, /* read */ 214 mdwrite, /* write */ 215 mdioctl, /* ioctl */ 216 /* devmap */ 217 (int(*)(dev_t, devmap_cookie_t, offset_t, size_t, size_t *, 218 uint_t))nodev, 219 /* mmap */ 220 (int(*)(dev_t, off_t, int))nodev, 221 /* segmap */ 222 (int(*)(dev_t, off_t, struct as *, caddr_t *, off_t, unsigned, 223 unsigned, unsigned, cred_t *))nodev, 224 nochpoll, /* poll */ 225 mdprop_op, /* prop_op */ 226 0, /* streamtab */ 227 (D_64BIT|D_MP|D_NEW), /* driver compatibility flag */ 228 CB_REV, /* cb_ops version */ 229 mdaread, /* aread */ 230 mdawrite, /* awrite */ 231 }; 232 233 static struct dev_ops md_devops = { 234 DEVO_REV, /* dev_ops version */ 235 0, /* device reference count */ 236 mdinfo, /* info routine */ 237 nulldev, /* identify routine */ 238 nulldev, /* probe - not defined */ 239 mdattach, /* attach routine */ 240 mddetach, /* detach routine */ 241 nodev, /* reset - not defined */ 242 &md_cb_ops, /* driver operations */ 243 NULL, /* bus operations */ 244 nodev /* power management */ 245 }; 246 247 /* 248 * loadable module wrapper 249 */ 250 #include <sys/modctl.h> 251 252 static struct modldrv modldrv = { 253 &mod_driverops, /* type of module -- a pseudodriver */ 254 "Solaris Volume Manager base module %I%", /* name of the module */ 255 &md_devops, /* driver ops */ 256 }; 257 258 static struct modlinkage modlinkage = { 259 MODREV_1, 260 (void *)&modldrv, 261 NULL 262 }; 263 264 265 /* md_medd.c */ 266 extern void med_init(void); 267 extern void med_fini(void); 268 extern void md_devid_cleanup(set_t, uint_t); 269 270 /* md_names.c */ 271 extern void *lookup_entry(struct nm_next_hdr *, set_t, 272 side_t, mdkey_t, md_dev64_t, int); 273 extern struct nm_next_hdr *get_first_record(set_t, int, int); 274 extern int remove_entry(struct nm_next_hdr *, 275 side_t, mdkey_t, int); 276 277 int md_maxphys = 0; /* maximum io size in bytes */ 278 #define MD_MAXBCOUNT (1024 * 1024) 279 unsigned md_maxbcount = 0; /* maximum physio size in bytes */ 280 281 /* allocate/free dynamic space associated with driver globals */ 282 void 283 md_global_alloc_free(int alloc) 284 { 285 set_t s; 286 287 if (alloc) { 288 /* initialize driver global locks */ 289 cv_init(&md_cv, NULL, CV_DEFAULT, NULL); 290 mutex_init(&md_mx, NULL, MUTEX_DEFAULT, NULL); 291 rw_init(&md_unit_array_rw.lock, NULL, RW_DEFAULT, NULL); 292 rw_init(&nm_lock.lock, NULL, RW_DEFAULT, NULL); 293 rw_init(&ni_rwlp.lock, NULL, RW_DRIVER, NULL); 294 rw_init(&hsp_rwlp.lock, NULL, RW_DRIVER, NULL); 295 mutex_init(&md_cpr_resync.md_resync_mutex, NULL, 296 MUTEX_DEFAULT, NULL); 297 298 /* initialize per set driver global locks */ 299 for (s = 0; s < MD_MAXSETS; s++) { 300 /* initialize per set driver globals locks */ 301 mutex_init(&md_set[s].s_dbmx, 302 NULL, MUTEX_DEFAULT, NULL); 303 mutex_init(&md_set_io[s].md_io_mx, 304 NULL, MUTEX_DEFAULT, NULL); 305 cv_init(&md_set_io[s].md_io_cv, 306 NULL, CV_DEFAULT, NULL); 307 } 308 } else { 309 /* destroy per set driver global locks */ 310 for (s = 0; s < MD_MAXSETS; s++) { 311 cv_destroy(&md_set_io[s].md_io_cv); 312 mutex_destroy(&md_set_io[s].md_io_mx); 313 mutex_destroy(&md_set[s].s_dbmx); 314 } 315 316 /* destroy driver global locks */ 317 mutex_destroy(&md_cpr_resync.md_resync_mutex); 318 rw_destroy(&hsp_rwlp.lock); 319 rw_destroy(&ni_rwlp.lock); 320 rw_destroy(&nm_lock.lock); 321 rw_destroy(&md_unit_array_rw.lock); 322 mutex_destroy(&md_mx); 323 cv_destroy(&md_cv); 324 } 325 } 326 327 int 328 _init(void) 329 { 330 set_t s; 331 int err; 332 333 MD_SET_IN(IN_INIT); 334 335 /* allocate dynamic space associated with driver globals */ 336 md_global_alloc_free(1); 337 338 /* initialize driver globals */ 339 md_major = ddi_name_to_major("md"); 340 md_hz = drv_usectohz(NUM_USEC_IN_SEC); 341 342 /* initialize tunable globals */ 343 if (md_maxphys == 0) /* maximum io size in bytes */ 344 md_maxphys = maxphys; 345 if (md_maxbcount == 0) /* maximum physio size in bytes */ 346 md_maxbcount = MD_MAXBCOUNT; 347 348 /* initialize per set driver globals */ 349 for (s = 0; s < MD_MAXSETS; s++) 350 md_set_io[s].io_state = MD_SET_ACTIVE; 351 352 /* 353 * NOTE: the framework does not currently guarantee exclusion 354 * between _init and attach after calling mod_install. 355 */ 356 MD_CLR_IN(IN_INIT); 357 if ((err = mod_install(&modlinkage))) { 358 MD_SET_IN(IN_INIT); 359 md_global_alloc_free(0); /* free dynamic space */ 360 MD_CLR_IN(IN_INIT); 361 } 362 return (err); 363 } 364 365 int 366 _fini(void) 367 { 368 int err; 369 370 /* 371 * NOTE: the framework currently does not guarantee exclusion 372 * with attach until after mod_remove returns 0. 373 */ 374 if ((err = mod_remove(&modlinkage))) 375 return (err); 376 377 MD_SET_IN(IN_FINI); 378 md_global_alloc_free(0); /* free dynamic space */ 379 MD_CLR_IN(IN_FINI); 380 return (err); 381 } 382 383 int 384 _info(struct modinfo *modinfop) 385 { 386 return (mod_info(&modlinkage, modinfop)); 387 } 388 389 /* ARGSUSED */ 390 static int 391 mdattach(dev_info_t *dip, ddi_attach_cmd_t cmd) 392 { 393 int len; 394 unit_t i; 395 size_t sz; 396 char ver[VERSION_LENGTH]; 397 char **maj_str_array; 398 char *str, *str2; 399 400 MD_SET_IN(IN_ATTACH); 401 md_in_upgrade = 0; 402 md_keep_repl_state = 0; 403 md_devid_destroy = 0; 404 405 if (cmd != DDI_ATTACH) { 406 MD_CLR_IN(IN_ATTACH); 407 return (DDI_FAILURE); 408 } 409 410 if (md_devinfo != NULL) { 411 MD_CLR_IN(IN_ATTACH); 412 return (DDI_FAILURE); 413 } 414 415 mddb_init(); 416 417 if (md_start_daemons(TRUE)) { 418 MD_CLR_IN(IN_ATTACH); 419 mddb_unload(); /* undo mddb_init() allocations */ 420 return (DDI_FAILURE); 421 } 422 423 /* clear the halted state */ 424 md_clr_status(MD_GBL_HALTED); 425 426 /* see if the diagnostic switch is on */ 427 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 428 DDI_PROP_DONTPASS, "md_init_debug", 0)) 429 md_init_debug++; 430 431 /* see if the failfast disable switch is on */ 432 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 433 DDI_PROP_DONTPASS, "md_ff_disable", 0)) 434 md_ff_disable++; 435 436 /* try and get the md_nmedh property */ 437 md_nmedh = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 438 DDI_PROP_DONTPASS, "md_nmedh", MED_DEF_HOSTS); 439 if ((md_nmedh <= 0) || (md_nmedh > MED_MAX_HOSTS)) 440 md_nmedh = MED_DEF_HOSTS; 441 442 /* try and get the md_med_trans_lst property */ 443 len = 0; 444 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN, 445 0, "md_med_trans_lst", NULL, &len) != DDI_PROP_SUCCESS || 446 len == 0) { 447 md_med_trans_lst = md_strdup("tcp"); 448 } else { 449 md_med_trans_lst = kmem_zalloc((size_t)len, KM_SLEEP); 450 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 451 0, "md_med_trans_lst", md_med_trans_lst, &len) != 452 DDI_PROP_SUCCESS) { 453 kmem_free(md_med_trans_lst, (size_t)len); 454 md_med_trans_lst = md_strdup("tcp"); 455 } 456 } 457 458 /* try and get the md_xlate property */ 459 /* Should we only do this if upgrade? */ 460 len = sizeof (char) * 5; 461 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 462 0, "md_xlate_ver", ver, &len) == DDI_PROP_SUCCESS) { 463 if (strcmp(ver, VERSION) == 0) { 464 len = 0; 465 if (ddi_prop_op(DDI_DEV_T_ANY, dip, 466 PROP_LEN_AND_VAL_ALLOC, 0, "md_xlate", 467 (caddr_t)&md_tuple_table, &len) != 468 DDI_PROP_SUCCESS) { 469 if (md_init_debug) 470 cmn_err(CE_WARN, 471 "md_xlate ddi_prop_op failed"); 472 goto attach_failure; 473 } else { 474 md_tuple_length = 475 len/(2 * ((int)sizeof (dev32_t))); 476 md_in_upgrade = 1; 477 } 478 479 /* Get target's name to major table */ 480 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, 481 dip, DDI_PROP_DONTPASS, 482 "md_targ_nm_table", &maj_str_array, 483 &md_majortab_len) != DDI_PROP_SUCCESS) { 484 md_majortab_len = 0; 485 if (md_init_debug) 486 cmn_err(CE_WARN, "md_targ_nm_table " 487 "ddi_prop_lookup_string_array failed"); 488 goto attach_failure; 489 } 490 491 md_major_tuple_table = 492 (struct md_xlate_major_table *) 493 kmem_zalloc(md_majortab_len * 494 sizeof (struct md_xlate_major_table), KM_SLEEP); 495 496 for (i = 0; i < md_majortab_len; i++) { 497 /* Getting major name */ 498 str = strchr(maj_str_array[i], ' '); 499 if (str == NULL) 500 continue; 501 *str = '\0'; 502 md_major_tuple_table[i].drv_name = 503 md_strdup(maj_str_array[i]); 504 505 /* Simplified atoi to get major number */ 506 str2 = str + 1; 507 md_major_tuple_table[i].targ_maj = 0; 508 while ((*str2 >= '0') && (*str2 <= '9')) { 509 md_major_tuple_table[i].targ_maj *= 10; 510 md_major_tuple_table[i].targ_maj += 511 *str2++ - '0'; 512 } 513 *str = ' '; 514 } 515 ddi_prop_free((void *)maj_str_array); 516 } else { 517 if (md_init_debug) 518 cmn_err(CE_WARN, "md_xlate_ver is incorrect"); 519 goto attach_failure; 520 } 521 } 522 523 /* 524 * Check for properties: 525 * md_keep_repl_state and md_devid_destroy 526 * and set globals if these exist. 527 */ 528 md_keep_repl_state = ddi_getprop(DDI_DEV_T_ANY, dip, 529 0, "md_keep_repl_state", 0); 530 531 md_devid_destroy = ddi_getprop(DDI_DEV_T_ANY, dip, 532 0, "md_devid_destroy", 0); 533 534 if (MD_UPGRADE) 535 md_major_targ = md_targ_name_to_major("md"); 536 else 537 md_major_targ = 0; 538 539 /* alloc md_ops and md_mods struct */ 540 md_ops = (md_ops_t **)kmem_zalloc( 541 sizeof (md_ops_t *) * MD_NOPS, KM_SLEEP); 542 md_mods = (ddi_modhandle_t *)kmem_zalloc( 543 sizeof (ddi_modhandle_t) * MD_NOPS, KM_SLEEP); 544 545 /* allocate admin device node */ 546 if (ddi_create_priv_minor_node(dip, "admin", S_IFCHR, 547 MD_ADM_MINOR, DDI_PSEUDO, 0, NULL, PRIV_SYS_CONFIG, 0640)) 548 goto attach_failure; 549 550 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 551 DDI_KERNEL_IOCTL, NULL, 0) != DDI_SUCCESS) 552 goto attach_failure; 553 554 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 555 "ddi-abrwrite-supported", 1) != DDI_SUCCESS) 556 goto attach_failure; 557 558 /* these could have been cleared by a detach */ 559 md_nunits = MD_MAXUNITS; 560 md_nsets = MD_MAXSETS; 561 562 sz = sizeof (void *) * MD_MAXUNITS; 563 if (md_set[0].s_un == NULL) 564 md_set[0].s_un = kmem_zalloc(sz, KM_SLEEP); 565 if (md_set[0].s_ui == NULL) 566 md_set[0].s_ui = kmem_zalloc(sz, KM_SLEEP); 567 568 md_devinfo = dip; 569 570 /* 571 * Only allocate device node for root mirror metadevice. 572 * Don't pre-allocate unnecessary device nodes (thus slowing down a 573 * boot when we attach). 574 * We can't read the mddbs in attach. The mddbs will be read 575 * by metainit during the boot process when it is doing the 576 * auto-take processing and any other minor nodes will be 577 * allocated at that point. 578 * 579 * There are two scenarios to be aware of here: 580 * 1) when we are booting from a mirrored root we need the root 581 * metadevice to exist very early (during vfs_mountroot processing) 582 * 2) we need all of the nodes to be created so that any mnttab entries 583 * will succeed (handled by metainit reading the mddb during boot). 584 */ 585 if (strncmp(SVM_PSEUDO_STR, svm_bootpath, sizeof (SVM_PSEUDO_STR) - 1) 586 == 0) { 587 char *p; 588 int mnum = 0; 589 590 /* 591 * The svm_bootpath string looks something like 592 * /pseudo/md@0:0,150,blk where 150 is the minor number 593 * in this example so we need to set the pointer p onto 594 * the first digit of the minor number and convert it 595 * from ascii. 596 */ 597 for (p = svm_bootpath + sizeof (SVM_PSEUDO_STR) + 1; 598 *p >= '0' && *p <= '9'; p++) { 599 mnum *= 10; 600 mnum += *p - '0'; 601 } 602 603 if (md_create_minor_node(0, mnum)) { 604 kmem_free(md_set[0].s_un, sz); 605 kmem_free(md_set[0].s_ui, sz); 606 goto attach_failure; 607 } 608 } 609 610 med_init(); 611 612 MD_CLR_IN(IN_ATTACH); 613 return (DDI_SUCCESS); 614 615 attach_failure: 616 /* 617 * Use our own detach routine to toss any stuff we allocated above. 618 * NOTE: detach will call md_halt to free the mddb_init allocations. 619 */ 620 MD_CLR_IN(IN_ATTACH); 621 if (mddetach(dip, DDI_DETACH) != DDI_SUCCESS) 622 cmn_err(CE_WARN, "detach from attach failed"); 623 return (DDI_FAILURE); 624 } 625 626 /* ARGSUSED */ 627 static int 628 mddetach(dev_info_t *dip, ddi_detach_cmd_t cmd) 629 { 630 extern int check_active_locators(); 631 set_t s; 632 size_t sz; 633 int len; 634 635 MD_SET_IN(IN_DETACH); 636 637 /* check command */ 638 if (cmd != DDI_DETACH) { 639 MD_CLR_IN(IN_DETACH); 640 return (DDI_FAILURE); 641 } 642 643 /* 644 * if we have not already halted yet we have no active config 645 * then automatically initiate a halt so we can detach. 646 */ 647 if (!(md_get_status() & MD_GBL_HALTED)) { 648 if (check_active_locators() == 0) { 649 /* 650 * NOTE: a successful md_halt will have done the 651 * mddb_unload to free allocations done in mddb_init 652 */ 653 if (md_halt(MD_NO_GBL_LOCKS_HELD)) { 654 cmn_err(CE_NOTE, "md:detach: " 655 "Could not halt Solaris Volume Manager"); 656 MD_CLR_IN(IN_DETACH); 657 return (DDI_FAILURE); 658 } 659 } 660 661 /* fail detach if we have not halted */ 662 if (!(md_get_status() & MD_GBL_HALTED)) { 663 MD_CLR_IN(IN_DETACH); 664 return (DDI_FAILURE); 665 } 666 } 667 668 /* must be in halted state, this will be cleared on next attach */ 669 ASSERT(md_get_status() & MD_GBL_HALTED); 670 671 /* cleanup attach allocations and initializations */ 672 md_major_targ = 0; 673 674 sz = sizeof (void *) * md_nunits; 675 for (s = 0; s < md_nsets; s++) { 676 if (md_set[s].s_un != NULL) { 677 kmem_free(md_set[s].s_un, sz); 678 md_set[s].s_un = NULL; 679 } 680 681 if (md_set[s].s_ui != NULL) { 682 kmem_free(md_set[s].s_ui, sz); 683 md_set[s].s_ui = NULL; 684 } 685 } 686 md_nunits = 0; 687 md_nsets = 0; 688 md_nmedh = 0; 689 690 if (md_med_trans_lst != NULL) { 691 kmem_free(md_med_trans_lst, strlen(md_med_trans_lst) + 1); 692 md_med_trans_lst = NULL; 693 } 694 695 if (md_mods != NULL) { 696 kmem_free(md_mods, sizeof (ddi_modhandle_t) * MD_NOPS); 697 md_mods = NULL; 698 } 699 700 if (md_ops != NULL) { 701 kmem_free(md_ops, sizeof (md_ops_t *) * MD_NOPS); 702 md_ops = NULL; 703 } 704 705 if (MD_UPGRADE) { 706 len = md_tuple_length * (2 * ((int)sizeof (dev32_t))); 707 md_in_upgrade = 0; 708 md_xlate_free(len); 709 md_majortab_free(); 710 } 711 712 /* 713 * Undo what we did in mdattach, freeing resources 714 * and removing things we installed. The system 715 * framework guarantees we are not active with this devinfo 716 * node in any other entry points at this time. 717 */ 718 ddi_prop_remove_all(dip); 719 ddi_remove_minor_node(dip, NULL); 720 721 med_fini(); 722 md_devinfo = NULL; 723 724 MD_CLR_IN(IN_DETACH); 725 return (DDI_SUCCESS); 726 } 727 728 729 /* 730 * Given the device number return the devinfo pointer 731 * given to md via md_attach 732 */ 733 /*ARGSUSED*/ 734 static int 735 mdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 736 { 737 int error = DDI_FAILURE; 738 739 switch (infocmd) { 740 case DDI_INFO_DEVT2DEVINFO: 741 if (md_devinfo) { 742 *result = (void *)md_devinfo; 743 error = DDI_SUCCESS; 744 } 745 break; 746 747 case DDI_INFO_DEVT2INSTANCE: 748 *result = (void *)0; 749 error = DDI_SUCCESS; 750 break; 751 } 752 return (error); 753 } 754 755 /* 756 * property operation routine. return the number of blocks for the partition 757 * in question or forward the request to the property facilities. 758 */ 759 static int 760 mdprop_op( 761 dev_t dev, /* device number associated with device */ 762 dev_info_t *dip, /* device info struct for this device */ 763 ddi_prop_op_t prop_op, /* property operator */ 764 int mod_flags, /* property flags */ 765 char *name, /* name of property */ 766 caddr_t valuep, /* where to put property value */ 767 int *lengthp) /* put length of property here */ 768 { 769 minor_t mnum; 770 set_t setno; 771 md_unit_t *un; 772 mdi_unit_t *ui; 773 uint64_t nblocks64; 774 775 /* 776 * Our dynamic properties are all device specific and size oriented. 777 * Requests issued under conditions where size is valid are passed 778 * to ddi_prop_op_nblocks with the size information, otherwise the 779 * request is passed to ddi_prop_op. Make sure that the minor device 780 * is a valid part of the Virtual Disk subsystem. 781 */ 782 mnum = getminor(dev); 783 setno = MD_MIN2SET(mnum); 784 if ((dev == DDI_DEV_T_ANY) || (mnum == MD_ADM_MINOR) || 785 (setno >= md_nsets) || (MD_MIN2UNIT(mnum) >= md_nunits)) { 786 pass: return (ddi_prop_op(dev, dip, prop_op, mod_flags, 787 name, valuep, lengthp)); 788 } else { 789 rw_enter(&md_unit_array_rw.lock, RW_READER); 790 if (((md_get_setstatus(setno) & MD_SET_SNARFED) == 0) || 791 ((ui = MDI_UNIT(mnum)) == NULL)) { 792 rw_exit(&md_unit_array_rw.lock); 793 goto pass; 794 } 795 796 /* get nblocks value */ 797 un = (md_unit_t *)md_unit_readerlock(ui); 798 nblocks64 = un->c.un_total_blocks; 799 md_unit_readerexit(ui); 800 rw_exit(&md_unit_array_rw.lock); 801 802 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 803 name, valuep, lengthp, nblocks64)); 804 } 805 806 } 807 808 static void 809 snarf_user_data(set_t setno) 810 { 811 mddb_recid_t recid; 812 mddb_recstatus_t status; 813 814 recid = mddb_makerecid(setno, 0); 815 while ((recid = mddb_getnextrec(recid, MDDB_USER, 0)) > 0) { 816 if (mddb_getrecprivate(recid) & MD_PRV_GOTIT) 817 continue; 818 819 status = mddb_getrecstatus(recid); 820 if (status == MDDB_STALE) 821 continue; 822 823 if (status == MDDB_NODATA) { 824 mddb_setrecprivate(recid, MD_PRV_PENDDEL); 825 continue; 826 } 827 828 ASSERT(status == MDDB_OK); 829 830 mddb_setrecprivate(recid, MD_PRV_GOTIT); 831 } 832 } 833 834 static void 835 md_print_block_usage(mddb_set_t *s, uint_t blks) 836 { 837 uint_t ib; 838 int li; 839 mddb_mb_ic_t *mbip; 840 uint_t max_blk_needed; 841 mddb_lb_t *lbp; 842 mddb_sidelocator_t *slp; 843 int drv_index; 844 md_splitname sn; 845 char *name; 846 char *suffix; 847 size_t prefixlen; 848 size_t suffixlen; 849 int alloc_sz; 850 851 852 max_blk_needed = s->s_totalblkcnt - s->s_freeblkcnt + blks; 853 854 855 cmn_err(CE_WARN, "Blocks in Metadevice State Database: %d\n" 856 " Additional Blocks Needed: %d\n\n" 857 " Increase size of following replicas for\n" 858 " device relocatability by deleting listed\n" 859 " replica and re-adding replica with\n" 860 " increased size (see metadb(1M)):\n" 861 " Replica Increase By", 862 s->s_totalblkcnt, (blks - s->s_freeblkcnt)); 863 864 lbp = s->s_lbp; 865 866 for (li = 0; li < lbp->lb_loccnt; li++) { 867 if (lbp->lb_locators[li].l_flags & MDDB_F_DELETED) 868 continue; 869 ib = 0; 870 for (mbip = s->s_mbiarray[li]; mbip != NULL; 871 mbip = mbip->mbi_next) { 872 ib += (uint_t)mbip->mbi_mddb_mb.mb_blkcnt; 873 } 874 if (ib == 0) 875 continue; 876 if (ib < max_blk_needed) { 877 slp = &lbp->lb_sidelocators[s->s_sideno][li]; 878 drv_index = slp->l_drvnm_index; 879 mddb_locatorblock2splitname(s->s_lnp, li, s->s_sideno, 880 &sn); 881 prefixlen = SPN_PREFIX(&sn).pre_len; 882 suffixlen = SPN_SUFFIX(&sn).suf_len; 883 alloc_sz = (int)(prefixlen + suffixlen + 2); 884 name = (char *)kmem_alloc(alloc_sz, KM_SLEEP); 885 (void) strncpy(name, SPN_PREFIX(&sn).pre_data, 886 prefixlen); 887 name[prefixlen] = '/'; 888 suffix = name + (prefixlen + 1); 889 (void) strncpy(suffix, SPN_SUFFIX(&sn).suf_data, 890 suffixlen); 891 name[prefixlen + suffixlen + 1] = '\0'; 892 cmn_err(CE_WARN, 893 " %s (%s:%d:%d) %d blocks", 894 name, lbp->lb_drvnm[drv_index].dn_data, 895 slp->l_mnum, lbp->lb_locators[li].l_blkno, 896 (max_blk_needed - ib)); 897 kmem_free(name, alloc_sz); 898 } 899 } 900 } 901 902 /* 903 * md_create_minor_node: 904 * Create the minor device for the given set and un_self_id. 905 * 906 * Input: 907 * setno - set number 908 * mnum - selfID of unit 909 * 910 * Output: 911 * None. 912 * 913 * Returns 0 for success, 1 for failure. 914 * 915 * Side-effects: 916 * None. 917 */ 918 int 919 md_create_minor_node(set_t setno, minor_t mnum) 920 { 921 char name[20]; 922 923 /* Check for valid arguments */ 924 if (setno >= MD_MAXSETS || MD_MIN2UNIT(mnum) >= MD_MAXUNITS) 925 return (1); 926 927 (void) snprintf(name, 20, "%u,%u,blk", 928 (unsigned)setno, (unsigned)MD_MIN2UNIT(mnum)); 929 930 if (ddi_create_minor_node(md_devinfo, name, S_IFBLK, 931 MD_MKMIN(setno, mnum), DDI_PSEUDO, 0)) 932 return (1); 933 934 (void) snprintf(name, 20, "%u,%u,raw", 935 (unsigned)setno, (unsigned)MD_MIN2UNIT(mnum)); 936 937 if (ddi_create_minor_node(md_devinfo, name, S_IFCHR, 938 MD_MKMIN(setno, mnum), DDI_PSEUDO, 0)) 939 return (1); 940 941 return (0); 942 } 943 944 /* 945 * For a given key check if it is an orphaned record. 946 * The following conditions are used to determine an orphan. 947 * 1. The device associated with that key is not a metadevice. 948 * 2. If DEVID_STYLE then the physical device does not have a device Id 949 * associated with it. 950 * 951 * If a key does not have an entry in the devid namespace it could be 952 * a device that does not support device ids. Hence the record is not 953 * deleted. 954 */ 955 956 static int 957 md_verify_orphaned_record(set_t setno, mdkey_t key) 958 { 959 md_dev64_t odev; /* orphaned dev */ 960 mddb_set_t *s; 961 side_t side = 0; 962 struct nm_next_hdr *did_nh = NULL; 963 964 s = (mddb_set_t *)md_set[setno].s_db; 965 if ((did_nh = get_first_record(setno, 1, (NM_DEVID | NM_NOTSHARED))) 966 == NULL) 967 return (0); 968 /* 969 * If devid style is set then get the dev_t using MD_NOTRUST_DEVT 970 */ 971 if (s->s_lbp->lb_flags & MDDB_DEVID_STYLE) { 972 odev = md_getdevnum(setno, side, key, MD_NOTRUST_DEVT); 973 if ((odev == NODEV64) || (md_getmajor(odev) == md_major)) 974 return (0); 975 if (lookup_entry(did_nh, setno, side, key, odev, NM_DEVID) == 976 NULL) 977 return (1); 978 } 979 return (0); 980 } 981 982 int 983 md_snarf_db_set(set_t setno, md_error_t *ep) 984 { 985 int err = 0; 986 int i; 987 mddb_recid_t recid; 988 mddb_type_t drvrid; 989 mddb_recstatus_t status; 990 md_ops_t *ops; 991 uint_t privat; 992 mddb_set_t *s; 993 uint_t cvt_blks; 994 struct nm_next_hdr *nh; 995 mdkey_t key = MD_KEYWILD; 996 side_t side = 0; 997 int size; 998 int devid_flag; 999 int retval; 1000 1001 md_haltsnarf_enter(setno); 1002 1003 mutex_enter(&md_mx); 1004 if (md_set[setno].s_status & MD_SET_SNARFED) { 1005 mutex_exit(&md_mx); 1006 md_haltsnarf_exit(setno); 1007 return (0); 1008 } 1009 mutex_exit(&md_mx); 1010 1011 if (! (md_get_status() & MD_GBL_DAEMONS_LIVE)) { 1012 if (md_start_daemons(TRUE)) { 1013 if (ep != NULL) 1014 (void) mdsyserror(ep, ENXIO); 1015 err = -1; 1016 goto out; 1017 } 1018 } 1019 1020 1021 /* 1022 * Load the devid name space if it exists 1023 */ 1024 (void) md_load_namespace(setno, NULL, NM_DEVID); 1025 if (!md_load_namespace(setno, ep, 0L)) { 1026 /* 1027 * Unload the devid namespace 1028 */ 1029 (void) md_unload_namespace(setno, NM_DEVID); 1030 err = -1; 1031 goto out; 1032 } 1033 1034 /* 1035 * If replica is in non-devid state, convert if: 1036 * - not in probe during upgrade (md_keep_repl_state = 0) 1037 * - enough space available in replica 1038 * - local set 1039 * - not a multi-node diskset 1040 * - clustering is not present (for non-local set) 1041 */ 1042 s = (mddb_set_t *)md_set[setno].s_db; 1043 devid_flag = 0; 1044 if (!(s->s_lbp->lb_flags & MDDB_DEVID_STYLE) && !md_keep_repl_state) 1045 devid_flag = 1; 1046 if (cluster_bootflags & CLUSTER_CONFIGURED) 1047 if (setno != MD_LOCAL_SET) 1048 devid_flag = 0; 1049 if (MD_MNSET_SETNO(setno)) 1050 devid_flag = 0; 1051 if ((md_devid_destroy == 1) && (md_keep_repl_state == 1)) 1052 devid_flag = 0; 1053 1054 /* 1055 * if we weren't devid style before and md_keep_repl_state=1 1056 * we need to stay non-devid 1057 */ 1058 if ((md_keep_repl_state == 1) && 1059 ((s->s_lbp->lb_flags & MDDB_DEVID_STYLE) == 0)) 1060 devid_flag = 0; 1061 if (devid_flag) { 1062 /* 1063 * Determine number of free blocks needed to convert 1064 * entire replica to device id format - locator blocks 1065 * and namespace. 1066 */ 1067 cvt_blks = 0; 1068 if (mddb_lb_did_convert(s, 0, &cvt_blks) != 0) { 1069 if (ep != NULL) 1070 (void) mdsyserror(ep, EIO); 1071 err = -1; 1072 goto out; 1073 1074 } 1075 cvt_blks += md_nm_did_chkspace(setno); 1076 1077 /* add MDDB_DEVID_CONV_PERC% */ 1078 if ((md_conv_perc > 0) && (md_conv_perc <= 100)) { 1079 cvt_blks = cvt_blks * (100 + md_conv_perc) / 100; 1080 } 1081 1082 if (cvt_blks <= s->s_freeblkcnt) { 1083 if (mddb_lb_did_convert(s, 1, &cvt_blks) != 0) { 1084 if (ep != NULL) 1085 (void) mdsyserror(ep, EIO); 1086 err = -1; 1087 goto out; 1088 } 1089 1090 } else { 1091 /* 1092 * Print message that replica can't be converted for 1093 * lack of space. No failure - just continue to 1094 * run without device ids. 1095 */ 1096 cmn_err(CE_WARN, 1097 "Unable to add Solaris Volume Manager device " 1098 "relocation data.\n" 1099 " To use device relocation feature:\n" 1100 " - Increase size of listed replicas\n" 1101 " - Reboot"); 1102 md_print_block_usage(s, cvt_blks); 1103 cmn_err(CE_WARN, 1104 "Loading set without device relocation data.\n" 1105 " Solaris Volume Manager disk movement " 1106 "not tracked in local set."); 1107 } 1108 } 1109 1110 /* 1111 * go through and load any modules referenced in 1112 * data base 1113 */ 1114 recid = mddb_makerecid(setno, 0); 1115 while ((recid = mddb_getnextrec(recid, MDDB_ALL, 0)) > 0) { 1116 status = mddb_getrecstatus(recid); 1117 if (status == MDDB_STALE) { 1118 if (! (md_get_setstatus(setno) & MD_SET_STALE)) { 1119 md_set_setstatus(setno, MD_SET_STALE); 1120 cmn_err(CE_WARN, 1121 "md: state database is stale"); 1122 } 1123 } else if (status == MDDB_NODATA) { 1124 mddb_setrecprivate(recid, MD_PRV_PENDDEL); 1125 continue; 1126 } 1127 drvrid = mddb_getrectype1(recid); 1128 if (drvrid < MDDB_FIRST_MODID) 1129 continue; 1130 if (md_loadsubmod(setno, md_getshared_name(setno, drvrid), 1131 drvrid) < 0) { 1132 cmn_err(CE_NOTE, "md: could not load misc/%s", 1133 md_getshared_name(setno, drvrid)); 1134 } 1135 } 1136 1137 if (recid < 0) 1138 goto out; 1139 1140 snarf_user_data(setno); 1141 1142 /* 1143 * Initialize the md_nm_snarfed array 1144 * this array is indexed by the key and 1145 * is set by md_getdevnum during the snarf time 1146 */ 1147 if ((nh = get_first_record(setno, 0, NM_NOTSHARED)) != NULL) { 1148 size = (int)((((struct nm_rec_hdr *)nh->nmn_record)-> 1149 r_next_key) * (sizeof (int))); 1150 md_nm_snarfed = (int *)kmem_zalloc(size, KM_SLEEP); 1151 } 1152 1153 /* 1154 * go through and snarf until nothing gets added 1155 */ 1156 do { 1157 i = 0; 1158 for (ops = md_opslist; ops != NULL; ops = ops->md_next) { 1159 if (ops->md_snarf != NULL) { 1160 retval = ops->md_snarf(MD_SNARF_DOIT, setno); 1161 if (retval == -1) { 1162 err = -1; 1163 /* Don't know the failed unit */ 1164 (void) mdmderror(ep, MDE_RR_ALLOC_ERROR, 1165 0); 1166 (void) md_halt_set(setno, MD_HALT_ALL); 1167 (void) mddb_unload_set(setno); 1168 md_haltsnarf_exit(setno); 1169 return (err); 1170 } else { 1171 i += retval; 1172 } 1173 } 1174 } 1175 } while (i); 1176 1177 md_set_setstatus(setno, MD_SET_SNARFED); 1178 1179 recid = mddb_makerecid(setno, 0); 1180 while ((recid = mddb_getnextrec(recid, MDDB_ALL, 0)) > 0) { 1181 privat = mddb_getrecprivate(recid); 1182 if (privat & MD_PRV_COMMIT) { 1183 if (mddb_commitrec(recid)) { 1184 if (!(md_get_setstatus(setno) & MD_SET_STALE)) { 1185 md_set_setstatus(setno, MD_SET_STALE); 1186 cmn_err(CE_WARN, 1187 "md: state database is stale"); 1188 } 1189 } 1190 mddb_setrecprivate(recid, MD_PRV_GOTIT); 1191 } 1192 } 1193 1194 /* Deletes must happen after all the commits */ 1195 recid = mddb_makerecid(setno, 0); 1196 while ((recid = mddb_getnextrec(recid, MDDB_ALL, 0)) > 0) { 1197 privat = mddb_getrecprivate(recid); 1198 if (privat & MD_PRV_DELETE) { 1199 if (mddb_deleterec(recid)) { 1200 if (!(md_get_setstatus(setno) & MD_SET_STALE)) { 1201 md_set_setstatus(setno, MD_SET_STALE); 1202 cmn_err(CE_WARN, 1203 "md: state database is stale"); 1204 } 1205 mddb_setrecprivate(recid, MD_PRV_GOTIT); 1206 } 1207 recid = mddb_makerecid(setno, 0); 1208 } 1209 } 1210 1211 /* 1212 * go through and clean up records until nothing gets cleaned up. 1213 */ 1214 do { 1215 i = 0; 1216 for (ops = md_opslist; ops != NULL; ops = ops->md_next) 1217 if (ops->md_snarf != NULL) 1218 i += ops->md_snarf(MD_SNARF_CLEANUP, setno); 1219 } while (i); 1220 1221 if (md_nm_snarfed != NULL && 1222 !(md_get_setstatus(setno) & MD_SET_STALE)) { 1223 /* 1224 * go thru and cleanup the namespace and the device id 1225 * name space 1226 */ 1227 for (key = 1; 1228 key < ((struct nm_rec_hdr *)nh->nmn_record)->r_next_key; 1229 key++) { 1230 /* 1231 * Is the entry an 'orphan'? 1232 */ 1233 if (lookup_entry(nh, setno, side, key, NODEV64, 0L) != 1234 NULL) { 1235 /* 1236 * If the value is not set then apparently 1237 * it is not part of the current configuration, 1238 * remove it this can happen when system panic 1239 * between the primary name space update and 1240 * the device id name space update 1241 */ 1242 if (md_nm_snarfed[key] == 0) { 1243 if (md_verify_orphaned_record(setno, 1244 key) == 1) 1245 (void) remove_entry(nh, 1246 side, key, 0L); 1247 } 1248 } 1249 } 1250 } 1251 1252 if (md_nm_snarfed != NULL) { 1253 /* 1254 * Done and free the memory 1255 */ 1256 kmem_free(md_nm_snarfed, size); 1257 md_nm_snarfed = NULL; 1258 } 1259 1260 if (s->s_lbp->lb_flags & MDDB_DEVID_STYLE && 1261 !(md_get_setstatus(setno) & MD_SET_STALE)) { 1262 /* 1263 * if the destroy flag has been set and 1264 * the MD_SET_DIDCLUP bit is not set in 1265 * the set's status field, cleanup the 1266 * entire device id namespace 1267 */ 1268 if (md_devid_destroy && 1269 !(md_get_setstatus(setno) & MD_SET_DIDCLUP)) { 1270 (void) md_devid_cleanup(setno, 1); 1271 md_set_setstatus(setno, MD_SET_DIDCLUP); 1272 } else 1273 (void) md_devid_cleanup(setno, 0); 1274 } 1275 1276 /* 1277 * clear single threading on snarf, return success or error 1278 */ 1279 out: 1280 md_haltsnarf_exit(setno); 1281 return (err); 1282 } 1283 1284 void 1285 get_minfo(struct dk_minfo *info, minor_t mnum) 1286 { 1287 md_unit_t *un; 1288 mdi_unit_t *ui; 1289 1290 info->dki_capacity = 0; 1291 info->dki_lbsize = 0; 1292 info->dki_media_type = 0; 1293 1294 if ((ui = MDI_UNIT(mnum)) == NULL) { 1295 return; 1296 } 1297 un = (md_unit_t *)md_unit_readerlock(ui); 1298 info->dki_capacity = un->c.un_total_blocks; 1299 md_unit_readerexit(ui); 1300 info->dki_lbsize = DEV_BSIZE; 1301 info->dki_media_type = DK_UNKNOWN; 1302 } 1303 1304 1305 void 1306 get_info(struct dk_cinfo *info, minor_t mnum) 1307 { 1308 /* 1309 * Controller Information 1310 */ 1311 info->dki_ctype = DKC_MD; 1312 info->dki_cnum = ddi_get_instance(ddi_get_parent(md_devinfo)); 1313 (void) strcpy(info->dki_cname, 1314 ddi_get_name(ddi_get_parent(md_devinfo))); 1315 /* 1316 * Unit Information 1317 */ 1318 info->dki_unit = mnum; 1319 info->dki_slave = 0; 1320 (void) strcpy(info->dki_dname, ddi_driver_name(md_devinfo)); 1321 info->dki_flags = 0; 1322 info->dki_partition = 0; 1323 info->dki_maxtransfer = (ushort_t)(md_maxphys / DEV_BSIZE); 1324 1325 /* 1326 * We can't get from here to there yet 1327 */ 1328 info->dki_addr = 0; 1329 info->dki_space = 0; 1330 info->dki_prio = 0; 1331 info->dki_vec = 0; 1332 } 1333 1334 /* 1335 * open admin device 1336 */ 1337 static int 1338 mdadminopen( 1339 int flag, 1340 int otyp) 1341 { 1342 int err = 0; 1343 1344 /* single thread */ 1345 mutex_enter(&md_mx); 1346 1347 /* check type and flags */ 1348 if ((otyp != OTYP_CHR) && (otyp != OTYP_LYR)) { 1349 err = EINVAL; 1350 goto out; 1351 } 1352 if (((flag & FEXCL) && (md_status & MD_GBL_OPEN)) || 1353 (md_status & MD_GBL_EXCL)) { 1354 err = EBUSY; 1355 goto out; 1356 } 1357 1358 /* count and flag open */ 1359 md_ocnt[otyp]++; 1360 md_status |= MD_GBL_OPEN; 1361 if (flag & FEXCL) 1362 md_status |= MD_GBL_EXCL; 1363 1364 /* unlock return success */ 1365 out: 1366 mutex_exit(&md_mx); 1367 return (err); 1368 } 1369 1370 /* 1371 * open entry point 1372 */ 1373 static int 1374 mdopen( 1375 dev_t *dev, 1376 int flag, 1377 int otyp, 1378 cred_t *cred_p) 1379 { 1380 minor_t mnum = getminor(*dev); 1381 unit_t unit = MD_MIN2UNIT(mnum); 1382 set_t setno = MD_MIN2SET(mnum); 1383 mdi_unit_t *ui = NULL; 1384 int err = 0; 1385 md_parent_t parent; 1386 1387 /* dispatch admin device opens */ 1388 if (mnum == MD_ADM_MINOR) 1389 return (mdadminopen(flag, otyp)); 1390 1391 /* lock, check status */ 1392 rw_enter(&md_unit_array_rw.lock, RW_READER); 1393 1394 tryagain: 1395 if (md_get_status() & MD_GBL_HALTED) { 1396 err = ENODEV; 1397 goto out; 1398 } 1399 1400 /* check minor */ 1401 if ((setno >= md_nsets) || (unit >= md_nunits)) { 1402 err = ENXIO; 1403 goto out; 1404 } 1405 1406 /* make sure we're snarfed */ 1407 if ((md_get_setstatus(MD_LOCAL_SET) & MD_SET_SNARFED) == 0) { 1408 if (md_snarf_db_set(MD_LOCAL_SET, NULL) != 0) { 1409 err = ENODEV; 1410 goto out; 1411 } 1412 } 1413 if ((md_get_setstatus(setno) & MD_SET_SNARFED) == 0) { 1414 err = ENODEV; 1415 goto out; 1416 } 1417 1418 /* check unit */ 1419 if ((ui = MDI_UNIT(mnum)) == NULL) { 1420 err = ENXIO; 1421 goto out; 1422 } 1423 1424 /* 1425 * The softpart open routine may do an I/O during the open, in 1426 * which case the open routine will set the OPENINPROGRESS flag 1427 * and drop all locks during the I/O. If this thread sees 1428 * the OPENINPROGRESS flag set, if should wait until the flag 1429 * is reset before calling the driver's open routine. It must 1430 * also revalidate the world after it grabs the unit_array lock 1431 * since the set may have been released or the metadevice cleared 1432 * during the sleep. 1433 */ 1434 if (MD_MNSET_SETNO(setno)) { 1435 mutex_enter(&ui->ui_mx); 1436 if (ui->ui_lock & MD_UL_OPENINPROGRESS) { 1437 rw_exit(&md_unit_array_rw.lock); 1438 cv_wait(&ui->ui_cv, &ui->ui_mx); 1439 rw_enter(&md_unit_array_rw.lock, RW_READER); 1440 mutex_exit(&ui->ui_mx); 1441 goto tryagain; 1442 } 1443 mutex_exit(&ui->ui_mx); 1444 } 1445 1446 /* Test if device is openable */ 1447 if ((ui->ui_tstate & MD_NOTOPENABLE) != 0) { 1448 err = ENXIO; 1449 goto out; 1450 } 1451 1452 /* don't allow opens w/WRITE flag if stale */ 1453 if ((flag & FWRITE) && (md_get_setstatus(setno) & MD_SET_STALE)) { 1454 err = EROFS; 1455 goto out; 1456 } 1457 1458 /* don't allow writes to subdevices */ 1459 parent = md_get_parent(md_expldev(*dev)); 1460 if ((flag & FWRITE) && MD_HAS_PARENT(parent)) { 1461 err = EROFS; 1462 goto out; 1463 } 1464 1465 /* open underlying driver */ 1466 if (md_ops[ui->ui_opsindex]->md_open != NULL) { 1467 if ((err = (*md_ops[ui->ui_opsindex]->md_open) 1468 (dev, flag, otyp, cred_p, 0)) != 0) 1469 goto out; 1470 } 1471 1472 /* or do it ourselves */ 1473 else { 1474 /* single thread */ 1475 (void) md_unit_openclose_enter(ui); 1476 err = md_unit_incopen(mnum, flag, otyp); 1477 md_unit_openclose_exit(ui); 1478 if (err != 0) 1479 goto out; 1480 } 1481 1482 /* unlock, return status */ 1483 out: 1484 rw_exit(&md_unit_array_rw.lock); 1485 return (err); 1486 } 1487 1488 /* 1489 * close admin device 1490 */ 1491 static int 1492 mdadminclose( 1493 int otyp) 1494 { 1495 int i; 1496 int err = 0; 1497 1498 /* single thread */ 1499 mutex_enter(&md_mx); 1500 1501 /* check type and flags */ 1502 if ((otyp < 0) || (otyp >= OTYPCNT)) { 1503 err = EINVAL; 1504 goto out; 1505 } else if (md_ocnt[otyp] == 0) { 1506 err = ENXIO; 1507 goto out; 1508 } 1509 1510 /* count and flag closed */ 1511 if (otyp == OTYP_LYR) 1512 md_ocnt[otyp]--; 1513 else 1514 md_ocnt[otyp] = 0; 1515 md_status &= ~MD_GBL_OPEN; 1516 for (i = 0; (i < OTYPCNT); ++i) 1517 if (md_ocnt[i] != 0) 1518 md_status |= MD_GBL_OPEN; 1519 if (! (md_status & MD_GBL_OPEN)) 1520 md_status &= ~MD_GBL_EXCL; 1521 1522 /* unlock return success */ 1523 out: 1524 mutex_exit(&md_mx); 1525 return (err); 1526 } 1527 1528 /* 1529 * close entry point 1530 */ 1531 static int 1532 mdclose( 1533 dev_t dev, 1534 int flag, 1535 int otyp, 1536 cred_t *cred_p) 1537 { 1538 minor_t mnum = getminor(dev); 1539 set_t setno = MD_MIN2SET(mnum); 1540 unit_t unit = MD_MIN2UNIT(mnum); 1541 mdi_unit_t *ui = NULL; 1542 int err = 0; 1543 1544 /* dispatch admin device closes */ 1545 if (mnum == MD_ADM_MINOR) 1546 return (mdadminclose(otyp)); 1547 1548 /* check minor */ 1549 if ((setno >= md_nsets) || (unit >= md_nunits) || 1550 ((ui = MDI_UNIT(mnum)) == NULL)) { 1551 err = ENXIO; 1552 goto out; 1553 } 1554 1555 /* close underlying driver */ 1556 if (md_ops[ui->ui_opsindex]->md_close != NULL) { 1557 if ((err = (*md_ops[ui->ui_opsindex]->md_close) 1558 (dev, flag, otyp, cred_p, 0)) != 0) 1559 goto out; 1560 } 1561 1562 /* or do it ourselves */ 1563 else { 1564 /* single thread */ 1565 (void) md_unit_openclose_enter(ui); 1566 err = md_unit_decopen(mnum, otyp); 1567 md_unit_openclose_exit(ui); 1568 if (err != 0) 1569 goto out; 1570 } 1571 1572 /* return success */ 1573 out: 1574 return (err); 1575 } 1576 1577 1578 /* 1579 * This routine performs raw read operations. It is called from the 1580 * device switch at normal priority. 1581 * 1582 * The main catch is that the *uio struct which is passed to us may 1583 * specify a read which spans two buffers, which would be contiguous 1584 * on a single partition, but not on a striped partition. This will 1585 * be handled by mdstrategy. 1586 */ 1587 /*ARGSUSED*/ 1588 static int 1589 mdread(dev_t dev, struct uio *uio, cred_t *credp) 1590 { 1591 minor_t mnum; 1592 mdi_unit_t *ui; 1593 int error; 1594 1595 if (((mnum = getminor(dev)) == MD_ADM_MINOR) || 1596 (MD_MIN2SET(mnum) >= md_nsets) || 1597 (MD_MIN2UNIT(mnum) >= md_nunits) || 1598 ((ui = MDI_UNIT(mnum)) == NULL)) 1599 return (ENXIO); 1600 1601 if (md_ops[ui->ui_opsindex]->md_read != NULL) 1602 return ((*md_ops[ui->ui_opsindex]->md_read) 1603 (dev, uio, credp)); 1604 1605 if ((error = md_chk_uio(uio)) != 0) 1606 return (error); 1607 1608 return (physio(mdstrategy, NULL, dev, B_READ, md_minphys, uio)); 1609 } 1610 1611 /* 1612 * This routine performs async raw read operations. It is called from the 1613 * device switch at normal priority. 1614 * 1615 * The main catch is that the *aio struct which is passed to us may 1616 * specify a read which spans two buffers, which would be contiguous 1617 * on a single partition, but not on a striped partition. This will 1618 * be handled by mdstrategy. 1619 */ 1620 /*ARGSUSED*/ 1621 static int 1622 mdaread(dev_t dev, struct aio_req *aio, cred_t *credp) 1623 { 1624 minor_t mnum; 1625 mdi_unit_t *ui; 1626 int error; 1627 1628 1629 if (((mnum = getminor(dev)) == MD_ADM_MINOR) || 1630 (MD_MIN2SET(mnum) >= md_nsets) || 1631 (MD_MIN2UNIT(mnum) >= md_nunits) || 1632 ((ui = MDI_UNIT(mnum)) == NULL)) 1633 return (ENXIO); 1634 1635 if (md_ops[ui->ui_opsindex]->md_aread != NULL) 1636 return ((*md_ops[ui->ui_opsindex]->md_aread) 1637 (dev, aio, credp)); 1638 1639 if ((error = md_chk_uio(aio->aio_uio)) != 0) 1640 return (error); 1641 1642 return (aphysio(mdstrategy, anocancel, dev, B_READ, md_minphys, aio)); 1643 } 1644 1645 /* 1646 * This routine performs raw write operations. It is called from the 1647 * device switch at normal priority. 1648 * 1649 * The main catch is that the *uio struct which is passed to us may 1650 * specify a write which spans two buffers, which would be contiguous 1651 * on a single partition, but not on a striped partition. This is 1652 * handled by mdstrategy. 1653 * 1654 */ 1655 /*ARGSUSED*/ 1656 static int 1657 mdwrite(dev_t dev, struct uio *uio, cred_t *credp) 1658 { 1659 minor_t mnum; 1660 mdi_unit_t *ui; 1661 int error; 1662 1663 if (((mnum = getminor(dev)) == MD_ADM_MINOR) || 1664 (MD_MIN2SET(mnum) >= md_nsets) || 1665 (MD_MIN2UNIT(mnum) >= md_nunits) || 1666 ((ui = MDI_UNIT(mnum)) == NULL)) 1667 return (ENXIO); 1668 1669 if (md_ops[ui->ui_opsindex]->md_write != NULL) 1670 return ((*md_ops[ui->ui_opsindex]->md_write) 1671 (dev, uio, credp)); 1672 1673 if ((error = md_chk_uio(uio)) != 0) 1674 return (error); 1675 1676 return (physio(mdstrategy, NULL, dev, B_WRITE, md_minphys, uio)); 1677 } 1678 1679 /* 1680 * This routine performs async raw write operations. It is called from the 1681 * device switch at normal priority. 1682 * 1683 * The main catch is that the *aio struct which is passed to us may 1684 * specify a write which spans two buffers, which would be contiguous 1685 * on a single partition, but not on a striped partition. This is 1686 * handled by mdstrategy. 1687 * 1688 */ 1689 /*ARGSUSED*/ 1690 static int 1691 mdawrite(dev_t dev, struct aio_req *aio, cred_t *credp) 1692 { 1693 minor_t mnum; 1694 mdi_unit_t *ui; 1695 int error; 1696 1697 1698 if (((mnum = getminor(dev)) == MD_ADM_MINOR) || 1699 (MD_MIN2SET(mnum) >= md_nsets) || 1700 (MD_MIN2UNIT(mnum) >= md_nunits) || 1701 ((ui = MDI_UNIT(mnum)) == NULL)) 1702 return (ENXIO); 1703 1704 if (md_ops[ui->ui_opsindex]->md_awrite != NULL) 1705 return ((*md_ops[ui->ui_opsindex]->md_awrite) 1706 (dev, aio, credp)); 1707 1708 if ((error = md_chk_uio(aio->aio_uio)) != 0) 1709 return (error); 1710 1711 return (aphysio(mdstrategy, anocancel, dev, B_WRITE, md_minphys, aio)); 1712 } 1713 1714 int 1715 mdstrategy(struct buf *bp) 1716 { 1717 minor_t mnum; 1718 mdi_unit_t *ui; 1719 1720 ASSERT((bp->b_flags & B_DONE) == 0); 1721 1722 if (panicstr) 1723 md_clr_status(MD_GBL_DAEMONS_LIVE); 1724 1725 if (((mnum = getminor(bp->b_edev)) == MD_ADM_MINOR) || 1726 (MD_MIN2SET(mnum) >= md_nsets) || 1727 (MD_MIN2UNIT(mnum) >= md_nunits) || 1728 ((ui = MDI_UNIT(mnum)) == NULL)) { 1729 bp->b_flags |= B_ERROR; 1730 bp->b_error = ENXIO; 1731 bp->b_resid = bp->b_bcount; 1732 biodone(bp); 1733 return (0); 1734 } 1735 1736 bp->b_flags &= ~(B_ERROR | B_DONE); 1737 if (md_ops[ui->ui_opsindex]->md_strategy != NULL) { 1738 (*md_ops[ui->ui_opsindex]->md_strategy) (bp, 0, NULL); 1739 } else { 1740 (void) errdone(ui, bp, ENXIO); 1741 } 1742 return (0); 1743 } 1744 1745 /* 1746 * Return true if the ioctl is allowed to be multithreaded. 1747 * All the ioctls with MN are sent only from the message handlers through 1748 * rpc.mdcommd, which (via it's own locking mechanism) takes care that not two 1749 * ioctl for the same metadevice are issued at the same time. 1750 * So we are safe here. 1751 * The other ioctls do not mess with any metadevice structures and therefor 1752 * are harmless too, if called multiple times at the same time. 1753 */ 1754 static boolean_t 1755 is_mt_ioctl(int cmd) { 1756 1757 switch (cmd) { 1758 case MD_IOCGUNIQMSGID: 1759 case MD_IOCGVERSION: 1760 case MD_IOCISOPEN: 1761 case MD_MN_SET_MM_OWNER: 1762 case MD_MN_SET_STATE: 1763 case MD_MN_SUSPEND_WRITES: 1764 case MD_MN_ALLOCATE_HOTSPARE: 1765 case MD_MN_SET_SETFLAGS: 1766 case MD_MN_GET_SETFLAGS: 1767 case MD_MN_MDDB_OPTRECFIX: 1768 case MD_MN_MDDB_PARSE: 1769 case MD_MN_MDDB_BLOCK: 1770 case MD_MN_DB_USERREQ: 1771 case MD_IOC_SPSTATUS: 1772 case MD_MN_COMMD_ERR: 1773 case MD_MN_SET_COMMD_RUNNING: 1774 case MD_MN_RESYNC: 1775 case MD_MN_SETSYNC: 1776 case MD_MN_POKE_HOTSPARES: 1777 return (1); 1778 default: 1779 return (0); 1780 } 1781 } 1782 1783 /* 1784 * This routine implements the ioctl calls for the Virtual Disk System. 1785 * It is called from the device switch at normal priority. 1786 */ 1787 /* ARGSUSED */ 1788 static int 1789 mdioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *cred_p, 1790 int *rval_p) 1791 { 1792 minor_t mnum = getminor(dev); 1793 mdi_unit_t *ui; 1794 IOLOCK lock; 1795 int err; 1796 1797 /* 1798 * For multinode disksets number of ioctls are allowed to be 1799 * multithreaded. 1800 * A fundamental assumption made in this implementation is that 1801 * ioctls either do not interact with other md structures or the 1802 * ioctl to the admin device can only occur if the metadevice 1803 * device is open. i.e. avoid a race between metaclear and the 1804 * progress of a multithreaded ioctl. 1805 */ 1806 1807 if (!is_mt_ioctl(cmd) && md_ioctl_lock_enter() == EINTR) { 1808 return (EINTR); 1809 } 1810 1811 /* 1812 * initialize lock tracker 1813 */ 1814 IOLOCK_INIT(&lock); 1815 1816 /* Flag to indicate that MD_GBL_IOCTL_LOCK is not acquired */ 1817 1818 if (is_mt_ioctl(cmd)) { 1819 /* increment the md_mtioctl_cnt */ 1820 mutex_enter(&md_mx); 1821 md_mtioctl_cnt++; 1822 mutex_exit(&md_mx); 1823 lock.l_flags |= MD_MT_IOCTL; 1824 } 1825 1826 /* 1827 * this has been added to prevent notification from re-snarfing 1828 * so metaunload will work. It may interfere with other modules 1829 * halt process. 1830 */ 1831 if (md_get_status() & (MD_GBL_HALTED | MD_GBL_DAEMONS_DIE)) 1832 return (IOLOCK_RETURN(ENXIO, &lock)); 1833 1834 /* 1835 * admin device ioctls 1836 */ 1837 if (mnum == MD_ADM_MINOR) { 1838 err = md_admin_ioctl(md_expldev(dev), cmd, (void *) data, 1839 mode, &lock); 1840 } 1841 1842 /* 1843 * metadevice ioctls 1844 */ 1845 else if ((MD_MIN2SET(mnum) >= md_nsets) || 1846 (MD_MIN2UNIT(mnum) >= md_nunits) || 1847 ((ui = MDI_UNIT(mnum)) == NULL)) { 1848 err = ENXIO; 1849 } else if (md_ops[ui->ui_opsindex]->md_ioctl == NULL) { 1850 err = ENOTTY; 1851 } else { 1852 err = (*md_ops[ui->ui_opsindex]->md_ioctl) 1853 (dev, cmd, (void *) data, mode, &lock); 1854 } 1855 1856 /* 1857 * drop any locks we grabbed 1858 */ 1859 return (IOLOCK_RETURN_IOCTLEND(err, &lock)); 1860 } 1861 1862 static int 1863 mddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 1864 { 1865 minor_t mnum; 1866 set_t setno; 1867 mdi_unit_t *ui; 1868 1869 if ((mnum = getminor(dev)) == MD_ADM_MINOR) 1870 return (ENXIO); 1871 1872 setno = MD_MIN2SET(mnum); 1873 1874 if ((setno >= md_nsets) || (MD_MIN2UNIT(mnum) >= md_nunits) || 1875 ((ui = MDI_UNIT(mnum)) == NULL)) 1876 return (ENXIO); 1877 1878 1879 if ((md_get_setstatus(setno) & MD_SET_SNARFED) == 0) 1880 return (ENXIO); 1881 1882 if (md_ops[ui->ui_opsindex]->md_dump != NULL) 1883 return ((*md_ops[ui->ui_opsindex]->md_dump) 1884 (dev, addr, blkno, nblk)); 1885 1886 return (ENXIO); 1887 } 1888