1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * modctl system call for loadable module support. 31 */ 32 33 #include <sys/param.h> 34 #include <sys/user.h> 35 #include <sys/systm.h> 36 #include <sys/exec.h> 37 #include <sys/file.h> 38 #include <sys/stat.h> 39 #include <sys/conf.h> 40 #include <sys/time.h> 41 #include <sys/reboot.h> 42 #include <sys/fs/ufs_fsdir.h> 43 #include <sys/kmem.h> 44 #include <sys/sysconf.h> 45 #include <sys/cmn_err.h> 46 #include <sys/ddi.h> 47 #include <sys/sunddi.h> 48 #include <sys/sunndi.h> 49 #include <sys/ndi_impldefs.h> 50 #include <sys/ddi_impldefs.h> 51 #include <sys/ddi_implfuncs.h> 52 #include <sys/bootconf.h> 53 #include <sys/dc_ki.h> 54 #include <sys/cladm.h> 55 #include <sys/dtrace.h> 56 #include <sys/kdi.h> 57 58 #include <sys/devpolicy.h> 59 #include <sys/modctl.h> 60 #include <sys/kobj.h> 61 #include <sys/devops.h> 62 #include <sys/autoconf.h> 63 #include <sys/hwconf.h> 64 #include <sys/callb.h> 65 #include <sys/debug.h> 66 #include <sys/cpuvar.h> 67 #include <sys/sysmacros.h> 68 #include <sys/sysevent.h> 69 #include <sys/sysevent_impl.h> 70 #include <sys/instance.h> 71 #include <sys/modhash.h> 72 #include <sys/modhash_impl.h> 73 #include <sys/dacf_impl.h> 74 #include <sys/vfs.h> 75 #include <sys/pathname.h> 76 #include <sys/console.h> 77 #include <sys/policy.h> 78 #include <ipp/ipp_impl.h> 79 #include <sys/fs/dv_node.h> 80 #include <sys/strsubr.h> 81 #include <sys/fs/sdev_node.h> 82 83 static int mod_circdep(struct modctl *); 84 static int modinfo(modid_t, struct modinfo *); 85 86 static void mod_uninstall_all(void); 87 static int mod_getinfo(struct modctl *, struct modinfo *); 88 static struct modctl *allocate_modp(const char *, const char *); 89 90 static int mod_load(struct modctl *, int); 91 static void mod_unload(struct modctl *); 92 static int modinstall(struct modctl *); 93 static int moduninstall(struct modctl *); 94 95 static struct modctl *mod_hold_by_name_common(struct modctl *, const char *); 96 static struct modctl *mod_hold_next_by_id(modid_t); 97 static struct modctl *mod_hold_loaded_mod(struct modctl *, char *, int *); 98 static struct modctl *mod_hold_installed_mod(char *, int, int, int *); 99 100 static void mod_release(struct modctl *); 101 static void mod_make_requisite(struct modctl *, struct modctl *); 102 static int mod_install_requisites(struct modctl *); 103 static void check_esc_sequences(char *, char *); 104 static struct modctl *mod_hold_by_name_requisite(struct modctl *, char *); 105 106 /* 107 * module loading thread control structure. 
Calls to kobj_load_module() are
 * handed off to a separate thread using this structure.
 */
struct loadmt {
	ksema_t		sema;
	struct modctl	*mp;
	int		usepath;
	kthread_t	*owner;
	int		retval;
};

static void	modload_thread(struct loadmt *);

kcondvar_t	mod_cv;
kcondvar_t	mod_uninstall_cv;	/* Communication between swapper */
					/* and the uninstall daemon. */
kmutex_t	mod_lock;		/* protects &modules insert linkage, */
					/* mod_busy, mod_want, and mod_ref. */
					/* blocking operations while holding */
					/* mod_lock should be avoided */
kmutex_t	mod_uninstall_lock;	/* protects mod_uninstall_cv */
kthread_id_t	mod_aul_thread;

int	modunload_wait;
kmutex_t	modunload_wait_mutex;
kcondvar_t	modunload_wait_cv;
int	modunload_active_count;
int	modunload_disable_count;

int	isminiroot;		/* set if running as miniroot */
int	modrootloaded;		/* set after root driver and fs are loaded */
int	moddebug = 0x0;		/* debug flags for module writers */
int	swaploaded;		/* set after swap driver and fs are loaded */
int	bop_io_quiesced = 0;	/* set when BOP I/O can no longer be used */
int	last_module_id;
clock_t	mod_uninstall_interval = 0;
int	ddi_modclose_unload = 1;	/* 0 -> just decrement reference */

struct devnames *devnamesp;
struct devnames orphanlist;

krwlock_t	devinfo_tree_lock;	/* obsolete, to be removed */

#define	MAJBINDFILE	"/etc/name_to_major"
#define	SYSBINDFILE	"/etc/name_to_sysnum"

static char	majbind[] = MAJBINDFILE;
static char	sysbind[] = SYSBINDFILE;
static uint_t	mod_autounload_key;	/* for module autounload detection */

extern int	obpdebug;
extern int	make_mbind(char *, int, char *, struct bind **);

#define	DEBUGGER_PRESENT	((boothowto & RB_DEBUG) || (obpdebug != 0))

static int	minorperm_loaded = 0;

void
mod_setup(void)
{
	struct sysent *callp;
	int callnum, exectype;
	int num_devs;
	int i;

	/*
	 * Initialize the list of loaded driver dev_ops.
	 * XXX - This must be done before reading the system file so that
	 * forceloads of drivers will work.
	 */
	num_devs = read_binding_file(majbind, mb_hashtab, make_mbind);
	/*
	 * Since read_binding_file is common code, it doesn't enforce that all
	 * of the binding file entries have major numbers <= MAXMAJ32.  Thus,
	 * ensure that we don't allocate some massive amount of space due to a
	 * bad entry.  We can't have major numbers bigger than MAXMAJ32
	 * until file system support for larger major numbers exists.
	 */

	/*
	 * Leave space for expansion, but not more than L_MAXMAJ32
	 */
	devcnt = MIN(num_devs + 30, L_MAXMAJ32);
	devopsp = kmem_alloc(devcnt * sizeof (struct dev_ops *), KM_SLEEP);
	for (i = 0; i < devcnt; i++)
		devopsp[i] = &mod_nodev_ops;

	init_devnamesp(devcnt);

	/*
	 * Sync up with the work that the stand-alone linker has already done.
	 */
	(void) kobj_sync();

	if (boothowto & RB_DEBUG)
		kdi_dvec_modavail();

	make_aliases(mb_hashtab);

	/*
	 * Initialize streams device implementation structures.
	 */
	devimpl = kmem_zalloc(devcnt * sizeof (cdevsw_impl_t), KM_SLEEP);

	/*
	 * If the cl_bootstrap module is present,
	 * we should be configured as a cluster. Loading this module
	 * will set "cluster_bootflags" to non-zero.
215 */ 216 (void) modload("misc", "cl_bootstrap"); 217 218 (void) read_binding_file(sysbind, sb_hashtab, make_mbind); 219 init_syscallnames(NSYSCALL); 220 221 /* 222 * Start up dynamic autoconfiguration framework (dacf). 223 */ 224 mod_hash_init(); 225 dacf_init(); 226 227 /* 228 * Start up IP policy framework (ipp). 229 */ 230 ipp_init(); 231 232 /* 233 * Allocate loadable native system call locks. 234 */ 235 for (callnum = 0, callp = sysent; callnum < NSYSCALL; 236 callnum++, callp++) { 237 if (LOADABLE_SYSCALL(callp)) { 238 if (mod_getsysname(callnum) != NULL) { 239 callp->sy_lock = 240 kobj_zalloc(sizeof (krwlock_t), KM_SLEEP); 241 rw_init(callp->sy_lock, NULL, RW_DEFAULT, NULL); 242 } else { 243 callp->sy_flags &= ~SE_LOADABLE; 244 callp->sy_callc = nosys; 245 } 246 #ifdef DEBUG 247 } else { 248 /* 249 * Do some sanity checks on the sysent table 250 */ 251 switch (callp->sy_flags & SE_RVAL_MASK) { 252 case SE_32RVAL1: 253 /* only r_val1 returned */ 254 case SE_32RVAL1 | SE_32RVAL2: 255 /* r_val1 and r_val2 returned */ 256 case SE_64RVAL: 257 /* 64-bit rval returned */ 258 break; 259 default: 260 cmn_err(CE_WARN, "sysent[%d]: bad flags %x", 261 callnum, callp->sy_flags); 262 } 263 #endif 264 } 265 } 266 267 #ifdef _SYSCALL32_IMPL 268 /* 269 * Allocate loadable system call locks for 32-bit compat syscalls 270 */ 271 for (callnum = 0, callp = sysent32; callnum < NSYSCALL; 272 callnum++, callp++) { 273 if (LOADABLE_SYSCALL(callp)) { 274 if (mod_getsysname(callnum) != NULL) { 275 callp->sy_lock = 276 kobj_zalloc(sizeof (krwlock_t), KM_SLEEP); 277 rw_init(callp->sy_lock, NULL, RW_DEFAULT, NULL); 278 } else { 279 callp->sy_flags &= ~SE_LOADABLE; 280 callp->sy_callc = nosys; 281 } 282 #ifdef DEBUG 283 } else { 284 /* 285 * Do some sanity checks on the sysent table 286 */ 287 switch (callp->sy_flags & SE_RVAL_MASK) { 288 case SE_32RVAL1: 289 /* only r_val1 returned */ 290 case SE_32RVAL1 | SE_32RVAL2: 291 /* r_val1 and r_val2 returned */ 292 case SE_64RVAL: 293 /* 64-bit rval returned */ 294 break; 295 default: 296 cmn_err(CE_WARN, "sysent32[%d]: bad flags %x", 297 callnum, callp->sy_flags); 298 goto skip; 299 } 300 301 /* 302 * Cross-check the native and compatibility tables. 303 */ 304 if (callp->sy_callc == nosys || 305 sysent[callnum].sy_callc == nosys) 306 continue; 307 /* 308 * If only one or the other slot is loadable, then 309 * there's an error -- they should match! 310 */ 311 if ((callp->sy_callc == loadable_syscall) ^ 312 (sysent[callnum].sy_callc == loadable_syscall)) { 313 cmn_err(CE_WARN, "sysent[%d] loadable?", 314 callnum); 315 } 316 /* 317 * This is more of a heuristic test -- if the 318 * system call returns two values in the 32-bit 319 * world, it should probably return two 32-bit 320 * values in the 64-bit world too. 321 */ 322 if (((callp->sy_flags & SE_32RVAL2) == 0) ^ 323 ((sysent[callnum].sy_flags & SE_32RVAL2) == 0)) { 324 cmn_err(CE_WARN, "sysent[%d] rval2 mismatch!", 325 callnum); 326 } 327 skip:; 328 #endif /* DEBUG */ 329 } 330 } 331 #endif /* _SYSCALL32_IMPL */ 332 333 /* 334 * Allocate loadable exec locks. 
(Assumes all execs are loadable) 335 */ 336 for (exectype = 0; exectype < nexectype; exectype++) { 337 execsw[exectype].exec_lock = 338 kobj_zalloc(sizeof (krwlock_t), KM_SLEEP); 339 rw_init(execsw[exectype].exec_lock, NULL, RW_DEFAULT, NULL); 340 } 341 342 read_class_file(); 343 344 /* init thread specific structure for mod_uninstall_all */ 345 tsd_create(&mod_autounload_key, NULL); 346 } 347 348 static int 349 modctl_modload(int use_path, char *filename, int *rvp) 350 { 351 struct modctl *modp; 352 int retval = 0; 353 char *filenamep; 354 int modid; 355 356 filenamep = kmem_zalloc(MOD_MAXPATH, KM_SLEEP); 357 358 if (copyinstr(filename, filenamep, MOD_MAXPATH, 0)) { 359 retval = EFAULT; 360 goto out; 361 } 362 363 filenamep[MOD_MAXPATH - 1] = 0; 364 modp = mod_hold_installed_mod(filenamep, use_path, 0, &retval); 365 366 if (modp == NULL) 367 goto out; 368 369 modp->mod_loadflags |= MOD_NOAUTOUNLOAD; 370 modid = modp->mod_id; 371 mod_release_mod(modp); 372 CPU_STATS_ADDQ(CPU, sys, modload, 1); 373 if (rvp != NULL && copyout(&modid, rvp, sizeof (modid)) != 0) 374 retval = EFAULT; 375 out: 376 kmem_free(filenamep, MOD_MAXPATH); 377 378 return (retval); 379 } 380 381 static int 382 modctl_modunload(modid_t id) 383 { 384 int rval = 0; 385 386 if (id == 0) { 387 #ifdef DEBUG 388 /* 389 * Turn on mod_uninstall_daemon 390 */ 391 if (mod_uninstall_interval == 0) { 392 mod_uninstall_interval = 60; 393 modreap(); 394 return (rval); 395 } 396 #endif 397 mod_uninstall_all(); 398 } else { 399 rval = modunload(id); 400 } 401 return (rval); 402 } 403 404 static int 405 modctl_modinfo(modid_t id, struct modinfo *umodi) 406 { 407 int retval; 408 struct modinfo modi; 409 #if defined(_SYSCALL32_IMPL) 410 int nobase; 411 struct modinfo32 modi32; 412 #endif 413 414 if (get_udatamodel() == DATAMODEL_NATIVE) { 415 if (copyin(umodi, &modi, sizeof (struct modinfo)) != 0) 416 return (EFAULT); 417 } 418 #ifdef _SYSCALL32_IMPL 419 else { 420 bzero(&modi, sizeof (modi)); 421 if (copyin(umodi, &modi32, sizeof (struct modinfo32)) != 0) 422 return (EFAULT); 423 modi.mi_info = modi32.mi_info; 424 modi.mi_id = modi32.mi_id; 425 modi.mi_nextid = modi32.mi_nextid; 426 nobase = modi.mi_info & MI_INFO_NOBASE; 427 } 428 #endif 429 /* 430 * This flag is -only- for the kernels use. 431 */ 432 modi.mi_info &= ~MI_INFO_LINKAGE; 433 434 retval = modinfo(id, &modi); 435 if (retval) 436 return (retval); 437 438 if (get_udatamodel() == DATAMODEL_NATIVE) { 439 if (copyout(&modi, umodi, sizeof (struct modinfo)) != 0) 440 retval = EFAULT; 441 #ifdef _SYSCALL32_IMPL 442 } else { 443 int i; 444 445 if (!nobase && (uintptr_t)modi.mi_base > UINT32_MAX) 446 return (EOVERFLOW); 447 448 modi32.mi_info = modi.mi_info; 449 modi32.mi_state = modi.mi_state; 450 modi32.mi_id = modi.mi_id; 451 modi32.mi_nextid = modi.mi_nextid; 452 modi32.mi_base = (caddr32_t)(uintptr_t)modi.mi_base; 453 modi32.mi_size = modi.mi_size; 454 modi32.mi_rev = modi.mi_rev; 455 modi32.mi_loadcnt = modi.mi_loadcnt; 456 bcopy(modi.mi_name, modi32.mi_name, sizeof (modi32.mi_name)); 457 for (i = 0; i < MODMAXLINK32; i++) { 458 modi32.mi_msinfo[i].msi_p0 = modi.mi_msinfo[i].msi_p0; 459 bcopy(modi.mi_msinfo[i].msi_linkinfo, 460 modi32.mi_msinfo[i].msi_linkinfo, 461 sizeof (modi32.mi_msinfo[0].msi_linkinfo)); 462 } 463 if (copyout(&modi32, umodi, sizeof (struct modinfo32)) != 0) 464 retval = EFAULT; 465 #endif 466 } 467 468 return (retval); 469 } 470 471 /* 472 * Return the last major number in the range of permissible major numbers. 
473 */ 474 /*ARGSUSED*/ 475 static int 476 modctl_modreserve(modid_t id, int *data) 477 { 478 if (copyout(&devcnt, data, sizeof (devcnt)) != 0) 479 return (EFAULT); 480 return (0); 481 } 482 483 static int 484 modctl_add_major(int *data) 485 { 486 struct modconfig mc; 487 int i, rv; 488 struct aliases alias; 489 struct aliases *ap; 490 char name[MAXMODCONFNAME]; 491 char cname[MAXMODCONFNAME]; 492 char *drvname; 493 494 bzero(&mc, sizeof (struct modconfig)); 495 if (get_udatamodel() == DATAMODEL_NATIVE) { 496 if (copyin(data, &mc, sizeof (struct modconfig)) != 0) 497 return (EFAULT); 498 } 499 #ifdef _SYSCALL32_IMPL 500 else { 501 struct modconfig32 modc32; 502 503 if (copyin(data, &modc32, sizeof (struct modconfig32)) != 0) 504 return (EFAULT); 505 else { 506 bcopy(modc32.drvname, mc.drvname, 507 sizeof (modc32.drvname)); 508 bcopy(modc32.drvclass, mc.drvclass, 509 sizeof (modc32.drvclass)); 510 mc.major = modc32.major; 511 mc.num_aliases = modc32.num_aliases; 512 mc.ap = (struct aliases *)(uintptr_t)modc32.ap; 513 } 514 } 515 #endif 516 517 /* 518 * If the driver is already in the mb_hashtab, and the name given 519 * doesn't match that driver's name, fail. Otherwise, pass, since 520 * we may be adding aliases. 521 */ 522 if ((drvname = mod_major_to_name(mc.major)) != NULL && 523 strcmp(drvname, mc.drvname) != 0) 524 return (EINVAL); 525 526 /* 527 * Add each supplied driver alias to mb_hashtab 528 */ 529 ap = mc.ap; 530 for (i = 0; i < mc.num_aliases; i++) { 531 bzero(&alias, sizeof (struct aliases)); 532 533 if (get_udatamodel() == DATAMODEL_NATIVE) { 534 if (copyin(ap, &alias, sizeof (struct aliases)) != 0) 535 return (EFAULT); 536 537 if (alias.a_len > MAXMODCONFNAME) 538 return (EINVAL); 539 540 if (copyin(alias.a_name, name, alias.a_len) != 0) 541 return (EFAULT); 542 543 if (name[alias.a_len - 1] != '\0') 544 return (EINVAL); 545 } 546 #ifdef _SYSCALL32_IMPL 547 else { 548 struct aliases32 al32; 549 550 bzero(&al32, sizeof (struct aliases32)); 551 if (copyin(ap, &al32, sizeof (struct aliases32)) != 0) 552 return (EFAULT); 553 554 if (al32.a_len > MAXMODCONFNAME) 555 return (EINVAL); 556 557 if (copyin((void *)(uintptr_t)al32.a_name, 558 name, al32.a_len) != 0) 559 return (EFAULT); 560 561 if (name[al32.a_len - 1] != '\0') 562 return (EINVAL); 563 564 alias.a_next = (void *)(uintptr_t)al32.a_next; 565 } 566 #endif 567 check_esc_sequences(name, cname); 568 (void) make_mbind(cname, mc.major, NULL, mb_hashtab); 569 ap = alias.a_next; 570 } 571 572 /* 573 * Try to establish an mbinding for mc.drvname, and add it to devnames. 
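 * (Illustrative note: the alias names copied in above may carry octal
 * escapes for embedded spaces; for example, an alias passed in as
 * "my\040alias" is decoded by check_esc_sequences() to "my alias" before
 * being bound.  The alias name used here is hypothetical.)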
574 * Add class if any after establishing the major number 575 */ 576 (void) make_mbind(mc.drvname, mc.major, NULL, mb_hashtab); 577 rv = make_devname(mc.drvname, mc.major); 578 579 if (rv == 0) { 580 if (mc.drvclass[0] != '\0') 581 add_class(mc.drvname, mc.drvclass); 582 (void) i_ddi_load_drvconf(mc.major); 583 i_ddi_bind_devs(); 584 i_ddi_di_cache_invalidate(KM_SLEEP); 585 } 586 return (rv); 587 } 588 589 static int 590 modctl_rem_major(major_t major) 591 { 592 struct devnames *dnp; 593 594 if (major >= devcnt) 595 return (EINVAL); 596 597 /* mark devnames as removed */ 598 dnp = &devnamesp[major]; 599 LOCK_DEV_OPS(&dnp->dn_lock); 600 if (dnp->dn_name == NULL || 601 (dnp->dn_flags & (DN_DRIVER_REMOVED | DN_TAKEN_GETUDEV))) { 602 UNLOCK_DEV_OPS(&dnp->dn_lock); 603 return (EINVAL); 604 } 605 dnp->dn_flags |= DN_DRIVER_REMOVED; 606 pm_driver_removed(major); 607 UNLOCK_DEV_OPS(&dnp->dn_lock); 608 609 (void) i_ddi_unload_drvconf(major); 610 i_ddi_unbind_devs(major); 611 i_ddi_di_cache_invalidate(KM_SLEEP); 612 return (0); 613 } 614 615 static struct vfs * 616 path_to_vfs(char *name) 617 { 618 vnode_t *vp; 619 struct vfs *vfsp; 620 621 if (lookupname(name, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) 622 return (NULL); 623 624 vfsp = vp->v_vfsp; 625 VN_RELE(vp); 626 return (vfsp); 627 } 628 629 static int 630 new_vfs_in_modpath() 631 { 632 static int n_modpath = 0; 633 static char *modpath_copy; 634 static struct pathvfs { 635 char *path; 636 struct vfs *vfsp; 637 } *pathvfs; 638 639 int i, new_vfs = 0; 640 char *tmp, *tmp1; 641 struct vfs *vfsp; 642 643 if (n_modpath != 0) { 644 for (i = 0; i < n_modpath; i++) { 645 vfsp = path_to_vfs(pathvfs[i].path); 646 if (vfsp != pathvfs[i].vfsp) { 647 pathvfs[i].vfsp = vfsp; 648 if (vfsp) 649 new_vfs = 1; 650 } 651 } 652 return (new_vfs); 653 } 654 655 /* 656 * First call, initialize the pathvfs structure 657 */ 658 modpath_copy = i_ddi_strdup(default_path, KM_SLEEP); 659 tmp = modpath_copy; 660 n_modpath = 1; 661 tmp1 = strchr(tmp, ' '); 662 while (tmp1) { 663 *tmp1 = '\0'; 664 n_modpath++; 665 tmp = tmp1 + 1; 666 tmp1 = strchr(tmp, ' '); 667 } 668 669 pathvfs = kmem_zalloc(n_modpath * sizeof (struct pathvfs), KM_SLEEP); 670 tmp = modpath_copy; 671 for (i = 0; i < n_modpath; i++) { 672 pathvfs[i].path = tmp; 673 vfsp = path_to_vfs(tmp); 674 pathvfs[i].vfsp = vfsp; 675 tmp += strlen(tmp) + 1; 676 } 677 return (1); /* always reread driver.conf the first time */ 678 } 679 680 static int 681 modctl_load_drvconf(major_t major) 682 { 683 int ret; 684 685 if (major != (major_t)-1) { 686 ret = i_ddi_load_drvconf(major); 687 if (ret == 0) 688 i_ddi_bind_devs(); 689 return (ret); 690 } 691 692 /* 693 * We are invoked to rescan new driver.conf files. It is 694 * only necessary if a new file system was mounted in the 695 * module_path. Because rescanning driver.conf files can 696 * take some time on older platforms (sun4m), the following 697 * code skips unnecessary driver.conf rescans to optimize 698 * boot performance. 
699 */ 700 if (new_vfs_in_modpath()) { 701 (void) i_ddi_load_drvconf((major_t)-1); 702 /* 703 * If we are still initializing io subsystem, 704 * load drivers with ddi-forceattach property 705 */ 706 if (!i_ddi_io_initialized()) 707 i_ddi_forceattach_drivers(); 708 } 709 return (0); 710 } 711 712 static int 713 modctl_unload_drvconf(major_t major) 714 { 715 int ret; 716 717 if (major >= devcnt) 718 return (EINVAL); 719 720 ret = i_ddi_unload_drvconf(major); 721 if (ret != 0) 722 return (ret); 723 (void) i_ddi_unbind_devs(major); 724 725 return (0); 726 } 727 728 static void 729 check_esc_sequences(char *str, char *cstr) 730 { 731 int i; 732 size_t len; 733 char *p; 734 735 len = strlen(str); 736 for (i = 0; i < len; i++, str++, cstr++) { 737 if (*str != '\\') { 738 *cstr = *str; 739 } else { 740 p = str + 1; 741 /* 742 * we only handle octal escape sequences for SPACE 743 */ 744 if (*p++ == '0' && *p++ == '4' && *p == '0') { 745 *cstr = ' '; 746 str += 3; 747 } else { 748 *cstr = *str; 749 } 750 } 751 } 752 *cstr = 0; 753 } 754 755 static int 756 modctl_getmodpathlen(int *data) 757 { 758 int len; 759 len = strlen(default_path); 760 if (copyout(&len, data, sizeof (len)) != 0) 761 return (EFAULT); 762 return (0); 763 } 764 765 static int 766 modctl_getmodpath(char *data) 767 { 768 if (copyout(default_path, data, strlen(default_path) + 1) != 0) 769 return (EFAULT); 770 return (0); 771 } 772 773 static int 774 modctl_read_sysbinding_file(void) 775 { 776 (void) read_binding_file(sysbind, sb_hashtab, make_mbind); 777 return (0); 778 } 779 780 static int 781 modctl_getmaj(char *uname, uint_t ulen, int *umajorp) 782 { 783 char name[256]; 784 int retval; 785 major_t major; 786 787 if (ulen == 0) 788 return (EINVAL); 789 if ((retval = copyinstr(uname, name, 790 (ulen < 256) ? ulen : 256, 0)) != 0) 791 return (retval); 792 if ((major = mod_name_to_major(name)) == (major_t)-1) 793 return (ENODEV); 794 if (copyout(&major, umajorp, sizeof (major_t)) != 0) 795 return (EFAULT); 796 return (0); 797 } 798 799 static char ** 800 convert_constraint_string(char *constraints, size_t len) 801 { 802 int i; 803 int n; 804 char *p; 805 char **array; 806 807 ASSERT(constraints != NULL); 808 ASSERT(len > 0); 809 810 for (i = 0, p = constraints; strlen(p) > 0; i++, p += strlen(p) + 1) 811 ; 812 813 n = i; 814 815 if (n == 0) { 816 kmem_free(constraints, len); 817 return (NULL); 818 } 819 820 array = kmem_alloc((n + 1) * sizeof (char *), KM_SLEEP); 821 822 for (i = 0, p = constraints; i < n; i++, p += strlen(p) + 1) { 823 array[i] = i_ddi_strdup(p, KM_SLEEP); 824 } 825 array[n] = NULL; 826 827 kmem_free(constraints, len); 828 829 return (array); 830 } 831 /*ARGSUSED*/ 832 static int 833 modctl_retire(char *path, char *uconstraints, size_t ulen) 834 { 835 char *pathbuf; 836 char *devpath; 837 size_t pathsz; 838 int retval; 839 char *constraints; 840 char **cons_array; 841 842 if (path == NULL) 843 return (EINVAL); 844 845 if ((uconstraints == NULL) ^ (ulen == 0)) 846 return (EINVAL); 847 848 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 849 retval = copyinstr(path, pathbuf, MAXPATHLEN, &pathsz); 850 if (retval != 0) { 851 kmem_free(pathbuf, MAXPATHLEN); 852 return (retval); 853 } 854 devpath = i_ddi_strdup(pathbuf, KM_SLEEP); 855 kmem_free(pathbuf, MAXPATHLEN); 856 857 /* 858 * First check if the device is already retired. 
859 * If it is, this becomes a NOP 860 */ 861 if (e_ddi_device_retired(devpath)) { 862 cmn_err(CE_NOTE, "Device: already retired: %s", devpath); 863 kmem_free(devpath, strlen(devpath) + 1); 864 return (0); 865 } 866 867 cons_array = NULL; 868 if (uconstraints) { 869 constraints = kmem_alloc(ulen, KM_SLEEP); 870 if (copyin(uconstraints, constraints, ulen)) { 871 kmem_free(constraints, ulen); 872 kmem_free(devpath, strlen(devpath) + 1); 873 return (EFAULT); 874 } 875 cons_array = convert_constraint_string(constraints, ulen); 876 } 877 878 /* 879 * Try to retire the device first. The following 880 * routine will return an error only if the device 881 * is not retireable i.e. retire constraints forbid 882 * a retire. A return of success from this routine 883 * indicates that device is retireable. 884 */ 885 retval = e_ddi_retire_device(devpath, cons_array); 886 if (retval != DDI_SUCCESS) { 887 cmn_err(CE_WARN, "constraints forbid retire: %s", devpath); 888 kmem_free(devpath, strlen(devpath) + 1); 889 return (ENOTSUP); 890 } 891 892 /* 893 * Ok, the retire succeeded. Persist the retire. 894 * If retiring a nexus, we need to only persist the 895 * nexus retire. Any children of a retired nexus 896 * are automatically covered by the retire store 897 * code. 898 */ 899 retval = e_ddi_retire_persist(devpath); 900 if (retval != 0) { 901 cmn_err(CE_WARN, "Failed to persist device retire: error %d: " 902 "%s", retval, devpath); 903 kmem_free(devpath, strlen(devpath) + 1); 904 return (retval); 905 } 906 if (moddebug & MODDEBUG_RETIRE) 907 cmn_err(CE_NOTE, "Persisted retire of device: %s", devpath); 908 909 kmem_free(devpath, strlen(devpath) + 1); 910 return (0); 911 } 912 913 static int 914 modctl_is_retired(char *path, int *statep) 915 { 916 char *pathbuf; 917 char *devpath; 918 size_t pathsz; 919 int error; 920 int status; 921 922 if (path == NULL || statep == NULL) 923 return (EINVAL); 924 925 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 926 error = copyinstr(path, pathbuf, MAXPATHLEN, &pathsz); 927 if (error != 0) { 928 kmem_free(pathbuf, MAXPATHLEN); 929 return (error); 930 } 931 devpath = i_ddi_strdup(pathbuf, KM_SLEEP); 932 kmem_free(pathbuf, MAXPATHLEN); 933 934 if (e_ddi_device_retired(devpath)) 935 status = 1; 936 else 937 status = 0; 938 kmem_free(devpath, strlen(devpath) + 1); 939 940 return (copyout(&status, statep, sizeof (status)) ? EFAULT : 0); 941 } 942 943 static int 944 modctl_unretire(char *path) 945 { 946 char *pathbuf; 947 char *devpath; 948 size_t pathsz; 949 int retired; 950 int retval; 951 952 if (path == NULL) 953 return (EINVAL); 954 955 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 956 retval = copyinstr(path, pathbuf, MAXPATHLEN, &pathsz); 957 if (retval != 0) { 958 kmem_free(pathbuf, MAXPATHLEN); 959 return (retval); 960 } 961 devpath = i_ddi_strdup(pathbuf, KM_SLEEP); 962 kmem_free(pathbuf, MAXPATHLEN); 963 964 /* 965 * We check if a device is retired (first) before 966 * unpersisting the retire, because we use the 967 * retire store to determine if a device is retired. 968 * If we unpersist first, the device will always appear 969 * to be unretired. For the rationale behind unpersisting 970 * a device that is not retired, see the next comment. 971 */ 972 retired = e_ddi_device_retired(devpath); 973 974 /* 975 * We call unpersist unconditionally because the lookup 976 * for retired devices (e_ddi_device_retired()), skips "bypassed" 977 * devices. We still want to be able remove "bypassed" entries 978 * from the persistent store, so we unpersist unconditionally 979 * i.e. 
whether or not the entry is found on a lookup. 980 * 981 * e_ddi_retire_unpersist() returns 1 if it found and cleared 982 * an entry from the retire store or 0 otherwise. 983 */ 984 if (e_ddi_retire_unpersist(devpath)) 985 if (moddebug & MODDEBUG_RETIRE) { 986 cmn_err(CE_NOTE, "Unpersisted retire of device: %s", 987 devpath); 988 } 989 990 /* 991 * Check if the device is already unretired. If so, 992 * the unretire becomes a NOP 993 */ 994 if (!retired) { 995 cmn_err(CE_NOTE, "Not retired: %s", devpath); 996 kmem_free(devpath, strlen(devpath) + 1); 997 return (0); 998 } 999 1000 retval = e_ddi_unretire_device(devpath); 1001 if (retval != 0) { 1002 cmn_err(CE_WARN, "cannot unretire device: error %d, path %s\n", 1003 retval, devpath); 1004 } 1005 1006 kmem_free(devpath, strlen(devpath) + 1); 1007 1008 return (retval); 1009 } 1010 1011 static int 1012 modctl_getname(char *uname, uint_t ulen, int *umajorp) 1013 { 1014 char *name; 1015 major_t major; 1016 1017 if (copyin(umajorp, &major, sizeof (major)) != 0) 1018 return (EFAULT); 1019 if ((name = mod_major_to_name(major)) == NULL) 1020 return (ENODEV); 1021 if ((strlen(name) + 1) > ulen) 1022 return (ENOSPC); 1023 return (copyoutstr(name, uname, ulen, NULL)); 1024 } 1025 1026 static int 1027 modctl_devt2instance(dev_t dev, int *uinstancep) 1028 { 1029 int instance; 1030 1031 if ((instance = dev_to_instance(dev)) == -1) 1032 return (EINVAL); 1033 1034 return (copyout(&instance, uinstancep, sizeof (int))); 1035 } 1036 1037 /* 1038 * Return the sizeof of the device id. 1039 */ 1040 static int 1041 modctl_sizeof_devid(dev_t dev, uint_t *len) 1042 { 1043 uint_t sz; 1044 ddi_devid_t devid; 1045 1046 /* get device id */ 1047 if (ddi_lyr_get_devid(dev, &devid) == DDI_FAILURE) 1048 return (EINVAL); 1049 1050 sz = ddi_devid_sizeof(devid); 1051 ddi_devid_free(devid); 1052 1053 /* copyout device id size */ 1054 if (copyout(&sz, len, sizeof (sz)) != 0) 1055 return (EFAULT); 1056 1057 return (0); 1058 } 1059 1060 /* 1061 * Return a copy of the device id. 1062 */ 1063 static int 1064 modctl_get_devid(dev_t dev, uint_t len, ddi_devid_t udevid) 1065 { 1066 uint_t sz; 1067 ddi_devid_t devid; 1068 int err = 0; 1069 1070 /* get device id */ 1071 if (ddi_lyr_get_devid(dev, &devid) == DDI_FAILURE) 1072 return (EINVAL); 1073 1074 sz = ddi_devid_sizeof(devid); 1075 1076 /* Error if device id is larger than space allocated */ 1077 if (sz > len) { 1078 ddi_devid_free(devid); 1079 return (ENOSPC); 1080 } 1081 1082 /* copy out device id */ 1083 if (copyout(devid, udevid, sz) != 0) 1084 err = EFAULT; 1085 ddi_devid_free(devid); 1086 return (err); 1087 } 1088 1089 /* 1090 * return the /devices paths associated with the specified devid and 1091 * minor name. 1092 */ 1093 /*ARGSUSED*/ 1094 static int 1095 modctl_devid2paths(ddi_devid_t udevid, char *uminor_name, uint_t flag, 1096 size_t *ulensp, char *upaths) 1097 { 1098 ddi_devid_t devid = NULL; 1099 int devid_len; 1100 char *minor_name = NULL; 1101 dev_info_t *dip = NULL; 1102 struct ddi_minor_data *dmdp; 1103 char *path = NULL; 1104 int ulens; 1105 int lens; 1106 int len; 1107 dev_t *devlist = NULL; 1108 int ndevs; 1109 int i; 1110 int ret = 0; 1111 1112 /* 1113 * If upaths is NULL then we are only computing the amount of space 1114 * needed to hold the paths and returning the value in *ulensp. If we 1115 * are copying out paths then we get the amount of space allocated by 1116 * the caller. If the actual space needed for paths is larger, or 1117 * things are changing out from under us, then we return EAGAIN. 
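 *
 * The paths are returned as a sequence of NUL-terminated strings with an
 * extra NUL appended (double termination).  A rough caller-side sketch of
 * the two-pass sizing protocol follows; the userland modctl() wrapper and
 * the variable names are illustrative assumptions, not part of this file:
 *
 *	size_t lens;
 *	char *paths;
 *
 *	if (modctl(MODDEVID2PATHS, devid, minor, 0, &lens, NULL) == 0) {
 *		paths = malloc(lens);
 *		(void) modctl(MODDEVID2PATHS, devid, minor, 0, &lens, paths);
 *	}
 *
 * A second call failing with errno set to EAGAIN means the set of paths
 * changed (or grew) between the two calls and the caller should retry.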
1118 */ 1119 if (upaths) { 1120 if (ulensp == NULL) 1121 return (EINVAL); 1122 if (copyin(ulensp, &ulens, sizeof (ulens)) != 0) 1123 return (EFAULT); 1124 } 1125 1126 /* 1127 * copyin enough of the devid to determine the length then 1128 * reallocate and copy in the entire devid. 1129 */ 1130 devid_len = ddi_devid_sizeof(NULL); 1131 devid = kmem_alloc(devid_len, KM_SLEEP); 1132 if (copyin(udevid, devid, devid_len)) { 1133 ret = EFAULT; 1134 goto out; 1135 } 1136 len = devid_len; 1137 devid_len = ddi_devid_sizeof(devid); 1138 kmem_free(devid, len); 1139 devid = kmem_alloc(devid_len, KM_SLEEP); 1140 if (copyin(udevid, devid, devid_len)) { 1141 ret = EFAULT; 1142 goto out; 1143 } 1144 1145 /* copyin the minor name if specified. */ 1146 minor_name = uminor_name; 1147 if ((minor_name != DEVID_MINOR_NAME_ALL) && 1148 (minor_name != DEVID_MINOR_NAME_ALL_CHR) && 1149 (minor_name != DEVID_MINOR_NAME_ALL_BLK)) { 1150 minor_name = kmem_alloc(MAXPATHLEN, KM_SLEEP); 1151 if (copyinstr(uminor_name, minor_name, MAXPATHLEN, 0)) { 1152 ret = EFAULT; 1153 goto out; 1154 } 1155 } 1156 1157 /* 1158 * Use existing function to resolve the devid into a devlist. 1159 * 1160 * NOTE: there is a loss of spectype information in the current 1161 * ddi_lyr_devid_to_devlist implementation. We work around this by not 1162 * passing down DEVID_MINOR_NAME_ALL here, but reproducing all minor 1163 * node forms in the loop processing the devlist below. It would be 1164 * best if at some point the use of this interface here was replaced 1165 * with a path oriented call. 1166 */ 1167 if (ddi_lyr_devid_to_devlist(devid, 1168 (minor_name == DEVID_MINOR_NAME_ALL) ? 1169 DEVID_MINOR_NAME_ALL_CHR : minor_name, 1170 &ndevs, &devlist) != DDI_SUCCESS) { 1171 ret = EINVAL; 1172 goto out; 1173 } 1174 1175 /* 1176 * loop over the devlist, converting each devt to a path and doing 1177 * a copyout of the path and computation of the amount of space 1178 * needed to hold all the paths 1179 */ 1180 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 1181 for (i = 0, lens = 0; i < ndevs; i++) { 1182 1183 /* find the dip associated with the dev_t */ 1184 if ((dip = e_ddi_hold_devi_by_dev(devlist[i], 0)) == NULL) 1185 continue; 1186 1187 /* loop over all the minor nodes, skipping ones we don't want */ 1188 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 1189 if ((dmdp->ddm_dev != devlist[i]) || 1190 (dmdp->type != DDM_MINOR)) 1191 continue; 1192 1193 if ((minor_name != DEVID_MINOR_NAME_ALL) && 1194 (minor_name != DEVID_MINOR_NAME_ALL_CHR) && 1195 (minor_name != DEVID_MINOR_NAME_ALL_BLK) && 1196 strcmp(minor_name, dmdp->ddm_name)) 1197 continue; 1198 else { 1199 if ((minor_name == DEVID_MINOR_NAME_ALL_CHR) && 1200 (dmdp->ddm_spec_type != S_IFCHR)) 1201 continue; 1202 if ((minor_name == DEVID_MINOR_NAME_ALL_BLK) && 1203 (dmdp->ddm_spec_type != S_IFBLK)) 1204 continue; 1205 } 1206 1207 /* XXX need ddi_pathname_minor(dmdp, path); interface */ 1208 if (ddi_dev_pathname(dmdp->ddm_dev, dmdp->ddm_spec_type, 1209 path) != DDI_SUCCESS) { 1210 ret = EAGAIN; 1211 goto out; 1212 } 1213 len = strlen(path) + 1; 1214 *(path + len) = '\0'; /* set double termination */ 1215 lens += len; 1216 1217 /* copyout the path with double terminations */ 1218 if (upaths) { 1219 if (lens > ulens) { 1220 ret = EAGAIN; 1221 goto out; 1222 } 1223 if (copyout(path, upaths, len + 1)) { 1224 ret = EFAULT; 1225 goto out; 1226 } 1227 upaths += len; 1228 } 1229 } 1230 ddi_release_devi(dip); 1231 dip = NULL; 1232 } 1233 lens++; /* add one for double termination */ 1234 1235 /* copy 
out the amount of space needed to hold the paths */ 1236 if (ulensp && copyout(&lens, ulensp, sizeof (lens))) { 1237 ret = EFAULT; 1238 goto out; 1239 } 1240 ret = 0; 1241 1242 out: if (dip) 1243 ddi_release_devi(dip); 1244 if (path) 1245 kmem_free(path, MAXPATHLEN); 1246 if (devlist) 1247 ddi_lyr_free_devlist(devlist, ndevs); 1248 if (minor_name && 1249 (minor_name != DEVID_MINOR_NAME_ALL) && 1250 (minor_name != DEVID_MINOR_NAME_ALL_CHR) && 1251 (minor_name != DEVID_MINOR_NAME_ALL_BLK)) 1252 kmem_free(minor_name, MAXPATHLEN); 1253 if (devid) 1254 kmem_free(devid, devid_len); 1255 return (ret); 1256 } 1257 1258 /* 1259 * Return the size of the minor name. 1260 */ 1261 static int 1262 modctl_sizeof_minorname(dev_t dev, int spectype, uint_t *len) 1263 { 1264 uint_t sz; 1265 char *name; 1266 1267 /* get the minor name */ 1268 if (ddi_lyr_get_minor_name(dev, spectype, &name) == DDI_FAILURE) 1269 return (EINVAL); 1270 1271 sz = strlen(name) + 1; 1272 kmem_free(name, sz); 1273 1274 /* copy out the size of the minor name */ 1275 if (copyout(&sz, len, sizeof (sz)) != 0) 1276 return (EFAULT); 1277 1278 return (0); 1279 } 1280 1281 /* 1282 * Return the minor name. 1283 */ 1284 static int 1285 modctl_get_minorname(dev_t dev, int spectype, uint_t len, char *uname) 1286 { 1287 uint_t sz; 1288 char *name; 1289 int err = 0; 1290 1291 /* get the minor name */ 1292 if (ddi_lyr_get_minor_name(dev, spectype, &name) == DDI_FAILURE) 1293 return (EINVAL); 1294 1295 sz = strlen(name) + 1; 1296 1297 /* Error if the minor name is larger than the space allocated */ 1298 if (sz > len) { 1299 kmem_free(name, sz); 1300 return (ENOSPC); 1301 } 1302 1303 /* copy out the minor name */ 1304 if (copyout(name, uname, sz) != 0) 1305 err = EFAULT; 1306 kmem_free(name, sz); 1307 return (err); 1308 } 1309 1310 /* 1311 * Return the size of the (dev_t,spectype) devfspath name. 1312 */ 1313 static int 1314 modctl_devfspath_len(dev_t dev, int spectype, uint_t *len) 1315 { 1316 uint_t sz; 1317 char *name; 1318 1319 /* get the path name */ 1320 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1321 if (ddi_dev_pathname(dev, spectype, name) == DDI_FAILURE) { 1322 kmem_free(name, MAXPATHLEN); 1323 return (EINVAL); 1324 } 1325 1326 sz = strlen(name) + 1; 1327 kmem_free(name, MAXPATHLEN); 1328 1329 /* copy out the size of the path name */ 1330 if (copyout(&sz, len, sizeof (sz)) != 0) 1331 return (EFAULT); 1332 1333 return (0); 1334 } 1335 1336 /* 1337 * Return the (dev_t,spectype) devfspath name. 1338 */ 1339 static int 1340 modctl_devfspath(dev_t dev, int spectype, uint_t len, char *uname) 1341 { 1342 uint_t sz; 1343 char *name; 1344 int err = 0; 1345 1346 /* get the path name */ 1347 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1348 if (ddi_dev_pathname(dev, spectype, name) == DDI_FAILURE) { 1349 kmem_free(name, MAXPATHLEN); 1350 return (EINVAL); 1351 } 1352 1353 sz = strlen(name) + 1; 1354 1355 /* Error if the path name is larger than the space allocated */ 1356 if (sz > len) { 1357 kmem_free(name, MAXPATHLEN); 1358 return (ENOSPC); 1359 } 1360 1361 /* copy out the path name */ 1362 if (copyout(name, uname, sz) != 0) 1363 err = EFAULT; 1364 kmem_free(name, MAXPATHLEN); 1365 return (err); 1366 } 1367 1368 /* 1369 * Return the size of the (major,instance) devfspath name. 
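 *
 * Callers typically pair this with MODGETDEVFSPATH_MI below; a rough sketch
 * (the userland modctl() wrapper and the variable names are illustrative
 * assumptions):
 *
 *	uint_t sz;
 *	char *buf;
 *
 *	if (modctl(MODGETDEVFSPATH_MI_LEN, major, instance, &sz) == 0) {
 *		buf = malloc(sz);
 *		(void) modctl(MODGETDEVFSPATH_MI, major, instance, sz, buf);
 *	}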
1370 */ 1371 static int 1372 modctl_devfspath_mi_len(major_t major, int instance, uint_t *len) 1373 { 1374 uint_t sz; 1375 char *name; 1376 1377 /* get the path name */ 1378 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1379 if (e_ddi_majorinstance_to_path(major, instance, name) != DDI_SUCCESS) { 1380 kmem_free(name, MAXPATHLEN); 1381 return (EINVAL); 1382 } 1383 1384 sz = strlen(name) + 1; 1385 kmem_free(name, MAXPATHLEN); 1386 1387 /* copy out the size of the path name */ 1388 if (copyout(&sz, len, sizeof (sz)) != 0) 1389 return (EFAULT); 1390 1391 return (0); 1392 } 1393 1394 /* 1395 * Return the (major_instance) devfspath name. 1396 * NOTE: e_ddi_majorinstance_to_path does not require the device to attach to 1397 * return a path - it uses the instance tree. 1398 */ 1399 static int 1400 modctl_devfspath_mi(major_t major, int instance, uint_t len, char *uname) 1401 { 1402 uint_t sz; 1403 char *name; 1404 int err = 0; 1405 1406 /* get the path name */ 1407 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1408 if (e_ddi_majorinstance_to_path(major, instance, name) != DDI_SUCCESS) { 1409 kmem_free(name, MAXPATHLEN); 1410 return (EINVAL); 1411 } 1412 1413 sz = strlen(name) + 1; 1414 1415 /* Error if the path name is larger than the space allocated */ 1416 if (sz > len) { 1417 kmem_free(name, MAXPATHLEN); 1418 return (ENOSPC); 1419 } 1420 1421 /* copy out the path name */ 1422 if (copyout(name, uname, sz) != 0) 1423 err = EFAULT; 1424 kmem_free(name, MAXPATHLEN); 1425 return (err); 1426 } 1427 1428 static int 1429 modctl_get_fbname(char *path) 1430 { 1431 extern dev_t fbdev; 1432 char *pathname = NULL; 1433 int rval = 0; 1434 1435 /* make sure fbdev is set before we plunge in */ 1436 if (fbdev == NODEV) 1437 return (ENODEV); 1438 1439 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1440 if ((rval = ddi_dev_pathname(fbdev, S_IFCHR, 1441 pathname)) == DDI_SUCCESS) { 1442 if (copyout(pathname, path, strlen(pathname)+1) != 0) { 1443 rval = EFAULT; 1444 } 1445 } 1446 kmem_free(pathname, MAXPATHLEN); 1447 return (rval); 1448 } 1449 1450 /* 1451 * modctl_reread_dacf() 1452 * Reread the dacf rules database from the named binding file. 1453 * If NULL is specified, pass along the NULL, it means 'use the default'. 1454 */ 1455 static int 1456 modctl_reread_dacf(char *path) 1457 { 1458 int rval = 0; 1459 char *filename, *filenamep; 1460 1461 filename = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1462 1463 if (path == NULL) { 1464 filenamep = NULL; 1465 } else { 1466 if (copyinstr(path, filename, MAXPATHLEN, 0) != 0) { 1467 rval = EFAULT; 1468 goto out; 1469 } 1470 filenamep = filename; 1471 filenamep[MAXPATHLEN - 1] = '\0'; 1472 } 1473 1474 rval = read_dacf_binding_file(filenamep); 1475 out: 1476 kmem_free(filename, MAXPATHLEN); 1477 return (rval); 1478 } 1479 1480 /*ARGSUSED*/ 1481 static int 1482 modctl_modevents(int subcmd, uintptr_t a2, uintptr_t a3, uintptr_t a4, 1483 uint_t flag) 1484 { 1485 int error = 0; 1486 char *filenamep; 1487 1488 switch (subcmd) { 1489 1490 case MODEVENTS_FLUSH: 1491 /* flush all currently queued events */ 1492 log_sysevent_flushq(subcmd, flag); 1493 break; 1494 1495 case MODEVENTS_SET_DOOR_UPCALL_FILENAME: 1496 /* 1497 * bind door_upcall to filename 1498 * this should only be done once per invocation 1499 * of the event daemon. 
1500 */ 1501 1502 filenamep = kmem_zalloc(MOD_MAXPATH, KM_SLEEP); 1503 1504 if (copyinstr((char *)a2, filenamep, MOD_MAXPATH, 0)) { 1505 error = EFAULT; 1506 } else { 1507 error = log_sysevent_filename(filenamep); 1508 } 1509 kmem_free(filenamep, MOD_MAXPATH); 1510 break; 1511 1512 case MODEVENTS_GETDATA: 1513 error = log_sysevent_copyout_data((sysevent_id_t *)a2, 1514 (size_t)a3, (caddr_t)a4); 1515 break; 1516 1517 case MODEVENTS_FREEDATA: 1518 error = log_sysevent_free_data((sysevent_id_t *)a2); 1519 break; 1520 case MODEVENTS_POST_EVENT: 1521 error = log_usr_sysevent((sysevent_t *)a2, (uint32_t)a3, 1522 (sysevent_id_t *)a4); 1523 break; 1524 case MODEVENTS_REGISTER_EVENT: 1525 error = log_sysevent_register((char *)a2, (char *)a3, 1526 (se_pubsub_t *)a4); 1527 break; 1528 default: 1529 error = EINVAL; 1530 } 1531 1532 return (error); 1533 } 1534 1535 static void 1536 free_mperm(mperm_t *mp) 1537 { 1538 int len; 1539 1540 if (mp->mp_minorname) { 1541 len = strlen(mp->mp_minorname) + 1; 1542 kmem_free(mp->mp_minorname, len); 1543 } 1544 kmem_free(mp, sizeof (mperm_t)); 1545 } 1546 1547 #define MP_NO_DRV_ERR \ 1548 "/etc/minor_perm: no driver for %s\n" 1549 1550 #define MP_EMPTY_MINOR \ 1551 "/etc/minor_perm: empty minor name for driver %s\n" 1552 1553 #define MP_NO_MINOR \ 1554 "/etc/minor_perm: no minor matching %s for driver %s\n" 1555 1556 /* 1557 * Remove mperm entry with matching minorname 1558 */ 1559 static void 1560 rem_minorperm(major_t major, char *drvname, mperm_t *mp, int is_clone) 1561 { 1562 mperm_t **mp_head; 1563 mperm_t *freemp = NULL; 1564 struct devnames *dnp = &devnamesp[major]; 1565 mperm_t **wildmp; 1566 1567 ASSERT(mp->mp_minorname && strlen(mp->mp_minorname) > 0); 1568 1569 LOCK_DEV_OPS(&dnp->dn_lock); 1570 if (strcmp(mp->mp_minorname, "*") == 0) { 1571 wildmp = ((is_clone == 0) ? 1572 &dnp->dn_mperm_wild : &dnp->dn_mperm_clone); 1573 if (*wildmp) 1574 freemp = *wildmp; 1575 *wildmp = NULL; 1576 } else { 1577 mp_head = &dnp->dn_mperm; 1578 while (*mp_head) { 1579 if (strcmp((*mp_head)->mp_minorname, 1580 mp->mp_minorname) != 0) { 1581 mp_head = &(*mp_head)->mp_next; 1582 continue; 1583 } 1584 /* remove the entry */ 1585 freemp = *mp_head; 1586 *mp_head = freemp->mp_next; 1587 break; 1588 } 1589 } 1590 if (freemp) { 1591 if (moddebug & MODDEBUG_MINORPERM) { 1592 cmn_err(CE_CONT, "< %s %s 0%o %d %d\n", 1593 drvname, freemp->mp_minorname, 1594 freemp->mp_mode & 0777, 1595 freemp->mp_uid, freemp->mp_gid); 1596 } 1597 free_mperm(freemp); 1598 } else { 1599 if (moddebug & MODDEBUG_MINORPERM) { 1600 cmn_err(CE_CONT, MP_NO_MINOR, 1601 drvname, mp->mp_minorname); 1602 } 1603 } 1604 1605 UNLOCK_DEV_OPS(&dnp->dn_lock); 1606 } 1607 1608 /* 1609 * Add minor perm entry 1610 */ 1611 static void 1612 add_minorperm(major_t major, char *drvname, mperm_t *mp, int is_clone) 1613 { 1614 mperm_t **mp_head; 1615 mperm_t *freemp = NULL; 1616 struct devnames *dnp = &devnamesp[major]; 1617 mperm_t **wildmp; 1618 1619 ASSERT(mp->mp_minorname && strlen(mp->mp_minorname) > 0); 1620 1621 /* 1622 * Note that update_drv replace semantics require 1623 * replacing matching entries with the new permissions. 1624 */ 1625 LOCK_DEV_OPS(&dnp->dn_lock); 1626 if (strcmp(mp->mp_minorname, "*") == 0) { 1627 wildmp = ((is_clone == 0) ? 
1628 &dnp->dn_mperm_wild : &dnp->dn_mperm_clone); 1629 if (*wildmp) 1630 freemp = *wildmp; 1631 *wildmp = mp; 1632 } else { 1633 mperm_t *p, *v = NULL; 1634 for (p = dnp->dn_mperm; p; v = p, p = p->mp_next) { 1635 if (strcmp(p->mp_minorname, mp->mp_minorname) == 0) { 1636 if (v == NULL) 1637 dnp->dn_mperm = mp; 1638 else 1639 v->mp_next = mp; 1640 mp->mp_next = p->mp_next; 1641 freemp = p; 1642 goto replaced; 1643 } 1644 } 1645 if (p == NULL) { 1646 mp_head = &dnp->dn_mperm; 1647 if (*mp_head == NULL) { 1648 *mp_head = mp; 1649 } else { 1650 mp->mp_next = *mp_head; 1651 *mp_head = mp; 1652 } 1653 } 1654 } 1655 replaced: 1656 if (freemp) { 1657 if (moddebug & MODDEBUG_MINORPERM) { 1658 cmn_err(CE_CONT, "< %s %s 0%o %d %d\n", 1659 drvname, freemp->mp_minorname, 1660 freemp->mp_mode & 0777, 1661 freemp->mp_uid, freemp->mp_gid); 1662 } 1663 free_mperm(freemp); 1664 } 1665 if (moddebug & MODDEBUG_MINORPERM) { 1666 cmn_err(CE_CONT, "> %s %s 0%o %d %d\n", 1667 drvname, mp->mp_minorname, mp->mp_mode & 0777, 1668 mp->mp_uid, mp->mp_gid); 1669 } 1670 UNLOCK_DEV_OPS(&dnp->dn_lock); 1671 } 1672 1673 1674 static int 1675 process_minorperm(int cmd, nvlist_t *nvl) 1676 { 1677 char *minor; 1678 major_t major; 1679 mperm_t *mp; 1680 nvpair_t *nvp; 1681 char *name; 1682 int is_clone; 1683 major_t minmaj; 1684 1685 ASSERT(cmd == MODLOADMINORPERM || 1686 cmd == MODADDMINORPERM || cmd == MODREMMINORPERM); 1687 1688 nvp = NULL; 1689 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { 1690 name = nvpair_name(nvp); 1691 1692 is_clone = 0; 1693 (void) nvpair_value_string(nvp, &minor); 1694 major = ddi_name_to_major(name); 1695 if (major != (major_t)-1) { 1696 mp = kmem_zalloc(sizeof (*mp), KM_SLEEP); 1697 if (minor == NULL || strlen(minor) == 0) { 1698 if (moddebug & MODDEBUG_MINORPERM) { 1699 cmn_err(CE_CONT, MP_EMPTY_MINOR, name); 1700 } 1701 minor = "*"; 1702 } 1703 1704 /* 1705 * The minor name of a node using the clone 1706 * driver must be the driver name. To avoid 1707 * multiple searches, we map entries in the form 1708 * clone:<driver> to <driver>:*. This also allows us 1709 * to filter out some of the litter in /etc/minor_perm. 1710 * Minor perm alias entries where the name is not 1711 * the driver kept on the clone list itself. 1712 * This all seems very fragile as a driver could 1713 * be introduced with an existing alias name. 
1714 */ 1715 if (strcmp(name, "clone") == 0) { 1716 minmaj = ddi_name_to_major(minor); 1717 if (minmaj != (major_t)-1) { 1718 if (moddebug & MODDEBUG_MINORPERM) { 1719 cmn_err(CE_CONT, 1720 "mapping %s:%s to %s:*\n", 1721 name, minor, minor); 1722 } 1723 major = minmaj; 1724 name = minor; 1725 minor = "*"; 1726 is_clone = 1; 1727 } 1728 } 1729 1730 if (mp) { 1731 mp->mp_minorname = 1732 i_ddi_strdup(minor, KM_SLEEP); 1733 } 1734 } else { 1735 mp = NULL; 1736 if (moddebug & MODDEBUG_MINORPERM) { 1737 cmn_err(CE_CONT, MP_NO_DRV_ERR, name); 1738 } 1739 } 1740 1741 /* mode */ 1742 nvp = nvlist_next_nvpair(nvl, nvp); 1743 ASSERT(strcmp(nvpair_name(nvp), "mode") == 0); 1744 if (mp) 1745 (void) nvpair_value_int32(nvp, (int *)&mp->mp_mode); 1746 /* uid */ 1747 nvp = nvlist_next_nvpair(nvl, nvp); 1748 ASSERT(strcmp(nvpair_name(nvp), "uid") == 0); 1749 if (mp) 1750 (void) nvpair_value_uint32(nvp, &mp->mp_uid); 1751 /* gid */ 1752 nvp = nvlist_next_nvpair(nvl, nvp); 1753 ASSERT(strcmp(nvpair_name(nvp), "gid") == 0); 1754 if (mp) { 1755 (void) nvpair_value_uint32(nvp, &mp->mp_gid); 1756 1757 if (cmd == MODREMMINORPERM) { 1758 rem_minorperm(major, name, mp, is_clone); 1759 free_mperm(mp); 1760 } else { 1761 add_minorperm(major, name, mp, is_clone); 1762 } 1763 } 1764 } 1765 1766 if (cmd == MODLOADMINORPERM) 1767 minorperm_loaded = 1; 1768 1769 /* 1770 * Reset permissions of cached dv_nodes 1771 */ 1772 (void) devfs_reset_perm(DV_RESET_PERM); 1773 1774 return (0); 1775 } 1776 1777 static int 1778 modctl_minorperm(int cmd, char *usrbuf, size_t buflen) 1779 { 1780 int error; 1781 nvlist_t *nvl; 1782 char *buf = kmem_alloc(buflen, KM_SLEEP); 1783 1784 if ((error = ddi_copyin(usrbuf, buf, buflen, 0)) != 0) { 1785 kmem_free(buf, buflen); 1786 return (error); 1787 } 1788 1789 error = nvlist_unpack(buf, buflen, &nvl, KM_SLEEP); 1790 kmem_free(buf, buflen); 1791 if (error) 1792 return (error); 1793 1794 error = process_minorperm(cmd, nvl); 1795 nvlist_free(nvl); 1796 return (error); 1797 } 1798 1799 struct walk_args { 1800 char *wa_drvname; 1801 list_t wa_pathlist; 1802 }; 1803 1804 struct path_elem { 1805 char *pe_dir; 1806 char *pe_nodename; 1807 list_node_t pe_node; 1808 int pe_dirlen; 1809 }; 1810 1811 /*ARGSUSED*/ 1812 static int 1813 modctl_inst_walker(const char *path, in_node_t *np, in_drv_t *dp, void *arg) 1814 { 1815 struct walk_args *wargs = (struct walk_args *)arg; 1816 struct path_elem *pe; 1817 char *nodename; 1818 1819 /* 1820 * Search may be restricted to a single driver in the case of rem_drv 1821 */ 1822 if (wargs->wa_drvname && 1823 strcmp(dp->ind_driver_name, wargs->wa_drvname) != 0) 1824 return (INST_WALK_CONTINUE); 1825 1826 pe = kmem_zalloc(sizeof (*pe), KM_SLEEP); 1827 pe->pe_dir = i_ddi_strdup((char *)path, KM_SLEEP); 1828 pe->pe_dirlen = strlen(pe->pe_dir) + 1; 1829 ASSERT(strrchr(pe->pe_dir, '/') != NULL); 1830 nodename = strrchr(pe->pe_dir, '/'); 1831 *nodename++ = 0; 1832 pe->pe_nodename = nodename; 1833 list_insert_tail(&wargs->wa_pathlist, pe); 1834 1835 return (INST_WALK_CONTINUE); 1836 } 1837 1838 /* 1839 * /devices attribute nodes clean-up optionally performed 1840 * when removing a driver (rem_drv -C). 1841 * 1842 * Removing attribute nodes allows a machine to be reprovisioned 1843 * without the side-effect of inadvertently picking up stale 1844 * device node ownership or permissions. 
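 * (For example, an ownership or mode change applied to a minor node under
 * /devices is remembered as an attribute node; removing such nodes here
 * ensures a reprovisioned system does not silently inherit those old
 * settings.)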
1845 * 1846 * Preserving attributes (not performing cleanup) allows devices 1847 * attribute changes to be preserved across upgrades, as 1848 * upgrade rather heavy-handedly does a rem_drv/add_drv cycle. 1849 */ 1850 static int 1851 modctl_remdrv_cleanup(const char *u_drvname) 1852 { 1853 struct walk_args *wargs; 1854 struct path_elem *pe; 1855 char *drvname; 1856 int err, rval = 0; 1857 1858 drvname = kmem_alloc(MAXMODCONFNAME, KM_SLEEP); 1859 if ((err = copyinstr(u_drvname, drvname, MAXMODCONFNAME, 0))) { 1860 kmem_free(drvname, MAXMODCONFNAME); 1861 return (err); 1862 } 1863 1864 /* 1865 * First go through the instance database. For each 1866 * instance of a device bound to the driver being 1867 * removed, remove any underlying devfs attribute nodes. 1868 * 1869 * This is a two-step process. First we go through 1870 * the instance data itself, constructing a list of 1871 * the nodes discovered. The second step is then 1872 * to find and remove any devfs attribute nodes 1873 * for the instances discovered in the first step. 1874 * The two-step process avoids any difficulties 1875 * which could arise by holding the instance data 1876 * lock with simultaneous devfs operations. 1877 */ 1878 wargs = kmem_zalloc(sizeof (*wargs), KM_SLEEP); 1879 1880 wargs->wa_drvname = drvname; 1881 list_create(&wargs->wa_pathlist, 1882 sizeof (struct path_elem), offsetof(struct path_elem, pe_node)); 1883 1884 (void) e_ddi_walk_instances(modctl_inst_walker, (void *)wargs); 1885 1886 for (pe = list_head(&wargs->wa_pathlist); pe != NULL; 1887 pe = list_next(&wargs->wa_pathlist, pe)) { 1888 err = devfs_remdrv_cleanup((const char *)pe->pe_dir, 1889 (const char *)pe->pe_nodename); 1890 if (rval == 0) 1891 rval = err; 1892 } 1893 1894 while ((pe = list_head(&wargs->wa_pathlist)) != NULL) { 1895 list_remove(&wargs->wa_pathlist, pe); 1896 kmem_free(pe->pe_dir, pe->pe_dirlen); 1897 kmem_free(pe, sizeof (*pe)); 1898 } 1899 kmem_free(wargs, sizeof (*wargs)); 1900 1901 /* 1902 * Pseudo nodes aren't recorded in the instance database 1903 * so any such nodes need to be handled separately. 1904 */ 1905 err = devfs_remdrv_cleanup("pseudo", (const char *)drvname); 1906 if (rval == 0) 1907 rval = err; 1908 1909 kmem_free(drvname, MAXMODCONFNAME); 1910 return (rval); 1911 } 1912 1913 /* 1914 * Perform a cleanup of non-existent /devices attribute nodes, 1915 * similar to rem_drv -C, but for all drivers/devices. 1916 * This is also optional, performed as part of devfsadm -C. 1917 */ 1918 void 1919 dev_devices_cleanup() 1920 { 1921 struct walk_args *wargs; 1922 struct path_elem *pe; 1923 dev_info_t *devi; 1924 char *path; 1925 int err; 1926 1927 /* 1928 * It's expected that all drivers have been loaded and 1929 * module unloading disabled while performing cleanup. 
1930 */ 1931 ASSERT(modunload_disable_count > 0); 1932 1933 wargs = kmem_zalloc(sizeof (*wargs), KM_SLEEP); 1934 wargs->wa_drvname = NULL; 1935 list_create(&wargs->wa_pathlist, 1936 sizeof (struct path_elem), offsetof(struct path_elem, pe_node)); 1937 1938 (void) e_ddi_walk_instances(modctl_inst_walker, (void *)wargs); 1939 1940 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 1941 1942 for (pe = list_head(&wargs->wa_pathlist); pe != NULL; 1943 pe = list_next(&wargs->wa_pathlist, pe)) { 1944 (void) snprintf(path, MAXPATHLEN, "%s/%s", 1945 pe->pe_dir, pe->pe_nodename); 1946 devi = e_ddi_hold_devi_by_path(path, 0); 1947 if (devi != NULL) { 1948 ddi_release_devi(devi); 1949 } else { 1950 err = devfs_remdrv_cleanup((const char *)pe->pe_dir, 1951 (const char *)pe->pe_nodename); 1952 if (err) { 1953 cmn_err(CE_CONT, 1954 "devfs: %s: clean-up error %d\n", 1955 path, err); 1956 } 1957 } 1958 } 1959 1960 while ((pe = list_head(&wargs->wa_pathlist)) != NULL) { 1961 list_remove(&wargs->wa_pathlist, pe); 1962 kmem_free(pe->pe_dir, pe->pe_dirlen); 1963 kmem_free(pe, sizeof (*pe)); 1964 } 1965 kmem_free(wargs, sizeof (*wargs)); 1966 kmem_free(path, MAXPATHLEN); 1967 } 1968 1969 static int 1970 modctl_allocpriv(const char *name) 1971 { 1972 char *pstr = kmem_alloc(PRIVNAME_MAX, KM_SLEEP); 1973 int error; 1974 1975 if ((error = copyinstr(name, pstr, PRIVNAME_MAX, 0))) { 1976 kmem_free(pstr, PRIVNAME_MAX); 1977 return (error); 1978 } 1979 error = priv_getbyname(pstr, PRIV_ALLOC); 1980 if (error < 0) 1981 error = -error; 1982 else 1983 error = 0; 1984 kmem_free(pstr, PRIVNAME_MAX); 1985 return (error); 1986 } 1987 1988 static int 1989 modctl_devexists(const char *upath, int pathlen) 1990 { 1991 char *path; 1992 int ret; 1993 1994 /* 1995 * copy in the path, including the terminating null 1996 */ 1997 pathlen++; 1998 if (pathlen <= 1 || pathlen > MAXPATHLEN) 1999 return (EINVAL); 2000 path = kmem_zalloc(pathlen + 1, KM_SLEEP); 2001 if ((ret = copyinstr(upath, path, pathlen, NULL)) == 0) { 2002 ret = sdev_modctl_devexists(path); 2003 } 2004 2005 kmem_free(path, pathlen + 1); 2006 return (ret); 2007 } 2008 2009 static int 2010 modctl_devreaddir(const char *udir, int udirlen, 2011 char *upaths, int64_t *ulensp) 2012 { 2013 char *paths = NULL; 2014 char **dirlist = NULL; 2015 char *dir; 2016 int64_t ulens; 2017 int64_t lens; 2018 int i, n; 2019 int ret = 0; 2020 char *p; 2021 int npaths; 2022 int npaths_alloc; 2023 2024 /* 2025 * If upaths is NULL then we are only computing the amount of space 2026 * needed to return the paths, with the value returned in *ulensp. If we 2027 * are copying out paths then we get the amount of space allocated by 2028 * the caller. If the actual space needed for paths is larger, or 2029 * things are changing out from under us, then we return EAGAIN. 
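 *
 * The result is a sequence of NUL-terminated /dev entry names with an extra
 * NUL appended (double termination).  A rough caller-side sketch of the
 * two-pass sizing protocol follows; the userland modctl() wrapper and the
 * variable names are illustrative assumptions, not part of this file:
 *
 *	int64_t lens;
 *	char *paths;
 *
 *	if (modctl(MODDEVREADDIR, dir, strlen(dir), NULL, &lens) == 0) {
 *		paths = malloc(lens);
 *		(void) modctl(MODDEVREADDIR, dir, strlen(dir), paths, &lens);
 *	}
 *
 * A failure of the second call with errno set to EAGAIN indicates the
 * directory changed between the two calls and the caller should retry.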
2030 */ 2031 if (upaths) { 2032 if (ulensp == NULL) 2033 return (EINVAL); 2034 if (copyin(ulensp, &ulens, sizeof (ulens)) != 0) 2035 return (EFAULT); 2036 } 2037 2038 /* 2039 * copyin the /dev path including terminating null 2040 */ 2041 udirlen++; 2042 if (udirlen <= 1 || udirlen > MAXPATHLEN) 2043 return (EINVAL); 2044 dir = kmem_zalloc(udirlen + 1, KM_SLEEP); 2045 if ((ret = copyinstr(udir, dir, udirlen, NULL)) != 0) 2046 goto err; 2047 2048 if ((ret = sdev_modctl_readdir(dir, &dirlist, 2049 &npaths, &npaths_alloc, 0)) != 0) { 2050 ASSERT(dirlist == NULL); 2051 goto err; 2052 } 2053 2054 lens = 0; 2055 for (i = 0; i < npaths; i++) { 2056 lens += strlen(dirlist[i]) + 1; 2057 } 2058 lens++; /* add one for double termination */ 2059 2060 if (upaths) { 2061 if (lens > ulens) { 2062 ret = EAGAIN; 2063 goto out; 2064 } 2065 2066 paths = kmem_alloc(lens, KM_SLEEP); 2067 2068 p = paths; 2069 for (i = 0; i < npaths; i++) { 2070 n = strlen(dirlist[i]) + 1; 2071 bcopy(dirlist[i], p, n); 2072 p += n; 2073 } 2074 *p = 0; 2075 2076 if (copyout(paths, upaths, lens)) { 2077 ret = EFAULT; 2078 goto err; 2079 } 2080 } 2081 2082 out: 2083 /* copy out the amount of space needed to hold the paths */ 2084 if (copyout(&lens, ulensp, sizeof (lens))) 2085 ret = EFAULT; 2086 2087 err: 2088 if (dirlist) 2089 sdev_modctl_readdir_free(dirlist, npaths, npaths_alloc); 2090 if (paths) 2091 kmem_free(paths, lens); 2092 kmem_free(dir, udirlen + 1); 2093 return (ret); 2094 } 2095 2096 static int 2097 modctl_devemptydir(const char *udir, int udirlen, int *uempty) 2098 { 2099 char *dir; 2100 int ret; 2101 char **dirlist = NULL; 2102 int npaths; 2103 int npaths_alloc; 2104 int empty; 2105 2106 /* 2107 * copyin the /dev path including terminating null 2108 */ 2109 udirlen++; 2110 if (udirlen <= 1 || udirlen > MAXPATHLEN) 2111 return (EINVAL); 2112 dir = kmem_zalloc(udirlen + 1, KM_SLEEP); 2113 if ((ret = copyinstr(udir, dir, udirlen, NULL)) != 0) 2114 goto err; 2115 2116 if ((ret = sdev_modctl_readdir(dir, &dirlist, 2117 &npaths, &npaths_alloc, 1)) != 0) { 2118 goto err; 2119 } 2120 2121 empty = npaths ? 
0 : 1; 2122 if (copyout(&empty, uempty, sizeof (empty))) 2123 ret = EFAULT; 2124 2125 err: 2126 if (dirlist) 2127 sdev_modctl_readdir_free(dirlist, npaths, npaths_alloc); 2128 kmem_free(dir, udirlen + 1); 2129 return (ret); 2130 } 2131 2132 int 2133 modctl_moddevname(int subcmd, uintptr_t a1, uintptr_t a2) 2134 { 2135 int error = 0; 2136 2137 switch (subcmd) { 2138 case MODDEVNAME_LOOKUPDOOR: 2139 case MODDEVNAME_DEVFSADMNODE: 2140 error = devname_filename_register(subcmd, (char *)a1); 2141 break; 2142 case MODDEVNAME_NSMAPS: 2143 error = devname_nsmaps_register((char *)a1, (size_t)a2); 2144 break; 2145 case MODDEVNAME_PROFILE: 2146 error = devname_profile_update((char *)a1, (size_t)a2); 2147 break; 2148 case MODDEVNAME_RECONFIG: 2149 i_ddi_set_reconfig(); 2150 break; 2151 case MODDEVNAME_SYSAVAIL: 2152 i_ddi_set_sysavail(); 2153 break; 2154 default: 2155 error = EINVAL; 2156 break; 2157 } 2158 2159 return (error); 2160 } 2161 2162 /*ARGSUSED5*/ 2163 int 2164 modctl(int cmd, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, 2165 uintptr_t a5) 2166 { 2167 int error = EINVAL; 2168 dev_t dev; 2169 2170 if (secpolicy_modctl(CRED(), cmd) != 0) 2171 return (set_errno(EPERM)); 2172 2173 switch (cmd) { 2174 case MODLOAD: /* load a module */ 2175 error = modctl_modload((int)a1, (char *)a2, (int *)a3); 2176 break; 2177 2178 case MODUNLOAD: /* unload a module */ 2179 error = modctl_modunload((modid_t)a1); 2180 break; 2181 2182 case MODINFO: /* get module status */ 2183 error = modctl_modinfo((modid_t)a1, (struct modinfo *)a2); 2184 break; 2185 2186 case MODRESERVED: /* get last major number in range */ 2187 error = modctl_modreserve((modid_t)a1, (int *)a2); 2188 break; 2189 2190 case MODSETMINIROOT: /* we are running in miniroot */ 2191 isminiroot = 1; 2192 error = 0; 2193 break; 2194 2195 case MODADDMAJBIND: /* read major binding file */ 2196 error = modctl_add_major((int *)a2); 2197 break; 2198 2199 case MODGETPATHLEN: /* get modpath length */ 2200 error = modctl_getmodpathlen((int *)a2); 2201 break; 2202 2203 case MODGETPATH: /* get modpath */ 2204 error = modctl_getmodpath((char *)a2); 2205 break; 2206 2207 case MODREADSYSBIND: /* read system call binding file */ 2208 error = modctl_read_sysbinding_file(); 2209 break; 2210 2211 case MODGETMAJBIND: /* get major number for named device */ 2212 error = modctl_getmaj((char *)a1, (uint_t)a2, (int *)a3); 2213 break; 2214 2215 case MODGETNAME: /* get name of device given major number */ 2216 error = modctl_getname((char *)a1, (uint_t)a2, (int *)a3); 2217 break; 2218 2219 case MODDEVT2INSTANCE: 2220 if (get_udatamodel() == DATAMODEL_NATIVE) { 2221 dev = (dev_t)a1; 2222 } 2223 #ifdef _SYSCALL32_IMPL 2224 else { 2225 dev = expldev(a1); 2226 } 2227 #endif 2228 error = modctl_devt2instance(dev, (int *)a2); 2229 break; 2230 2231 case MODSIZEOF_DEVID: /* sizeof device id of device given dev_t */ 2232 if (get_udatamodel() == DATAMODEL_NATIVE) { 2233 dev = (dev_t)a1; 2234 } 2235 #ifdef _SYSCALL32_IMPL 2236 else { 2237 dev = expldev(a1); 2238 } 2239 #endif 2240 error = modctl_sizeof_devid(dev, (uint_t *)a2); 2241 break; 2242 2243 case MODGETDEVID: /* get device id of device given dev_t */ 2244 if (get_udatamodel() == DATAMODEL_NATIVE) { 2245 dev = (dev_t)a1; 2246 } 2247 #ifdef _SYSCALL32_IMPL 2248 else { 2249 dev = expldev(a1); 2250 } 2251 #endif 2252 error = modctl_get_devid(dev, (uint_t)a2, (ddi_devid_t)a3); 2253 break; 2254 2255 case MODSIZEOF_MINORNAME: /* sizeof minor nm (dev_t,spectype) */ 2256 if (get_udatamodel() == DATAMODEL_NATIVE) { 2257 error = 
modctl_sizeof_minorname((dev_t)a1, (int)a2, 2258 (uint_t *)a3); 2259 } 2260 #ifdef _SYSCALL32_IMPL 2261 else { 2262 error = modctl_sizeof_minorname(expldev(a1), (int)a2, 2263 (uint_t *)a3); 2264 } 2265 2266 #endif 2267 break; 2268 2269 case MODGETMINORNAME: /* get minor name of (dev_t,spectype) */ 2270 if (get_udatamodel() == DATAMODEL_NATIVE) { 2271 error = modctl_get_minorname((dev_t)a1, (int)a2, 2272 (uint_t)a3, (char *)a4); 2273 } 2274 #ifdef _SYSCALL32_IMPL 2275 else { 2276 error = modctl_get_minorname(expldev(a1), (int)a2, 2277 (uint_t)a3, (char *)a4); 2278 } 2279 #endif 2280 break; 2281 2282 case MODGETDEVFSPATH_LEN: /* sizeof path nm of (dev_t,spectype) */ 2283 if (get_udatamodel() == DATAMODEL_NATIVE) { 2284 error = modctl_devfspath_len((dev_t)a1, (int)a2, 2285 (uint_t *)a3); 2286 } 2287 #ifdef _SYSCALL32_IMPL 2288 else { 2289 error = modctl_devfspath_len(expldev(a1), (int)a2, 2290 (uint_t *)a3); 2291 } 2292 2293 #endif 2294 break; 2295 2296 case MODGETDEVFSPATH: /* get path name of (dev_t,spec) type */ 2297 if (get_udatamodel() == DATAMODEL_NATIVE) { 2298 error = modctl_devfspath((dev_t)a1, (int)a2, 2299 (uint_t)a3, (char *)a4); 2300 } 2301 #ifdef _SYSCALL32_IMPL 2302 else { 2303 error = modctl_devfspath(expldev(a1), (int)a2, 2304 (uint_t)a3, (char *)a4); 2305 } 2306 #endif 2307 break; 2308 2309 case MODGETDEVFSPATH_MI_LEN: /* sizeof path nm of (major,instance) */ 2310 error = modctl_devfspath_mi_len((major_t)a1, (int)a2, 2311 (uint_t *)a3); 2312 break; 2313 2314 case MODGETDEVFSPATH_MI: /* get path name of (major,instance) */ 2315 error = modctl_devfspath_mi((major_t)a1, (int)a2, 2316 (uint_t)a3, (char *)a4); 2317 break; 2318 2319 2320 case MODEVENTS: 2321 error = modctl_modevents((int)a1, a2, a3, a4, (uint_t)a5); 2322 break; 2323 2324 case MODGETFBNAME: /* get the framebuffer name */ 2325 error = modctl_get_fbname((char *)a1); 2326 break; 2327 2328 case MODREREADDACF: /* reread dacf rule database from given file */ 2329 error = modctl_reread_dacf((char *)a1); 2330 break; 2331 2332 case MODLOADDRVCONF: /* load driver.conf file for major */ 2333 error = modctl_load_drvconf((major_t)a1); 2334 break; 2335 2336 case MODUNLOADDRVCONF: /* unload driver.conf file for major */ 2337 error = modctl_unload_drvconf((major_t)a1); 2338 break; 2339 2340 case MODREMMAJBIND: /* remove a major binding */ 2341 error = modctl_rem_major((major_t)a1); 2342 break; 2343 2344 case MODDEVID2PATHS: /* get paths given devid */ 2345 error = modctl_devid2paths((ddi_devid_t)a1, (char *)a2, 2346 (uint_t)a3, (size_t *)a4, (char *)a5); 2347 break; 2348 2349 case MODSETDEVPOLICY: /* establish device policy */ 2350 error = devpolicy_load((int)a1, (size_t)a2, (devplcysys_t *)a3); 2351 break; 2352 2353 case MODGETDEVPOLICY: /* get device policy */ 2354 error = devpolicy_get((int *)a1, (size_t)a2, 2355 (devplcysys_t *)a3); 2356 break; 2357 2358 case MODALLOCPRIV: 2359 error = modctl_allocpriv((const char *)a1); 2360 break; 2361 2362 case MODGETDEVPOLICYBYNAME: 2363 error = devpolicy_getbyname((size_t)a1, 2364 (devplcysys_t *)a2, (char *)a3); 2365 break; 2366 2367 case MODLOADMINORPERM: 2368 case MODADDMINORPERM: 2369 case MODREMMINORPERM: 2370 error = modctl_minorperm(cmd, (char *)a1, (size_t)a2); 2371 break; 2372 2373 case MODREMDRVCLEANUP: 2374 error = modctl_remdrv_cleanup((const char *)a1); 2375 break; 2376 2377 case MODDEVEXISTS: /* non-reconfiguring /dev lookup */ 2378 error = modctl_devexists((const char *)a1, (size_t)a2); 2379 break; 2380 2381 case MODDEVREADDIR: /* non-reconfiguring /dev readdir */ 2382 
error = modctl_devreaddir((const char *)a1, (size_t)a2, 2383 (char *)a3, (int64_t *)a4); 2384 break; 2385 2386 case MODDEVEMPTYDIR: /* non-reconfiguring /dev emptydir */ 2387 error = modctl_devemptydir((const char *)a1, (size_t)a2, 2388 (int *)a3); 2389 break; 2390 2391 case MODDEVNAME: 2392 error = modctl_moddevname((int)a1, a2, a3); 2393 break; 2394 2395 case MODRETIRE: /* retire device named by physpath a1 */ 2396 error = modctl_retire((char *)a1, (char *)a2, (size_t)a3); 2397 break; 2398 2399 case MODISRETIRED: /* check if a device is retired. */ 2400 error = modctl_is_retired((char *)a1, (int *)a2); 2401 break; 2402 2403 case MODUNRETIRE: /* unretire device named by physpath a1 */ 2404 error = modctl_unretire((char *)a1); 2405 break; 2406 2407 default: 2408 error = EINVAL; 2409 break; 2410 } 2411 2412 return (error ? set_errno(error) : 0); 2413 } 2414 2415 /* 2416 * Calls to kobj_load_module()() are handled off to this routine in a 2417 * separate thread. 2418 */ 2419 static void 2420 modload_thread(struct loadmt *ltp) 2421 { 2422 /* load the module and signal the creator of this thread */ 2423 kmutex_t cpr_lk; 2424 callb_cpr_t cpr_i; 2425 2426 mutex_init(&cpr_lk, NULL, MUTEX_DEFAULT, NULL); 2427 CALLB_CPR_INIT(&cpr_i, &cpr_lk, callb_generic_cpr, "modload"); 2428 /* borrow the devi lock from thread which invoked us */ 2429 pm_borrow_lock(ltp->owner); 2430 ltp->retval = kobj_load_module(ltp->mp, ltp->usepath); 2431 pm_return_lock(); 2432 sema_v(<p->sema); 2433 mutex_enter(&cpr_lk); 2434 CALLB_CPR_EXIT(&cpr_i); 2435 mutex_destroy(&cpr_lk); 2436 thread_exit(); 2437 } 2438 2439 /* 2440 * load a module, adding a reference if caller specifies rmodp. If rmodp 2441 * is specified then an errno is returned, otherwise a module index is 2442 * returned (-1 on error). 2443 */ 2444 static int 2445 modrload(char *subdir, char *filename, struct modctl **rmodp) 2446 { 2447 struct modctl *modp; 2448 size_t size; 2449 char *fullname; 2450 int retval = EINVAL; 2451 int id = -1; 2452 2453 if (rmodp) 2454 *rmodp = NULL; /* avoid garbage */ 2455 2456 if (subdir != NULL) { 2457 /* 2458 * refuse / in filename to prevent "../" escapes. 2459 */ 2460 if (strchr(filename, '/') != NULL) 2461 return (rmodp ? retval : id); 2462 2463 /* 2464 * allocate enough space for <subdir>/<filename><NULL> 2465 */ 2466 size = strlen(subdir) + strlen(filename) + 2; 2467 fullname = kmem_zalloc(size, KM_SLEEP); 2468 (void) sprintf(fullname, "%s/%s", subdir, filename); 2469 } else { 2470 fullname = filename; 2471 } 2472 2473 modp = mod_hold_installed_mod(fullname, 1, 0, &retval); 2474 if (modp != NULL) { 2475 id = modp->mod_id; 2476 if (rmodp) { 2477 /* add mod_ref and return *rmodp */ 2478 mutex_enter(&mod_lock); 2479 modp->mod_ref++; 2480 mutex_exit(&mod_lock); 2481 *rmodp = modp; 2482 } 2483 mod_release_mod(modp); 2484 CPU_STATS_ADDQ(CPU, sys, modload, 1); 2485 } 2486 2487 done: if (subdir != NULL) 2488 kmem_free(fullname, size); 2489 return (rmodp ? retval : id); 2490 } 2491 2492 /* 2493 * This is the primary kernel interface to load a module. It loads and 2494 * installs the named module. It does not hold mod_ref of the module, so 2495 * a module unload attempt can occur at any time - it is up to the 2496 * _fini/mod_remove implementation to determine if unload will succeed. 2497 */ 2498 int 2499 modload(char *subdir, char *filename) 2500 { 2501 return (modrload(subdir, filename, NULL)); 2502 } 2503 2504 /* 2505 * Load a module using a series of qualified names from most specific to least 2506 * specific, e.g. 
for subdir "foo", p1 "bar", p2 "baz", we might try: 2507 * Value returned in *chosen 2508 * foo/bar.baz.1.2.3 3 2509 * foo/bar.baz.1.2 2 2510 * foo/bar.baz.1 1 2511 * foo/bar.baz 0 2512 * 2513 * Return the module ID on success; -1 if no module was loaded. On success 2514 * and if 'chosen' is not NULL we also return the number of suffices that 2515 * were in the module we chose to load. 2516 */ 2517 int 2518 modload_qualified(const char *subdir, const char *p1, 2519 const char *p2, const char *delim, uint_t suffv[], int suffc, int *chosen) 2520 { 2521 char path[MOD_MAXPATH]; 2522 size_t n, resid = sizeof (path); 2523 char *p = path; 2524 2525 char **dotv; 2526 int i, rc, id; 2527 modctl_t *mp; 2528 2529 if (p2 != NULL) 2530 n = snprintf(p, resid, "%s/%s%s%s", subdir, p1, delim, p2); 2531 else 2532 n = snprintf(p, resid, "%s/%s", subdir, p1); 2533 2534 if (n >= resid) 2535 return (-1); 2536 2537 p += n; 2538 resid -= n; 2539 dotv = kmem_alloc(sizeof (char *) * (suffc + 1), KM_SLEEP); 2540 2541 for (i = 0; i < suffc; i++) { 2542 dotv[i] = p; 2543 n = snprintf(p, resid, "%s%u", delim, suffv[i]); 2544 2545 if (n >= resid) { 2546 kmem_free(dotv, sizeof (char *) * (suffc + 1)); 2547 return (-1); 2548 } 2549 2550 p += n; 2551 resid -= n; 2552 } 2553 2554 dotv[suffc] = p; 2555 2556 for (i = suffc; i >= 0; i--) { 2557 dotv[i][0] = '\0'; 2558 mp = mod_hold_installed_mod(path, 1, 1, &rc); 2559 2560 if (mp != NULL) { 2561 kmem_free(dotv, sizeof (char *) * (suffc + 1)); 2562 id = mp->mod_id; 2563 mod_release_mod(mp); 2564 if (chosen != NULL) 2565 *chosen = i; 2566 return (id); 2567 } 2568 } 2569 2570 kmem_free(dotv, sizeof (char *) * (suffc + 1)); 2571 return (-1); 2572 } 2573 2574 /* 2575 * Load a module. 2576 */ 2577 int 2578 modloadonly(char *subdir, char *filename) 2579 { 2580 struct modctl *modp; 2581 char *fullname; 2582 size_t size; 2583 int id, retval; 2584 2585 if (subdir != NULL) { 2586 /* 2587 * allocate enough space for <subdir>/<filename><NULL> 2588 */ 2589 size = strlen(subdir) + strlen(filename) + 2; 2590 fullname = kmem_zalloc(size, KM_SLEEP); 2591 (void) sprintf(fullname, "%s/%s", subdir, filename); 2592 } else { 2593 fullname = filename; 2594 } 2595 2596 modp = mod_hold_loaded_mod(NULL, fullname, &retval); 2597 if (modp) { 2598 id = modp->mod_id; 2599 mod_release_mod(modp); 2600 } 2601 2602 if (subdir != NULL) 2603 kmem_free(fullname, size); 2604 2605 if (retval == 0) 2606 return (id); 2607 return (-1); 2608 } 2609 2610 /* 2611 * Try to uninstall and unload a module, removing a reference if caller 2612 * specifies rmodp. 2613 */ 2614 static int 2615 modunrload(modid_t id, struct modctl **rmodp, int unload) 2616 { 2617 struct modctl *modp; 2618 int retval; 2619 2620 if (rmodp) 2621 *rmodp = NULL; /* avoid garbage */ 2622 2623 if ((modp = mod_hold_by_id((modid_t)id)) == NULL) 2624 return (EINVAL); 2625 2626 if (rmodp) { 2627 mutex_enter(&mod_lock); 2628 modp->mod_ref--; 2629 mutex_exit(&mod_lock); 2630 *rmodp = modp; 2631 } 2632 2633 if (unload) { 2634 retval = moduninstall(modp); 2635 if (retval == 0) { 2636 mod_unload(modp); 2637 CPU_STATS_ADDQ(CPU, sys, modunload, 1); 2638 } else if (retval == EALREADY) 2639 retval = 0; /* already unloaded, not an error */ 2640 } else 2641 retval = 0; 2642 2643 mod_release_mod(modp); 2644 return (retval); 2645 } 2646 2647 /* 2648 * Uninstall and unload a module. 
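 * The unload is bracketed by modunload_begin()/modunload_end() so that it
 * synchronizes with any modunload_disable() currently in effect, and
 * devfs_clean() is run first so that stale /devices references do not keep
 * the driver held.  As a rough illustration (the module name "foo" below is
 * hypothetical), a kernel caller might do:
 *
 *	modid_t id = mod_name_to_modid("foo");
 *	if (id != -1)
 *		(void) modunload(id);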
2649 */ 2650 int 2651 modunload(modid_t id) 2652 { 2653 int retval; 2654 2655 /* synchronize with any active modunload_disable() */ 2656 modunload_begin(); 2657 if (ddi_root_node()) 2658 (void) devfs_clean(ddi_root_node(), NULL, 0); 2659 retval = modunrload(id, NULL, 1); 2660 modunload_end(); 2661 return (retval); 2662 } 2663 2664 /* 2665 * Return status of a loaded module. 2666 */ 2667 static int 2668 modinfo(modid_t id, struct modinfo *modinfop) 2669 { 2670 struct modctl *modp; 2671 modid_t mid; 2672 int i; 2673 2674 mid = modinfop->mi_id; 2675 if (modinfop->mi_info & MI_INFO_ALL) { 2676 while ((modp = mod_hold_next_by_id(mid++)) != NULL) { 2677 if ((modinfop->mi_info & MI_INFO_CNT) || 2678 modp->mod_installed) 2679 break; 2680 mod_release_mod(modp); 2681 } 2682 if (modp == NULL) 2683 return (EINVAL); 2684 } else { 2685 modp = mod_hold_by_id(id); 2686 if (modp == NULL) 2687 return (EINVAL); 2688 if (!(modinfop->mi_info & MI_INFO_CNT) && 2689 (modp->mod_installed == 0)) { 2690 mod_release_mod(modp); 2691 return (EINVAL); 2692 } 2693 } 2694 2695 modinfop->mi_rev = 0; 2696 modinfop->mi_state = 0; 2697 for (i = 0; i < MODMAXLINK; i++) { 2698 modinfop->mi_msinfo[i].msi_p0 = -1; 2699 modinfop->mi_msinfo[i].msi_linkinfo[0] = 0; 2700 } 2701 if (modp->mod_loaded) { 2702 modinfop->mi_state = MI_LOADED; 2703 kobj_getmodinfo(modp->mod_mp, modinfop); 2704 } 2705 if (modp->mod_installed) { 2706 modinfop->mi_state |= MI_INSTALLED; 2707 2708 (void) mod_getinfo(modp, modinfop); 2709 } 2710 2711 modinfop->mi_id = modp->mod_id; 2712 modinfop->mi_loadcnt = modp->mod_loadcnt; 2713 (void) strcpy(modinfop->mi_name, modp->mod_modname); 2714 2715 mod_release_mod(modp); 2716 return (0); 2717 } 2718 2719 static char mod_stub_err[] = "mod_hold_stub: Couldn't load stub module %s"; 2720 static char no_err[] = "No error function for weak stub %s"; 2721 2722 /* 2723 * used by the stubs themselves to load and hold a module. 2724 * Returns 0 if the module is successfully held; 2725 * the stub needs to call mod_release_stub(). 2726 * -1 if the stub should just call the err_fcn. 2727 * Note that this code is stretched out so that we avoid subroutine calls 2728 * and optimize for the most likely case. That is, the case where the 2729 * module is loaded and installed and not held. In that case we just inc 2730 * the mod_ref count and continue. 2731 */ 2732 int 2733 mod_hold_stub(struct mod_stub_info *stub) 2734 { 2735 struct modctl *mp; 2736 struct mod_modinfo *mip; 2737 2738 mip = stub->mods_modinfo; 2739 2740 mutex_enter(&mod_lock); 2741 2742 /* we do mod_hold_by_modctl inline for speed */ 2743 2744 mod_check_again: 2745 if ((mp = mip->mp) != NULL) { 2746 if (mp->mod_busy == 0) { 2747 if (mp->mod_installed) { 2748 /* increment the reference count */ 2749 mp->mod_ref++; 2750 ASSERT(mp->mod_ref && mp->mod_installed); 2751 mutex_exit(&mod_lock); 2752 return (0); 2753 } else { 2754 mp->mod_busy = 1; 2755 mp->mod_inprogress_thread = 2756 (curthread == NULL ? 2757 (kthread_id_t)-1 : curthread); 2758 } 2759 } else { 2760 /* 2761 * wait one time and then go see if someone 2762 * else has resolved the stub (set mip->mp). 2763 */ 2764 if (mod_hold_by_modctl(mp, 2765 MOD_WAIT_ONCE | MOD_LOCK_HELD)) 2766 goto mod_check_again; 2767 2768 /* 2769 * what we have now may have been unloaded!, in 2770 * that case, mip->mp will be NULL, we'll hit this 2771 * module and load again.. 
2772 */ 2773 cmn_err(CE_PANIC, "mod_hold_stub should have blocked"); 2774 } 2775 mutex_exit(&mod_lock); 2776 } else { 2777 /* first time we've hit this module */ 2778 mutex_exit(&mod_lock); 2779 mp = mod_hold_by_name(mip->modm_module_name); 2780 mip->mp = mp; 2781 } 2782 2783 /* 2784 * If we are here, it means that the following conditions 2785 * are satisfied. 2786 * 2787 * mip->mp != NULL 2788 * this thread has set the mp->mod_busy = 1 2789 * mp->mod_installed = 0 2790 * 2791 */ 2792 ASSERT(mp != NULL); 2793 ASSERT(mp->mod_busy == 1); 2794 2795 if (mp->mod_installed == 0) { 2796 /* Module not loaded, if weak stub don't load it */ 2797 if (stub->mods_flag & MODS_WEAK) { 2798 if (stub->mods_errfcn == NULL) { 2799 mod_release_mod(mp); 2800 cmn_err(CE_PANIC, no_err, 2801 mip->modm_module_name); 2802 } 2803 } else { 2804 /* Not a weak stub so load the module */ 2805 2806 if (mod_load(mp, 1) != 0 || modinstall(mp) != 0) { 2807 /* 2808 * If mod_load() was successful 2809 * and modinstall() failed, then 2810 * unload the module. 2811 */ 2812 if (mp->mod_loaded) 2813 mod_unload(mp); 2814 2815 mod_release_mod(mp); 2816 if (stub->mods_errfcn == NULL) { 2817 cmn_err(CE_PANIC, mod_stub_err, 2818 mip->modm_module_name); 2819 } else { 2820 return (-1); 2821 } 2822 } 2823 } 2824 } 2825 2826 /* 2827 * At this point module is held and loaded. Release 2828 * the mod_busy and mod_inprogress_thread before 2829 * returning. We actually call mod_release() here so 2830 * that if another stub wants to access this module, 2831 * it can do so. mod_ref is incremented before mod_release() 2832 * is called to prevent someone else from snatching the 2833 * module from this thread. 2834 */ 2835 mutex_enter(&mod_lock); 2836 mp->mod_ref++; 2837 ASSERT(mp->mod_ref && 2838 (mp->mod_loaded || (stub->mods_flag & MODS_WEAK))); 2839 mod_release(mp); 2840 mutex_exit(&mod_lock); 2841 return (0); 2842 } 2843 2844 void 2845 mod_release_stub(struct mod_stub_info *stub) 2846 { 2847 struct modctl *mp = stub->mods_modinfo->mp; 2848 2849 /* inline mod_release_mod */ 2850 mutex_enter(&mod_lock); 2851 ASSERT(mp->mod_ref && 2852 (mp->mod_loaded || (stub->mods_flag & MODS_WEAK))); 2853 mp->mod_ref--; 2854 if (mp->mod_want) { 2855 mp->mod_want = 0; 2856 cv_broadcast(&mod_cv); 2857 } 2858 mutex_exit(&mod_lock); 2859 } 2860 2861 static struct modctl * 2862 mod_hold_loaded_mod(struct modctl *dep, char *filename, int *status) 2863 { 2864 struct modctl *modp; 2865 int retval; 2866 2867 /* 2868 * Hold the module. 2869 */ 2870 modp = mod_hold_by_name_requisite(dep, filename); 2871 if (modp) { 2872 retval = mod_load(modp, 1); 2873 if (retval != 0) { 2874 mod_release_mod(modp); 2875 modp = NULL; 2876 } 2877 *status = retval; 2878 } else { 2879 *status = ENOSPC; 2880 } 2881 2882 /* 2883 * if dep is not NULL, clear the module dependency information. 2884 * This information is set in mod_hold_by_name_common(). 2885 */ 2886 if (dep != NULL && dep->mod_requisite_loading != NULL) { 2887 ASSERT(dep->mod_busy); 2888 dep->mod_requisite_loading = NULL; 2889 } 2890 2891 return (modp); 2892 } 2893 2894 /* 2895 * hold, load, and install the named module 2896 */ 2897 static struct modctl * 2898 mod_hold_installed_mod(char *name, int usepath, int forcecheck, int *r) 2899 { 2900 struct modctl *modp; 2901 int retval; 2902 2903 /* 2904 * Verify that that module in question actually exists on disk 2905 * before allocation of module structure by mod_hold_by_name. 
2906 */ 2907 if (modrootloaded && swaploaded || forcecheck) { 2908 if (!kobj_path_exists(name, usepath)) { 2909 *r = ENOENT; 2910 return (NULL); 2911 } 2912 } 2913 2914 /* 2915 * Hold the module. 2916 */ 2917 modp = mod_hold_by_name(name); 2918 if (modp) { 2919 retval = mod_load(modp, usepath); 2920 if (retval != 0) { 2921 mod_release_mod(modp); 2922 modp = NULL; 2923 *r = retval; 2924 } else { 2925 if ((*r = modinstall(modp)) != 0) { 2926 /* 2927 * We loaded it, but failed to _init() it. 2928 * Be kind to developers -- force it 2929 * out of memory now so that the next 2930 * attempt to use the module will cause 2931 * a reload. See 1093793. 2932 */ 2933 mod_unload(modp); 2934 mod_release_mod(modp); 2935 modp = NULL; 2936 } 2937 } 2938 } else { 2939 *r = ENOSPC; 2940 } 2941 return (modp); 2942 } 2943 2944 static char mod_excl_msg[] = 2945 "module %s(%s) is EXCLUDED and will not be loaded\n"; 2946 static char mod_init_msg[] = "loadmodule:%s(%s): _init() error %d\n"; 2947 2948 /* 2949 * This routine is needed for dependencies. Users specify dependencies 2950 * by declaring a character array initialized to filenames of dependents. 2951 * So the code that handles dependents deals with filenames (and not 2952 * module names) because that's all it has. We load by filename and once 2953 * we've loaded a file we can get the module name. 2954 * Unfortunately there isn't a single unified filename/modulename namespace. 2955 * C'est la vie. 2956 * 2957 * We allow the name being looked up to be prepended by an optional 2958 * subdirectory e.g. we can lookup (NULL, "fs/ufs") or ("fs", "ufs") 2959 */ 2960 struct modctl * 2961 mod_find_by_filename(char *subdir, char *filename) 2962 { 2963 struct modctl *mp; 2964 size_t sublen; 2965 2966 ASSERT(!MUTEX_HELD(&mod_lock)); 2967 if (subdir != NULL) 2968 sublen = strlen(subdir); 2969 else 2970 sublen = 0; 2971 2972 mutex_enter(&mod_lock); 2973 mp = &modules; 2974 do { 2975 if (sublen) { 2976 char *mod_filename = mp->mod_filename; 2977 2978 if (strncmp(subdir, mod_filename, sublen) == 0 && 2979 mod_filename[sublen] == '/' && 2980 strcmp(filename, &mod_filename[sublen + 1]) == 0) { 2981 mutex_exit(&mod_lock); 2982 return (mp); 2983 } 2984 } else if (strcmp(filename, mp->mod_filename) == 0) { 2985 mutex_exit(&mod_lock); 2986 return (mp); 2987 } 2988 } while ((mp = mp->mod_next) != &modules); 2989 mutex_exit(&mod_lock); 2990 return (NULL); 2991 } 2992 2993 /* 2994 * Check for circular dependencies. This is called from do_dependents() 2995 * in kobj.c. If we are the thread already loading this module, then 2996 * we're trying to load a dependent that we're already loading which 2997 * means the user specified circular dependencies. 2998 */ 2999 static int 3000 mod_circdep(struct modctl *modp) 3001 { 3002 struct modctl *rmod; 3003 3004 ASSERT(MUTEX_HELD(&mod_lock)); 3005 3006 /* 3007 * Check the mod_inprogress_thread first. 3008 * mod_inprogress_thread is used in mod_hold_stub() 3009 * directly to improve performance. 3010 */ 3011 if (modp->mod_inprogress_thread == curthread) 3012 return (1); 3013 3014 /* 3015 * Check the module circular dependencies. 3016 */ 3017 for (rmod = modp; rmod != NULL; rmod = rmod->mod_requisite_loading) { 3018 /* 3019 * Check if there is a module circular dependency. 
3020 */ 3021 if (rmod->mod_requisite_loading == modp) 3022 return (1); 3023 } 3024 return (0); 3025 } 3026 3027 static int 3028 mod_getinfo(struct modctl *modp, struct modinfo *modinfop) 3029 { 3030 int (*func)(struct modinfo *); 3031 int retval; 3032 3033 ASSERT(modp->mod_busy); 3034 3035 /* primary modules don't do getinfo */ 3036 if (modp->mod_prim) 3037 return (0); 3038 3039 func = (int (*)(struct modinfo *))kobj_lookup(modp->mod_mp, "_info"); 3040 3041 if (kobj_addrcheck(modp->mod_mp, (caddr_t)func)) { 3042 cmn_err(CE_WARN, "_info() not defined properly in %s", 3043 modp->mod_filename); 3044 /* 3045 * The semantics of mod_info(9F) are that 0 is failure 3046 * and non-zero is success. 3047 */ 3048 retval = 0; 3049 } else 3050 retval = (*func)(modinfop); /* call _info() function */ 3051 3052 if (moddebug & MODDEBUG_USERDEBUG) 3053 printf("Returned from _info, retval = %x\n", retval); 3054 3055 return (retval); 3056 } 3057 3058 static void 3059 modadd(struct modctl *mp) 3060 { 3061 ASSERT(MUTEX_HELD(&mod_lock)); 3062 3063 mp->mod_id = last_module_id++; 3064 mp->mod_next = &modules; 3065 mp->mod_prev = modules.mod_prev; 3066 modules.mod_prev->mod_next = mp; 3067 modules.mod_prev = mp; 3068 } 3069 3070 /*ARGSUSED*/ 3071 static struct modctl * 3072 allocate_modp(const char *filename, const char *modname) 3073 { 3074 struct modctl *mp; 3075 3076 mp = kobj_zalloc(sizeof (*mp), KM_SLEEP); 3077 mp->mod_modname = kobj_zalloc(strlen(modname) + 1, KM_SLEEP); 3078 (void) strcpy(mp->mod_modname, modname); 3079 return (mp); 3080 } 3081 3082 /* 3083 * Get the value of a symbol. This is a wrapper routine that 3084 * calls kobj_getsymvalue(). kobj_getsymvalue() may go away but this 3085 * wrapper will prevent callers from noticing. 3086 */ 3087 uintptr_t 3088 modgetsymvalue(char *name, int kernelonly) 3089 { 3090 return (kobj_getsymvalue(name, kernelonly)); 3091 } 3092 3093 /* 3094 * Get the symbol nearest an address. This is a wrapper routine that 3095 * calls kobj_getsymname(). kobj_getsymname() may go away but this 3096 * wrapper will prevent callers from noticing. 3097 */ 3098 char * 3099 modgetsymname(uintptr_t value, ulong_t *offset) 3100 { 3101 return (kobj_getsymname(value, offset)); 3102 } 3103 3104 /* 3105 * Lookup a symbol in a specified module. These are wrapper routines that 3106 * call kobj_lookup(). kobj_lookup() may go away but these wrappers will 3107 * prevent callers from noticing. 3108 */ 3109 uintptr_t 3110 modlookup(const char *modname, const char *symname) 3111 { 3112 struct modctl *modp; 3113 uintptr_t val; 3114 3115 if ((modp = mod_hold_by_name(modname)) == NULL) 3116 return (0); 3117 val = kobj_lookup(modp->mod_mp, symname); 3118 mod_release_mod(modp); 3119 return (val); 3120 } 3121 3122 uintptr_t 3123 modlookup_by_modctl(modctl_t *modp, const char *symname) 3124 { 3125 ASSERT(modp->mod_ref > 0 || modp->mod_busy); 3126 3127 return (kobj_lookup(modp->mod_mp, symname)); 3128 } 3129 3130 /* 3131 * Ask the user for the name of the system file and the default path 3132 * for modules. 3133 */ 3134 void 3135 mod_askparams() 3136 { 3137 static char s0[64]; 3138 intptr_t fd; 3139 3140 if ((fd = kobj_open(systemfile)) != -1L) 3141 kobj_close(fd); 3142 else 3143 systemfile = NULL; 3144 3145 /*CONSTANTCONDITION*/ 3146 while (1) { 3147 printf("Name of system file [%s]: ", 3148 systemfile ? 
systemfile : "/dev/null"); 3149 3150 console_gets(s0, sizeof (s0)); 3151 3152 if (s0[0] == '\0') 3153 break; 3154 else if (strcmp(s0, "/dev/null") == 0) { 3155 systemfile = NULL; 3156 break; 3157 } else { 3158 if ((fd = kobj_open(s0)) != -1L) { 3159 kobj_close(fd); 3160 systemfile = s0; 3161 break; 3162 } 3163 } 3164 printf("can't find file %s\n", s0); 3165 } 3166 } 3167 3168 static char loading_msg[] = "loading '%s' id %d\n"; 3169 static char load_msg[] = "load '%s' id %d loaded @ 0x%p/0x%p size %d/%d\n"; 3170 3171 /* 3172 * Common code for loading a module (but not installing it). 3173 * Handoff the task of module loading to a separate thread 3174 * with a large stack if possible, since this code may recurse a few times. 3175 * Return zero if there are no errors or an errno value. 3176 */ 3177 static int 3178 mod_load(struct modctl *mp, int usepath) 3179 { 3180 int retval; 3181 struct modinfo *modinfop = NULL; 3182 struct loadmt lt; 3183 3184 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3185 ASSERT(mp->mod_busy); 3186 3187 if (mp->mod_loaded) 3188 return (0); 3189 3190 if (mod_sysctl(SYS_CHECK_EXCLUDE, mp->mod_modname) != 0 || 3191 mod_sysctl(SYS_CHECK_EXCLUDE, mp->mod_filename) != 0) { 3192 if (moddebug & MODDEBUG_LOADMSG) { 3193 printf(mod_excl_msg, mp->mod_filename, 3194 mp->mod_modname); 3195 } 3196 return (ENXIO); 3197 } 3198 if (moddebug & MODDEBUG_LOADMSG2) 3199 printf(loading_msg, mp->mod_filename, mp->mod_id); 3200 3201 if (curthread != &t0) { 3202 lt.mp = mp; 3203 lt.usepath = usepath; 3204 lt.owner = curthread; 3205 sema_init(<.sema, 0, NULL, SEMA_DEFAULT, NULL); 3206 3207 /* create thread to hand of call to */ 3208 (void) thread_create(NULL, DEFAULTSTKSZ * 2, 3209 modload_thread, <, 0, &p0, TS_RUN, maxclsyspri); 3210 3211 /* wait for thread to complete kobj_load_module */ 3212 sema_p(<.sema); 3213 3214 sema_destroy(<.sema); 3215 retval = lt.retval; 3216 } else 3217 retval = kobj_load_module(mp, usepath); 3218 3219 if (mp->mod_mp) { 3220 ASSERT(retval == 0); 3221 mp->mod_loaded = 1; 3222 mp->mod_loadcnt++; 3223 if (moddebug & MODDEBUG_LOADMSG) { 3224 printf(load_msg, mp->mod_filename, mp->mod_id, 3225 (void *)((struct module *)mp->mod_mp)->text, 3226 (void *)((struct module *)mp->mod_mp)->data, 3227 ((struct module *)mp->mod_mp)->text_size, 3228 ((struct module *)mp->mod_mp)->data_size); 3229 } 3230 3231 /* 3232 * XXX - There should be a better way to get this. 3233 */ 3234 modinfop = kmem_zalloc(sizeof (struct modinfo), KM_SLEEP); 3235 modinfop->mi_info = MI_INFO_LINKAGE; 3236 if (mod_getinfo(mp, modinfop) == 0) 3237 mp->mod_linkage = NULL; 3238 else { 3239 mp->mod_linkage = (void *)modinfop->mi_base; 3240 ASSERT(mp->mod_linkage->ml_rev == MODREV_1); 3241 } 3242 3243 /* 3244 * DCS: bootstrapping code. If the driver is loaded 3245 * before root mount, it is assumed that the driver 3246 * may be used before mounting root. In order to 3247 * access mappings of global to local minor no.'s 3248 * during installation/open of the driver, we load 3249 * them into memory here while the BOP_interfaces 3250 * are still up. 3251 */ 3252 if ((cluster_bootflags & CLUSTER_BOOTED) && !modrootloaded) { 3253 retval = clboot_modload(mp); 3254 } 3255 3256 kmem_free(modinfop, sizeof (struct modinfo)); 3257 (void) mod_sysctl(SYS_SET_MVAR, (void *)mp); 3258 retval = install_stubs_by_name(mp, mp->mod_modname); 3259 3260 /* 3261 * Now that the module is loaded, we need to give DTrace 3262 * a chance to notify its providers. This is done via 3263 * the dtrace_modload function pointer. 
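 * Holding the "dtrace" modctl for the duration of the callback keeps
 * dtrace from being unloaded while its providers are being notified.
 * The hook is skipped when dtrace itself is the module being loaded,
 * since this thread already has that modctl busy.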
3264 */ 3265 if (strcmp(mp->mod_modname, "dtrace") != 0) { 3266 struct modctl *dmp = mod_hold_by_name("dtrace"); 3267 3268 if (dmp != NULL && dtrace_modload != NULL) 3269 (*dtrace_modload)(mp); 3270 3271 mod_release_mod(dmp); 3272 } 3273 3274 } else { 3275 /* 3276 * If load failed then we need to release any requisites 3277 * that we had established. 3278 */ 3279 ASSERT(retval); 3280 mod_release_requisites(mp); 3281 3282 if (moddebug & MODDEBUG_ERRMSG) 3283 printf("error loading '%s', error %d\n", 3284 mp->mod_filename, retval); 3285 } 3286 return (retval); 3287 } 3288 3289 static char unload_msg[] = "unloading %s, module id %d, loadcnt %d.\n"; 3290 3291 static void 3292 mod_unload(struct modctl *mp) 3293 { 3294 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3295 ASSERT(mp->mod_busy); 3296 ASSERT((mp->mod_loaded && (mp->mod_installed == 0)) && 3297 ((mp->mod_prim == 0) && (mp->mod_ref >= 0))); 3298 3299 if (moddebug & MODDEBUG_LOADMSG) 3300 printf(unload_msg, mp->mod_modname, 3301 mp->mod_id, mp->mod_loadcnt); 3302 3303 /* 3304 * If mod_ref is not zero, it means some modules might still refer 3305 * to this module. Then you can't unload this module right now. 3306 * Instead, set 1 to mod_delay_unload to notify the system of 3307 * unloading this module later when it's not required any more. 3308 */ 3309 if (mp->mod_ref > 0) { 3310 mp->mod_delay_unload = 1; 3311 if (moddebug & MODDEBUG_LOADMSG2) { 3312 printf("module %s not unloaded," 3313 " non-zero reference count (%d)", 3314 mp->mod_modname, mp->mod_ref); 3315 } 3316 return; 3317 } 3318 3319 if (((mp->mod_loaded == 0) || mp->mod_installed) || 3320 (mp->mod_ref || mp->mod_prim)) { 3321 /* 3322 * A DEBUG kernel would ASSERT panic above, the code is broken 3323 * if we get this warning. 3324 */ 3325 cmn_err(CE_WARN, "mod_unload: %s in incorrect state: %d %d %d", 3326 mp->mod_filename, mp->mod_installed, mp->mod_loaded, 3327 mp->mod_ref); 3328 return; 3329 } 3330 3331 /* reset stub functions to call the binder again */ 3332 reset_stubs(mp); 3333 3334 /* 3335 * mark module as unloaded before the modctl structure is freed. 3336 * This is required not to reuse the modctl structure before 3337 * the module is marked as unloaded. 3338 */ 3339 mp->mod_loaded = 0; 3340 mp->mod_linkage = NULL; 3341 3342 /* free the memory */ 3343 kobj_unload_module(mp); 3344 3345 if (mp->mod_delay_unload) { 3346 mp->mod_delay_unload = 0; 3347 if (moddebug & MODDEBUG_LOADMSG2) { 3348 printf("deferred unload of module %s" 3349 " (id %d) successful", 3350 mp->mod_modname, mp->mod_id); 3351 } 3352 } 3353 3354 /* release hold on requisites */ 3355 mod_release_requisites(mp); 3356 3357 /* 3358 * Now that the module is gone, we need to give DTrace a chance to 3359 * remove any probes that it may have had in the module. This is 3360 * done via the dtrace_modunload function pointer. 3361 */ 3362 if (strcmp(mp->mod_modname, "dtrace") != 0) { 3363 struct modctl *dmp = mod_hold_by_name("dtrace"); 3364 3365 if (dmp != NULL && dtrace_modunload != NULL) 3366 (*dtrace_modunload)(mp); 3367 3368 mod_release_mod(dmp); 3369 } 3370 } 3371 3372 static int 3373 modinstall(struct modctl *mp) 3374 { 3375 int val; 3376 int (*func)(void); 3377 3378 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3379 ASSERT(mp->mod_busy && mp->mod_loaded); 3380 3381 if (mp->mod_installed) 3382 return (0); 3383 /* 3384 * If mod_delay_unload is on, it means the system chose the deferred 3385 * unload for this module. Then you can't install this module until 3386 * it's unloaded from the system. 
3387 */ 3388 if (mp->mod_delay_unload) 3389 return (ENXIO); 3390 3391 if (moddebug & MODDEBUG_LOADMSG) 3392 printf("installing %s, module id %d.\n", 3393 mp->mod_modname, mp->mod_id); 3394 3395 ASSERT(mp->mod_mp != NULL); 3396 if (mod_install_requisites(mp) != 0) { 3397 /* 3398 * Note that we can't call mod_unload(mp) here since 3399 * if modinstall() was called by mod_install_requisites(), 3400 * we won't be able to hold the dependent modules 3401 * (otherwise there would be a deadlock). 3402 */ 3403 return (ENXIO); 3404 } 3405 3406 if (moddebug & MODDEBUG_ERRMSG) { 3407 printf("init '%s' id %d loaded @ 0x%p/0x%p size %lu/%lu\n", 3408 mp->mod_filename, mp->mod_id, 3409 (void *)((struct module *)mp->mod_mp)->text, 3410 (void *)((struct module *)mp->mod_mp)->data, 3411 ((struct module *)mp->mod_mp)->text_size, 3412 ((struct module *)mp->mod_mp)->data_size); 3413 } 3414 3415 func = (int (*)())kobj_lookup(mp->mod_mp, "_init"); 3416 3417 if (kobj_addrcheck(mp->mod_mp, (caddr_t)func)) { 3418 cmn_err(CE_WARN, "_init() not defined properly in %s", 3419 mp->mod_filename); 3420 return (EFAULT); 3421 } 3422 3423 if (moddebug & MODDEBUG_USERDEBUG) { 3424 printf("breakpoint before calling %s:_init()\n", 3425 mp->mod_modname); 3426 if (DEBUGGER_PRESENT) 3427 debug_enter("_init"); 3428 } 3429 3430 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3431 ASSERT(mp->mod_busy && mp->mod_loaded); 3432 val = (*func)(); /* call _init */ 3433 3434 if (moddebug & MODDEBUG_USERDEBUG) 3435 printf("Returned from _init, val = %x\n", val); 3436 3437 if (val == 0) { 3438 /* 3439 * Set the MODS_INSTALLED flag to enable this module 3440 * being called now. 3441 */ 3442 install_stubs(mp); 3443 mp->mod_installed = 1; 3444 } else if (moddebug & MODDEBUG_ERRMSG) 3445 printf(mod_init_msg, mp->mod_filename, mp->mod_modname, val); 3446 3447 return (val); 3448 } 3449 3450 int detach_driver_unconfig = 0; 3451 3452 static int 3453 detach_driver(char *name) 3454 { 3455 major_t major; 3456 int error; 3457 3458 /* 3459 * If being called from mod_uninstall_all() then the appropriate 3460 * driver detaches (leaf only) have already been done. 3461 */ 3462 if (mod_in_autounload()) 3463 return (0); 3464 3465 major = ddi_name_to_major(name); 3466 if (major == (major_t)-1) 3467 return (0); 3468 3469 error = ndi_devi_unconfig_driver(ddi_root_node(), 3470 NDI_DETACH_DRIVER | detach_driver_unconfig, major); 3471 return (error == NDI_SUCCESS ? 0 : -1); 3472 } 3473 3474 static char finiret_msg[] = "Returned from _fini for %s, status = %x\n"; 3475 3476 static int 3477 moduninstall(struct modctl *mp) 3478 { 3479 int status = 0; 3480 int (*func)(void); 3481 3482 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3483 ASSERT(mp->mod_busy); 3484 3485 /* 3486 * Verify that we need to do something and can uninstall the module. 3487 * 3488 * If we should not uninstall the module or if the module is not in 3489 * the correct state to start an uninstall we return EBUSY to prevent 3490 * us from progressing to mod_unload. If the module has already been 3491 * uninstalled and unloaded we return EALREADY. 3492 */ 3493 if (mp->mod_prim || mp->mod_ref || mp->mod_nenabled != 0) 3494 return (EBUSY); 3495 if ((mp->mod_installed == 0) || (mp->mod_loaded == 0)) 3496 return (EALREADY); 3497 3498 /* 3499 * To avoid devinfo / module deadlock we must release this module 3500 * prior to initiating the detach_driver, otherwise the detach_driver 3501 * might deadlock on a devinfo node held by another thread 3502 * coming top down and involving the module we have locked. 
3503 * 3504 * When we regrab the module we must reverify that it is OK 3505 * to proceed with the uninstall operation. 3506 */ 3507 mod_release_mod(mp); 3508 status = detach_driver(mp->mod_modname); 3509 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_NOT_HELD); 3510 3511 /* check detach status and reverify state with lock */ 3512 mutex_enter(&mod_lock); 3513 if ((status != 0) || mp->mod_prim || mp->mod_ref) { 3514 mutex_exit(&mod_lock); 3515 return (EBUSY); 3516 } 3517 if ((mp->mod_installed == 0) || (mp->mod_loaded == 0)) { 3518 mutex_exit(&mod_lock); 3519 return (EALREADY); 3520 } 3521 mutex_exit(&mod_lock); 3522 3523 if (moddebug & MODDEBUG_LOADMSG2) 3524 printf("uninstalling %s\n", mp->mod_modname); 3525 3526 /* 3527 * lookup _fini, return EBUSY if not defined. 3528 * 3529 * The MODDEBUG_FINI_EBUSY is usefull in resolving leaks in 3530 * detach(9E) - it allows bufctl addresses to be resolved. 3531 */ 3532 func = (int (*)())kobj_lookup(mp->mod_mp, "_fini"); 3533 if ((func == NULL) || (mp->mod_loadflags & MOD_NOUNLOAD) || 3534 (moddebug & MODDEBUG_FINI_EBUSY)) 3535 return (EBUSY); 3536 3537 /* verify that _fini is in this module */ 3538 if (kobj_addrcheck(mp->mod_mp, (caddr_t)func)) { 3539 cmn_err(CE_WARN, "_fini() not defined properly in %s", 3540 mp->mod_filename); 3541 return (EFAULT); 3542 } 3543 3544 /* call _fini() */ 3545 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3546 ASSERT(mp->mod_busy && mp->mod_loaded && mp->mod_installed); 3547 3548 status = (*func)(); 3549 3550 if (status == 0) { 3551 /* _fini returned success, the module is no longer installed */ 3552 if (moddebug & MODDEBUG_LOADMSG) 3553 printf("uninstalled %s\n", mp->mod_modname); 3554 3555 /* 3556 * Even though we only set mod_installed to zero here, a zero 3557 * return value means we are committed to a code path were 3558 * mod_loaded will also end up as zero - we have no other 3559 * way to get the module data and bss back to the pre _init 3560 * state except a reload. To ensure this, after return, 3561 * mod_busy must stay set until mod_loaded is cleared. 3562 */ 3563 mp->mod_installed = 0; 3564 3565 /* 3566 * Clear the MODS_INSTALLED flag not to call functions 3567 * in the module directly from now on. 3568 */ 3569 uninstall_stubs(mp); 3570 } else { 3571 if (moddebug & MODDEBUG_USERDEBUG) 3572 printf(finiret_msg, mp->mod_filename, status); 3573 /* 3574 * By definition _fini is only allowed to return EBUSY or the 3575 * result of mod_remove (EBUSY or EINVAL). In the off chance 3576 * that a driver returns EALREADY we convert this to EINVAL 3577 * since to our caller EALREADY means module was already 3578 * removed. 3579 */ 3580 if (status == EALREADY) 3581 status = EINVAL; 3582 } 3583 3584 return (status); 3585 } 3586 3587 /* 3588 * Uninstall all modules. 
3589 */ 3590 static void 3591 mod_uninstall_all(void) 3592 { 3593 struct modctl *mp; 3594 modid_t modid = 0; 3595 3596 /* synchronize with any active modunload_disable() */ 3597 modunload_begin(); 3598 3599 /* mark this thread as doing autounloading */ 3600 (void) tsd_set(mod_autounload_key, (void *)1); 3601 3602 (void) devfs_clean(ddi_root_node(), NULL, 0); 3603 (void) ndi_devi_unconfig(ddi_root_node(), NDI_AUTODETACH); 3604 3605 while ((mp = mod_hold_next_by_id(modid)) != NULL) { 3606 modid = mp->mod_id; 3607 /* 3608 * Skip modules with the MOD_NOAUTOUNLOAD flag set 3609 */ 3610 if (mp->mod_loadflags & MOD_NOAUTOUNLOAD) { 3611 mod_release_mod(mp); 3612 continue; 3613 } 3614 3615 if (moduninstall(mp) == 0) { 3616 mod_unload(mp); 3617 CPU_STATS_ADDQ(CPU, sys, modunload, 1); 3618 } 3619 mod_release_mod(mp); 3620 } 3621 3622 (void) tsd_set(mod_autounload_key, NULL); 3623 modunload_end(); 3624 } 3625 3626 /* wait for unloads that have begun before registering disable */ 3627 void 3628 modunload_disable(void) 3629 { 3630 mutex_enter(&modunload_wait_mutex); 3631 while (modunload_active_count) { 3632 modunload_wait++; 3633 cv_wait(&modunload_wait_cv, &modunload_wait_mutex); 3634 modunload_wait--; 3635 } 3636 modunload_disable_count++; 3637 mutex_exit(&modunload_wait_mutex); 3638 } 3639 3640 /* mark end of disable and signal waiters */ 3641 void 3642 modunload_enable(void) 3643 { 3644 mutex_enter(&modunload_wait_mutex); 3645 modunload_disable_count--; 3646 if ((modunload_disable_count == 0) && modunload_wait) 3647 cv_broadcast(&modunload_wait_cv); 3648 mutex_exit(&modunload_wait_mutex); 3649 } 3650 3651 /* wait for disables to complete before begining unload */ 3652 void 3653 modunload_begin() 3654 { 3655 mutex_enter(&modunload_wait_mutex); 3656 while (modunload_disable_count) { 3657 modunload_wait++; 3658 cv_wait(&modunload_wait_cv, &modunload_wait_mutex); 3659 modunload_wait--; 3660 } 3661 modunload_active_count++; 3662 mutex_exit(&modunload_wait_mutex); 3663 } 3664 3665 /* mark end of unload and signal waiters */ 3666 void 3667 modunload_end() 3668 { 3669 mutex_enter(&modunload_wait_mutex); 3670 modunload_active_count--; 3671 if ((modunload_active_count == 0) && modunload_wait) 3672 cv_broadcast(&modunload_wait_cv); 3673 mutex_exit(&modunload_wait_mutex); 3674 } 3675 3676 void 3677 mod_uninstall_daemon(void) 3678 { 3679 callb_cpr_t cprinfo; 3680 clock_t ticks = 0; 3681 3682 mod_aul_thread = curthread; 3683 3684 CALLB_CPR_INIT(&cprinfo, &mod_uninstall_lock, callb_generic_cpr, "mud"); 3685 for (;;) { 3686 mutex_enter(&mod_uninstall_lock); 3687 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3688 /* 3689 * In DEBUG kernels, unheld drivers are uninstalled periodically 3690 * every mod_uninstall_interval seconds. Periodic uninstall can 3691 * be disabled by setting mod_uninstall_interval to 0 which is 3692 * the default for a non-DEBUG kernel. 3693 */ 3694 if (mod_uninstall_interval) { 3695 ticks = ddi_get_lbolt() + 3696 drv_usectohz(mod_uninstall_interval * 1000000); 3697 (void) cv_timedwait(&mod_uninstall_cv, 3698 &mod_uninstall_lock, ticks); 3699 } else { 3700 cv_wait(&mod_uninstall_cv, &mod_uninstall_lock); 3701 } 3702 /* 3703 * The whole daemon is safe for CPR except we don't want 3704 * the daemon to run if FREEZE is issued and this daemon 3705 * wakes up from the cv_wait above. In this case, it'll be 3706 * blocked in CALLB_CPR_SAFE_END until THAW is issued. 
3707 * 3708 * The reason of calling CALLB_CPR_SAFE_BEGIN twice is that 3709 * mod_uninstall_lock is used to protect cprinfo and 3710 * CALLB_CPR_SAFE_BEGIN assumes that this lock is held when 3711 * called. 3712 */ 3713 CALLB_CPR_SAFE_END(&cprinfo, &mod_uninstall_lock); 3714 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3715 mutex_exit(&mod_uninstall_lock); 3716 if ((modunload_disable_count == 0) && 3717 ((moddebug & MODDEBUG_NOAUTOUNLOAD) == 0)) { 3718 mod_uninstall_all(); 3719 } 3720 } 3721 } 3722 3723 /* 3724 * Unload all uninstalled modules. 3725 */ 3726 void 3727 modreap(void) 3728 { 3729 mutex_enter(&mod_uninstall_lock); 3730 cv_broadcast(&mod_uninstall_cv); 3731 mutex_exit(&mod_uninstall_lock); 3732 } 3733 3734 /* 3735 * Hold the specified module. This is the module holding primitive. 3736 * 3737 * If MOD_LOCK_HELD then the caller already holds the mod_lock. 3738 * 3739 * Return values: 3740 * 0 ==> the module is held 3741 * 1 ==> the module is not held and the MOD_WAIT_ONCE caller needs 3742 * to determine how to retry. 3743 */ 3744 int 3745 mod_hold_by_modctl(struct modctl *mp, int f) 3746 { 3747 ASSERT((f & (MOD_WAIT_ONCE | MOD_WAIT_FOREVER)) && 3748 ((f & (MOD_WAIT_ONCE | MOD_WAIT_FOREVER)) != 3749 (MOD_WAIT_ONCE | MOD_WAIT_FOREVER))); 3750 ASSERT((f & (MOD_LOCK_HELD | MOD_LOCK_NOT_HELD)) && 3751 ((f & (MOD_LOCK_HELD | MOD_LOCK_NOT_HELD)) != 3752 (MOD_LOCK_HELD | MOD_LOCK_NOT_HELD))); 3753 ASSERT((f & MOD_LOCK_NOT_HELD) || MUTEX_HELD(&mod_lock)); 3754 3755 if (f & MOD_LOCK_NOT_HELD) 3756 mutex_enter(&mod_lock); 3757 3758 while (mp->mod_busy) { 3759 mp->mod_want = 1; 3760 cv_wait(&mod_cv, &mod_lock); 3761 /* 3762 * Module may be unloaded by daemon. 3763 * Nevertheless, modctl structure is still in linked list 3764 * (i.e., off &modules), not freed! 3765 * Caller is not supposed to assume "mp" is valid, but there 3766 * is no reasonable way to detect this but using 3767 * mp->mod_modinfo->mp == NULL check (follow the back pointer) 3768 * (or similar check depending on calling context) 3769 * DON'T free modctl structure, it will be very very 3770 * problematic. 3771 */ 3772 if (f & MOD_WAIT_ONCE) { 3773 if (f & MOD_LOCK_NOT_HELD) 3774 mutex_exit(&mod_lock); 3775 return (1); /* caller decides how to retry */ 3776 } 3777 } 3778 3779 mp->mod_busy = 1; 3780 mp->mod_inprogress_thread = 3781 (curthread == NULL ? (kthread_id_t)-1 : curthread); 3782 3783 if (f & MOD_LOCK_NOT_HELD) 3784 mutex_exit(&mod_lock); 3785 return (0); 3786 } 3787 3788 static struct modctl * 3789 mod_hold_by_name_common(struct modctl *dep, const char *filename) 3790 { 3791 const char *modname; 3792 struct modctl *mp; 3793 char *curname, *newname; 3794 int found = 0; 3795 3796 mutex_enter(&mod_lock); 3797 3798 if ((modname = strrchr(filename, '/')) == NULL) 3799 modname = filename; 3800 else 3801 modname++; 3802 3803 mp = &modules; 3804 do { 3805 if (strcmp(modname, mp->mod_modname) == 0) { 3806 found = 1; 3807 break; 3808 } 3809 } while ((mp = mp->mod_next) != &modules); 3810 3811 if (found == 0) { 3812 mp = allocate_modp(filename, modname); 3813 modadd(mp); 3814 } 3815 3816 /* 3817 * if dep is not NULL, set the mp in mod_requisite_loading for 3818 * the module circular dependency check. This field is used in 3819 * mod_circdep(), but it's cleard in mod_hold_loaded_mod(). 3820 */ 3821 if (dep != NULL) { 3822 ASSERT(dep->mod_busy && dep->mod_requisite_loading == NULL); 3823 dep->mod_requisite_loading = mp; 3824 } 3825 3826 /* 3827 * If the module was held, then it must be us who has it held. 
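 * mod_circdep() returning true here means this thread is already busy
 * loading this very module (directly or through its requisite chain), so
 * we return NULL and fail the request rather than deadlock on ourselves.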
3828 */ 3829 if (mod_circdep(mp)) 3830 mp = NULL; 3831 else { 3832 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_HELD); 3833 3834 /* 3835 * If the name hadn't been set or has changed, allocate 3836 * space and set it. Free space used by previous name. 3837 * 3838 * Do not change the name of primary modules, for primary 3839 * modules the mod_filename was allocated in standalone mode: 3840 * it is illegal to kobj_alloc in standalone mode and kobj_free 3841 * in non-standalone mode. 3842 */ 3843 curname = mp->mod_filename; 3844 if (curname == NULL || 3845 ((mp->mod_prim == 0) && 3846 (curname != filename) && 3847 (modname != filename) && 3848 (strcmp(curname, filename) != 0))) { 3849 newname = kobj_zalloc(strlen(filename) + 1, KM_SLEEP); 3850 (void) strcpy(newname, filename); 3851 mp->mod_filename = newname; 3852 if (curname != NULL) 3853 kobj_free(curname, strlen(curname) + 1); 3854 } 3855 } 3856 3857 mutex_exit(&mod_lock); 3858 if (mp && moddebug & MODDEBUG_LOADMSG2) 3859 printf("Holding %s\n", mp->mod_filename); 3860 if (mp == NULL && moddebug & MODDEBUG_LOADMSG2) 3861 printf("circular dependency loading %s\n", filename); 3862 return (mp); 3863 } 3864 3865 static struct modctl * 3866 mod_hold_by_name_requisite(struct modctl *dep, char *filename) 3867 { 3868 return (mod_hold_by_name_common(dep, filename)); 3869 } 3870 3871 struct modctl * 3872 mod_hold_by_name(const char *filename) 3873 { 3874 return (mod_hold_by_name_common(NULL, filename)); 3875 } 3876 3877 struct modctl * 3878 mod_hold_by_id(modid_t modid) 3879 { 3880 struct modctl *mp; 3881 int found = 0; 3882 3883 mutex_enter(&mod_lock); 3884 mp = &modules; 3885 do { 3886 if (mp->mod_id == modid) { 3887 found = 1; 3888 break; 3889 } 3890 } while ((mp = mp->mod_next) != &modules); 3891 3892 if ((found == 0) || mod_circdep(mp)) 3893 mp = NULL; 3894 else 3895 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_HELD); 3896 3897 mutex_exit(&mod_lock); 3898 return (mp); 3899 } 3900 3901 static struct modctl * 3902 mod_hold_next_by_id(modid_t modid) 3903 { 3904 struct modctl *mp; 3905 int found = 0; 3906 3907 if (modid < -1) 3908 return (NULL); 3909 3910 mutex_enter(&mod_lock); 3911 3912 mp = &modules; 3913 do { 3914 if (mp->mod_id > modid) { 3915 found = 1; 3916 break; 3917 } 3918 } while ((mp = mp->mod_next) != &modules); 3919 3920 if ((found == 0) || mod_circdep(mp)) 3921 mp = NULL; 3922 else 3923 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_HELD); 3924 3925 mutex_exit(&mod_lock); 3926 return (mp); 3927 } 3928 3929 static void 3930 mod_release(struct modctl *mp) 3931 { 3932 ASSERT(MUTEX_HELD(&mod_lock)); 3933 ASSERT(mp->mod_busy); 3934 3935 mp->mod_busy = 0; 3936 mp->mod_inprogress_thread = NULL; 3937 if (mp->mod_want) { 3938 mp->mod_want = 0; 3939 cv_broadcast(&mod_cv); 3940 } 3941 } 3942 3943 void 3944 mod_release_mod(struct modctl *mp) 3945 { 3946 if (moddebug & MODDEBUG_LOADMSG2) 3947 printf("Releasing %s\n", mp->mod_filename); 3948 mutex_enter(&mod_lock); 3949 mod_release(mp); 3950 mutex_exit(&mod_lock); 3951 } 3952 3953 modid_t 3954 mod_name_to_modid(char *filename) 3955 { 3956 char *modname; 3957 struct modctl *mp; 3958 3959 mutex_enter(&mod_lock); 3960 3961 if ((modname = strrchr(filename, '/')) == NULL) 3962 modname = filename; 3963 else 3964 modname++; 3965 3966 mp = &modules; 3967 do { 3968 if (strcmp(modname, mp->mod_modname) == 0) { 3969 mutex_exit(&mod_lock); 3970 return (mp->mod_id); 3971 } 3972 } while ((mp = mp->mod_next) != &modules); 3973 3974 mutex_exit(&mod_lock); 3975 return (-1); 3976 
} 3977 3978 3979 int 3980 mod_remove_by_name(char *name) 3981 { 3982 struct modctl *mp; 3983 int retval; 3984 3985 mp = mod_hold_by_name(name); 3986 3987 if (mp == NULL) 3988 return (EINVAL); 3989 3990 if (mp->mod_loadflags & MOD_NOAUTOUNLOAD) { 3991 /* 3992 * Do not unload forceloaded modules 3993 */ 3994 mod_release_mod(mp); 3995 return (0); 3996 } 3997 3998 if ((retval = moduninstall(mp)) == 0) { 3999 mod_unload(mp); 4000 CPU_STATS_ADDQ(CPU, sys, modunload, 1); 4001 } else if (retval == EALREADY) 4002 retval = 0; /* already unloaded, not an error */ 4003 mod_release_mod(mp); 4004 return (retval); 4005 } 4006 4007 /* 4008 * Record that module "dep" is dependent on module "on_mod." 4009 */ 4010 static void 4011 mod_make_requisite(struct modctl *dependent, struct modctl *on_mod) 4012 { 4013 struct modctl_list **pmlnp; /* previous next pointer */ 4014 struct modctl_list *mlp; 4015 struct modctl_list *new; 4016 4017 ASSERT(dependent->mod_busy && on_mod->mod_busy); 4018 mutex_enter(&mod_lock); 4019 4020 /* 4021 * Search dependent's requisite list to see if on_mod is recorded. 4022 * List is ordered by id. 4023 */ 4024 for (pmlnp = &dependent->mod_requisites, mlp = *pmlnp; 4025 mlp; pmlnp = &mlp->modl_next, mlp = *pmlnp) 4026 if (mlp->modl_modp->mod_id >= on_mod->mod_id) 4027 break; 4028 4029 /* Create and insert if not already recorded */ 4030 if ((mlp == NULL) || (mlp->modl_modp->mod_id != on_mod->mod_id)) { 4031 new = kobj_zalloc(sizeof (*new), KM_SLEEP); 4032 new->modl_modp = on_mod; 4033 new->modl_next = mlp; 4034 *pmlnp = new; 4035 4036 /* 4037 * Increment the mod_ref count in our new requisite module. 4038 * This is what keeps a module that has other modules 4039 * which are dependent on it from being uninstalled and 4040 * unloaded. "on_mod"'s mod_ref count decremented in 4041 * mod_release_requisites when the "dependent" module 4042 * unload is complete. "on_mod" must be loaded, but may not 4043 * yet be installed. 4044 */ 4045 on_mod->mod_ref++; 4046 ASSERT(on_mod->mod_ref && on_mod->mod_loaded); 4047 } 4048 4049 mutex_exit(&mod_lock); 4050 } 4051 4052 /* 4053 * release the hold associated with mod_make_requisite mod_ref++ 4054 * as part of unload. 4055 */ 4056 void 4057 mod_release_requisites(struct modctl *modp) 4058 { 4059 struct modctl_list *modl; 4060 struct modctl_list *next; 4061 struct modctl *req; 4062 struct modctl_list *start = NULL, *mod_garbage; 4063 4064 ASSERT(modp->mod_busy); 4065 ASSERT(!MUTEX_HELD(&mod_lock)); 4066 4067 mutex_enter(&mod_lock); /* needed for manipulation of req */ 4068 for (modl = modp->mod_requisites; modl; modl = next) { 4069 next = modl->modl_next; 4070 req = modl->modl_modp; 4071 ASSERT(req->mod_ref >= 1 && req->mod_loaded); 4072 req->mod_ref--; 4073 4074 /* 4075 * Check if the module has to be unloaded or not. 4076 */ 4077 if (req->mod_ref == 0 && req->mod_delay_unload) { 4078 struct modctl_list *new; 4079 /* 4080 * Allocate the modclt_list holding the garbage 4081 * module which should be unloaded later. 4082 */ 4083 new = kobj_zalloc(sizeof (struct modctl_list), 4084 KM_SLEEP); 4085 new->modl_modp = req; 4086 4087 if (start == NULL) 4088 mod_garbage = start = new; 4089 else { 4090 mod_garbage->modl_next = new; 4091 mod_garbage = new; 4092 } 4093 } 4094 4095 /* free the list as we go */ 4096 kobj_free(modl, sizeof (*modl)); 4097 } 4098 modp->mod_requisites = NULL; 4099 mutex_exit(&mod_lock); 4100 4101 /* 4102 * Unload the garbage modules. 
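 * These are requisites whose unload was deferred while we still held a
 * reference; the list was collected above under mod_lock, and the actual
 * unloads are done here, after dropping mod_lock, since
 * mod_hold_by_modctl() with MOD_WAIT_FOREVER may block.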
4103 */ 4104 for (mod_garbage = start; mod_garbage != NULL; /* nothing */) { 4105 struct modctl_list *old = mod_garbage; 4106 struct modctl *mp = mod_garbage->modl_modp; 4107 ASSERT(mp != NULL); 4108 4109 /* 4110 * Hold this module until it's unloaded completely. 4111 */ 4112 (void) mod_hold_by_modctl(mp, 4113 MOD_WAIT_FOREVER | MOD_LOCK_NOT_HELD); 4114 /* 4115 * Check if the module is not unloaded yet and nobody requires 4116 * the module. If it's unloaded already or somebody still 4117 * requires the module, don't unload it now. 4118 */ 4119 if (mp->mod_loaded && mp->mod_ref == 0) 4120 mod_unload(mp); 4121 ASSERT((mp->mod_loaded == 0 && mp->mod_delay_unload == 0) || 4122 (mp->mod_ref > 0)); 4123 mod_release_mod(mp); 4124 4125 mod_garbage = mod_garbage->modl_next; 4126 kobj_free(old, sizeof (struct modctl_list)); 4127 } 4128 } 4129 4130 /* 4131 * Process dependency of the module represented by "dep" on the 4132 * module named by "on." 4133 * 4134 * Called from kobj_do_dependents() to load a module "on" on which 4135 * "dep" depends. 4136 */ 4137 struct modctl * 4138 mod_load_requisite(struct modctl *dep, char *on) 4139 { 4140 struct modctl *on_mod; 4141 int retval; 4142 4143 if ((on_mod = mod_hold_loaded_mod(dep, on, &retval)) != NULL) { 4144 mod_make_requisite(dep, on_mod); 4145 } else if (moddebug & MODDEBUG_ERRMSG) { 4146 printf("error processing %s on which module %s depends\n", 4147 on, dep->mod_modname); 4148 } 4149 return (on_mod); 4150 } 4151 4152 static int 4153 mod_install_requisites(struct modctl *modp) 4154 { 4155 struct modctl_list *modl; 4156 struct modctl *req; 4157 int status = 0; 4158 4159 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 4160 ASSERT(modp->mod_busy); 4161 4162 for (modl = modp->mod_requisites; modl; modl = modl->modl_next) { 4163 req = modl->modl_modp; 4164 (void) mod_hold_by_modctl(req, 4165 MOD_WAIT_FOREVER | MOD_LOCK_NOT_HELD); 4166 status = modinstall(req); 4167 mod_release_mod(req); 4168 4169 if (status != 0) 4170 break; 4171 } 4172 return (status); 4173 } 4174 4175 /* 4176 * returns 1 if this thread is doing autounload, 0 otherwise. 4177 * see mod_uninstall_all. 4178 */ 4179 int 4180 mod_in_autounload() 4181 { 4182 return ((int)(uintptr_t)tsd_get(mod_autounload_key)); 4183 } 4184 4185 /* 4186 * gmatch adapted from libc, stripping the wchar stuff 4187 */ 4188 #define popchar(p, c) { \ 4189 c = *p++; \ 4190 if (c == 0) { \ 4191 return (0); \ 4192 } \ 4193 } 4194 4195 int 4196 gmatch(const char *s, const char *p) 4197 { 4198 int c, sc; 4199 int ok, lc, notflag; 4200 4201 sc = *s++; 4202 c = *p++; 4203 if (c == 0) 4204 return (sc == c); /* nothing matches nothing */ 4205 4206 switch (c) { 4207 case '\\': 4208 /* skip to quoted character */ 4209 popchar(p, c); 4210 /*FALLTHRU*/ 4211 4212 default: 4213 /* straight comparison */ 4214 if (c != sc) 4215 return (0); 4216 /*FALLTHRU*/ 4217 4218 case '?': 4219 /* first char matches, move to remainder */ 4220 return (sc != '\0' ? 
gmatch(s, p) : 0); 4221 4222 4223 case '*': 4224 while (*p == '*') 4225 p++; 4226 4227 /* * matches everything */ 4228 if (*p == 0) 4229 return (1); 4230 4231 /* undo skip at the beginning & iterate over substrings */ 4232 --s; 4233 while (*s) { 4234 if (gmatch(s, p)) 4235 return (1); 4236 s++; 4237 } 4238 return (0); 4239 4240 case '[': 4241 /* match any char within [] */ 4242 if (sc == 0) 4243 return (0); 4244 4245 ok = lc = notflag = 0; 4246 4247 if (*p == '!') { 4248 notflag = 1; 4249 p++; 4250 } 4251 popchar(p, c); 4252 4253 do { 4254 if (c == '-' && lc && *p != ']') { 4255 /* test sc against range [c1-c2] */ 4256 popchar(p, c); 4257 if (c == '\\') { 4258 popchar(p, c); 4259 } 4260 4261 if (notflag) { 4262 /* return 0 on mismatch */ 4263 if (lc <= sc && sc <= c) 4264 return (0); 4265 ok++; 4266 } else if (lc <= sc && sc <= c) { 4267 ok++; 4268 } 4269 /* keep going, may get a match next */ 4270 } else if (c == '\\') { 4271 /* skip to quoted character */ 4272 popchar(p, c); 4273 } 4274 lc = c; 4275 if (notflag) { 4276 if (sc == lc) 4277 return (0); 4278 ok++; 4279 } else if (sc == lc) { 4280 ok++; 4281 } 4282 popchar(p, c); 4283 } while (c != ']'); 4284 4285 /* recurse on remainder of string */ 4286 return (ok ? gmatch(s, p) : 0); 4287 } 4288 /*NOTREACHED*/ 4289 } 4290 4291 4292 /* 4293 * Get default perm for device from /etc/minor_perm. Return 0 if match found. 4294 * 4295 * Pure wild-carded patterns are handled separately so the ordering of 4296 * these patterns doesn't matter. We're still dependent on ordering 4297 * however as the first matching entry is the one returned. 4298 * Not ideal but all existing examples and usage do imply this 4299 * ordering implicitly. 4300 * 4301 * Drivers using the clone driver are always good for some entertainment. 4302 * Clone nodes under pseudo have the form clone@0:<driver>. Some minor 4303 * perm entries have the form clone:<driver>, others use <driver>:* 4304 * Examples are clone:llc1 vs. llc2:*, for example. 4305 * 4306 * Minor perms in the clone:<driver> form are mapped to the drivers's 4307 * mperm list, not the clone driver, as wildcard entries for clone 4308 * reference only. In other words, a clone wildcard will match 4309 * references for clone@0:<driver> but never <driver>@<minor>. 4310 * 4311 * Additional minor perms in the standard form are also supported, 4312 * for mixed usage, ie a node with an entry clone:<driver> could 4313 * provide further entries <driver>:<minor>. 4314 * 4315 * Finally, some uses of clone use an alias as the minor name rather 4316 * than the driver name, with the alias as the minor perm entry. 4317 * This case is handled by attaching the driver to bring its 4318 * minor list into existence, then discover the alias via DDI_ALIAS. 4319 * The clone device's minor perm list can then be searched for 4320 * that alias. 4321 */ 4322 4323 static int 4324 dev_alias_minorperm(dev_info_t *dip, char *minor_name, mperm_t *rmp) 4325 { 4326 major_t major; 4327 struct devnames *dnp; 4328 mperm_t *mp; 4329 char *alias = NULL; 4330 dev_info_t *cdevi; 4331 struct ddi_minor_data *dmd; 4332 4333 major = ddi_name_to_major(minor_name); 4334 4335 ASSERT(dip == clone_dip); 4336 ASSERT(major != (major_t)-1); 4337 4338 /* 4339 * Attach the driver named by the minor node, then 4340 * search its first instance's minor list for an 4341 * alias node. 
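 * For example, /dev/ptmx resolves to clone@0:ptm; attaching the ptm
 * driver exposes its DDM_ALIAS minor "ptmx", and it is that alias which
 * the clone:ptmx minor_perm entry, kept on the clone driver's own list,
 * matches (see the corresponding example in dev_minorperm() below).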

static int
dev_alias_minorperm(dev_info_t *dip, char *minor_name, mperm_t *rmp)
{
	major_t major;
	struct devnames *dnp;
	mperm_t *mp;
	char *alias = NULL;
	dev_info_t *cdevi;
	struct ddi_minor_data *dmd;

	major = ddi_name_to_major(minor_name);

	ASSERT(dip == clone_dip);
	ASSERT(major != (major_t)-1);

	/*
	 * Attach the driver named by the minor node, then
	 * search its first instance's minor list for an
	 * alias node.
	 */
	if (ddi_hold_installed_driver(major) == NULL)
		return (1);

	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);

	if ((cdevi = dnp->dn_head) != NULL) {
		mutex_enter(&DEVI(cdevi)->devi_lock);
		for (dmd = DEVI(cdevi)->devi_minor; dmd; dmd = dmd->next) {
			if (dmd->type == DDM_ALIAS) {
				alias = i_ddi_strdup(dmd->ddm_name, KM_SLEEP);
				break;
			}
		}
		mutex_exit(&DEVI(cdevi)->devi_lock);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	ddi_rele_driver(major);

	if (alias == NULL) {
		if (moddebug & MODDEBUG_MINORPERM)
			cmn_err(CE_CONT, "dev_minorperm: "
			    "no alias for %s\n", minor_name);
		return (1);
	}

	major = ddi_driver_major(clone_dip);
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);

	/*
	 * Go through the clone driver's mperm list looking
	 * for a match for the specified alias.
	 */
	for (mp = dnp->dn_mperm; mp; mp = mp->mp_next) {
		if (strcmp(alias, mp->mp_minorname) == 0) {
			break;
		}
	}

	if (mp) {
		if (moddebug & MODDEBUG_MP_MATCH) {
			cmn_err(CE_CONT,
			    "minor perm defaults: %s %s 0%o %d %d (aliased)\n",
			    minor_name, alias, mp->mp_mode,
			    mp->mp_uid, mp->mp_gid);
		}
		rmp->mp_uid = mp->mp_uid;
		rmp->mp_gid = mp->mp_gid;
		rmp->mp_mode = mp->mp_mode;
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	kmem_free(alias, strlen(alias) + 1);

	return (mp == NULL);
}

int
dev_minorperm(dev_info_t *dip, char *name, mperm_t *rmp)
{
	major_t major;
	char *minor_name;
	struct devnames *dnp;
	mperm_t *mp;
	int is_clone = 0;

	if (!minorperm_loaded) {
		if (moddebug & MODDEBUG_MINORPERM)
			cmn_err(CE_CONT,
			    "%s: minor perm not yet loaded\n", name);
		return (1);
	}

	minor_name = strchr(name, ':');
	if (minor_name == NULL)
		return (1);
	minor_name++;

	/*
	 * If it's the clone driver, search the driver as named
	 * by the minor. All clone minor perm entries other than
	 * alias nodes are actually installed on the real driver's list.
	 */
	if (dip == clone_dip) {
		major = ddi_name_to_major(minor_name);
		if (major == (major_t)-1) {
			if (moddebug & MODDEBUG_MINORPERM)
				cmn_err(CE_CONT, "dev_minorperm: "
				    "%s: no such driver\n", minor_name);
			return (1);
		}
		is_clone = 1;
	} else {
		major = ddi_driver_major(dip);
		ASSERT(major != (major_t)-1);
	}

	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);

	/*
	 * Go through the driver's mperm list looking for
	 * a match for the specified minor. If there's
	 * no matching pattern, use the wild card.
	 * Defer to the clone wild for clone if specified,
	 * otherwise fall back to the normal form.
	 */
	for (mp = dnp->dn_mperm; mp; mp = mp->mp_next) {
		if (gmatch(minor_name, mp->mp_minorname) != 0) {
			break;
		}
	}
	if (mp == NULL) {
		if (is_clone)
			mp = dnp->dn_mperm_clone;
		if (mp == NULL)
			mp = dnp->dn_mperm_wild;
	}

	if (mp) {
		if (moddebug & MODDEBUG_MP_MATCH) {
			cmn_err(CE_CONT,
			    "minor perm defaults: %s %s 0%o %d %d\n",
			    name, mp->mp_minorname, mp->mp_mode,
			    mp->mp_uid, mp->mp_gid);
		}
		rmp->mp_uid = mp->mp_uid;
		rmp->mp_gid = mp->mp_gid;
		rmp->mp_mode = mp->mp_mode;
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/*
	 * If no match can be found for a clone node,
	 * search for a possible match for an alias.
	 * One such example is /dev/ptmx -> /devices/pseudo/clone@0:ptm,
	 * with minor perm entry clone:ptmx.
	 */
	if (mp == NULL && is_clone) {
		return (dev_alias_minorperm(dip, minor_name, rmp));
	}

	return (mp == NULL);
}

/*
 * dynamically reference-load a dl module/library, returning a handle
 */
/*ARGSUSED*/
ddi_modhandle_t
ddi_modopen(const char *modname, int mode, int *errnop)
{
	char *subdir;
	char *mod;
	int subdirlen;
	struct modctl *hmodp = NULL;
	int retval = EINVAL;

	ASSERT(modname && (mode == KRTLD_MODE_FIRST));
	if ((modname == NULL) || (mode != KRTLD_MODE_FIRST))
		goto out;

	/* find last '/' in modname */
	mod = strrchr(modname, '/');

	if (mod) {
		/* copy out the subdir string without modifying the argument */
		mod++;
		subdirlen = mod - modname;
		subdir = kmem_alloc(subdirlen, KM_SLEEP);
		(void) strlcpy(subdir, modname, subdirlen);
	} else {
		subdirlen = 0;
		subdir = "misc";
		mod = (char *)modname;
	}

	/* reference load with errno return value */
	retval = modrload(subdir, mod, &hmodp);

	if (subdirlen)
		kmem_free(subdir, subdirlen);

out:	if (errnop)
		*errnop = retval;

	if (moddebug & MODDEBUG_DDI_MOD)
		printf("ddi_modopen %s mode %x: %s %p %d\n",
		    modname ? modname : "<unknown>", mode,
		    hmodp ? hmodp->mod_filename : "<unknown>",
		    (void *)hmodp, retval);

	return ((ddi_modhandle_t)hmodp);
}

/* lookup "name" in open dl module/library */
void *
ddi_modsym(ddi_modhandle_t h, const char *name, int *errnop)
{
	struct modctl *hmodp = (struct modctl *)h;
	void *f;
	int retval;

	ASSERT(hmodp && name && hmodp->mod_installed && (hmodp->mod_ref >= 1));
	if ((hmodp == NULL) || (name == NULL) ||
	    (hmodp->mod_installed == 0) || (hmodp->mod_ref < 1)) {
		f = NULL;
		retval = EINVAL;
	} else {
		f = (void *)kobj_lookup(hmodp->mod_mp, (char *)name);
		if (f)
			retval = 0;
		else
			retval = ENOTSUP;
	}

	if (moddebug & MODDEBUG_DDI_MOD)
		printf("ddi_modsym in %s of %s: %d %p\n",
		    hmodp ? hmodp->mod_modname : "<unknown>",
		    name ? name : "<unknown>", retval, f);

	if (errnop)
		*errnop = retval;
	return (f);
}

/* dynamic (un)reference unload of an open dl module/library */
int
ddi_modclose(ddi_modhandle_t h)
{
	struct modctl *hmodp = (struct modctl *)h;
	struct modctl *modp = NULL;
	int retval;

	ASSERT(hmodp && hmodp->mod_installed && (hmodp->mod_ref >= 1));
	if ((hmodp == NULL) ||
	    (hmodp->mod_installed == 0) || (hmodp->mod_ref < 1)) {
		retval = EINVAL;
		goto out;
	}

	retval = modunrload(hmodp->mod_id, &modp, ddi_modclose_unload);
	if (retval == EBUSY)
		retval = 0;	/* EBUSY is not an error */

	if (retval == 0) {
		ASSERT(hmodp == modp);
		if (hmodp != modp)
			retval = EINVAL;
	}

out:	if (moddebug & MODDEBUG_DDI_MOD)
		printf("ddi_modclose %s: %d\n",
		    hmodp ? hmodp->mod_modname : "<unknown>", retval);

	return (retval);
}
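
/*
 * A minimal usage sketch for the interfaces above (illustrative only,
 * not part of this implementation): a client module might reference-load
 * a hypothetical misc module "misc/foo", look up a symbol it is known to
 * export, call it, and then drop the reference:
 *
 *	ddi_modhandle_t h;
 *	int (*foo_init)(void);
 *	int errval, rv;
 *
 *	h = ddi_modopen("misc/foo", KRTLD_MODE_FIRST, &errval);
 *	if (h == NULL)
 *		return (errval);
 *
 *	foo_init = (int (*)(void))ddi_modsym(h, "foo_init", &errval);
 *	if (foo_init == NULL) {
 *		(void) ddi_modclose(h);
 *		return (errval);
 *	}
 *
 *	rv = (*foo_init)();
 *	(void) ddi_modclose(h);
 *	return (rv);
 *
 * "misc/foo" and "foo_init" are hypothetical names; the handle must be
 * held open for as long as the looked-up address is in use.
 */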