1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * modctl system call for loadable module support. 31 */ 32 33 #include <sys/param.h> 34 #include <sys/user.h> 35 #include <sys/systm.h> 36 #include <sys/exec.h> 37 #include <sys/file.h> 38 #include <sys/stat.h> 39 #include <sys/conf.h> 40 #include <sys/time.h> 41 #include <sys/reboot.h> 42 #include <sys/fs/ufs_fsdir.h> 43 #include <sys/kmem.h> 44 #include <sys/sysconf.h> 45 #include <sys/cmn_err.h> 46 #include <sys/ddi.h> 47 #include <sys/sunddi.h> 48 #include <sys/sunndi.h> 49 #include <sys/ndi_impldefs.h> 50 #include <sys/ddi_impldefs.h> 51 #include <sys/ddi_implfuncs.h> 52 #include <sys/bootconf.h> 53 #include <sys/dc_ki.h> 54 #include <sys/cladm.h> 55 #include <sys/dtrace.h> 56 #include <sys/kdi.h> 57 58 #include <sys/devpolicy.h> 59 #include <sys/modctl.h> 60 #include <sys/kobj.h> 61 #include <sys/devops.h> 62 #include <sys/autoconf.h> 63 #include <sys/hwconf.h> 64 #include <sys/callb.h> 65 #include <sys/debug.h> 66 #include <sys/cpuvar.h> 67 #include <sys/sysmacros.h> 68 #include <sys/sysevent.h> 69 #include <sys/sysevent_impl.h> 70 #include <sys/instance.h> 71 #include <sys/modhash.h> 72 #include <sys/modhash_impl.h> 73 #include <sys/dacf_impl.h> 74 #include <sys/vfs.h> 75 #include <sys/pathname.h> 76 #include <sys/console.h> 77 #include <sys/policy.h> 78 #include <ipp/ipp_impl.h> 79 #include <sys/fs/dv_node.h> 80 #include <sys/strsubr.h> 81 82 static int mod_circdep(struct modctl *); 83 static int modinfo(modid_t, struct modinfo *); 84 85 static void mod_uninstall_all(void); 86 static int mod_getinfo(struct modctl *, struct modinfo *); 87 static struct modctl *allocate_modp(char *, char *); 88 89 static int mod_load(struct modctl *, int); 90 static void mod_unload(struct modctl *); 91 static int modinstall(struct modctl *); 92 static int moduninstall(struct modctl *); 93 94 static struct modctl *mod_hold_by_name_common(struct modctl *, char *); 95 static struct modctl *mod_hold_by_id(modid_t); 96 static struct modctl *mod_hold_next_by_id(modid_t); 97 static struct modctl *mod_hold_loaded_mod(struct modctl *, char *, int *); 98 static struct modctl *mod_hold_installed_mod(char *, int, int *); 99 100 static void mod_release(struct modctl *); 101 static void mod_make_requisite(struct modctl *, struct modctl *); 102 static int mod_install_requisites(struct modctl *); 103 static void check_esc_sequences(char *, char *); 104 static struct modctl *mod_hold_by_name_requisite(struct modctl *, char *); 105 106 /* 107 * module loading thread control structure. 
Calls to kobj_load_module()() are 108 * handled off to a separate thead using this structure. 109 */ 110 struct loadmt { 111 ksema_t sema; 112 struct modctl *mp; 113 int usepath; 114 kthread_t *owner; 115 int retval; 116 }; 117 118 static void modload_thread(struct loadmt *); 119 120 kcondvar_t mod_cv; 121 kcondvar_t mod_uninstall_cv; /* Communication between swapper */ 122 /* and the uninstall daemon. */ 123 kmutex_t mod_lock; /* protects &modules insert linkage, */ 124 /* mod_busy, mod_want, and mod_ref. */ 125 /* blocking operations while holding */ 126 /* mod_lock should be avoided */ 127 kmutex_t mod_uninstall_lock; /* protects mod_uninstall_cv */ 128 kthread_id_t mod_aul_thread; 129 130 int modunload_wait; 131 kmutex_t modunload_wait_mutex; 132 kcondvar_t modunload_wait_cv; 133 int modunload_active_count; 134 int modunload_disable_count; 135 136 int isminiroot; /* set if running as miniroot */ 137 int modrootloaded; /* set after root driver and fs are loaded */ 138 int moddebug = 0x0; /* debug flags for module writers */ 139 int swaploaded; /* set after swap driver and fs are loaded */ 140 int bop_io_quiesced = 0; /* set when BOP I/O can no longer be used */ 141 int last_module_id; 142 clock_t mod_uninstall_interval = 0; 143 int ddi_modclose_unload = 1; /* 0 -> just decrement reference */ 144 145 struct devnames *devnamesp; 146 struct devnames orphanlist; 147 148 krwlock_t devinfo_tree_lock; /* obsolete, to be removed */ 149 150 #define MAJBINDFILE "/etc/name_to_major" 151 #define SYSBINDFILE "/etc/name_to_sysnum" 152 153 static char majbind[] = MAJBINDFILE; 154 static char sysbind[] = SYSBINDFILE; 155 static uint_t mod_autounload_key; /* for module autounload detection */ 156 157 extern int obpdebug; 158 extern int make_mbind(char *, int, char *, struct bind **); 159 160 #define DEBUGGER_PRESENT ((boothowto & RB_DEBUG) || (obpdebug != 0)) 161 162 static int minorperm_loaded = 0; 163 164 165 166 void 167 mod_setup(void) 168 { 169 struct sysent *callp; 170 int callnum, exectype; 171 int num_devs; 172 int i; 173 174 /* 175 * Initialize the list of loaded driver dev_ops. 176 * XXX - This must be done before reading the system file so that 177 * forceloads of drivers will work. 178 */ 179 num_devs = read_binding_file(majbind, mb_hashtab, make_mbind); 180 /* 181 * Since read_binding_file is common code, it doesn't enforce that all 182 * of the binding file entries have major numbers <= MAXMAJ32. Thus, 183 * ensure that we don't allocate some massive amount of space due to a 184 * bad entry. We can't have major numbers bigger than MAXMAJ32 185 * until file system support for larger major numbers exists. 186 */ 187 188 /* 189 * Leave space for expansion, but not more than L_MAXMAJ32 190 */ 191 devcnt = MIN(num_devs + 30, L_MAXMAJ32); 192 devopsp = kmem_alloc(devcnt * sizeof (struct dev_ops *), KM_SLEEP); 193 for (i = 0; i < devcnt; i++) 194 devopsp[i] = &mod_nodev_ops; 195 196 init_devnamesp(devcnt); 197 198 /* 199 * Sync up with the work that the stand-alone linker has already done. 200 */ 201 (void) kobj_sync(); 202 203 if (boothowto & RB_DEBUG) 204 kdi_dvec_modavail(); 205 206 make_aliases(mb_hashtab); 207 208 /* 209 * Initialize streams device implementation structures. 210 */ 211 devimpl = kmem_zalloc(devcnt * sizeof (cdevsw_impl_t), KM_SLEEP); 212 213 /* 214 * If the cl_bootstrap module is present, 215 * we should be configured as a cluster. Loading this module 216 * will set "cluster_bootflags" to non-zero. 
217 */ 218 (void) modload("misc", "cl_bootstrap"); 219 220 (void) read_binding_file(sysbind, sb_hashtab, make_mbind); 221 init_syscallnames(NSYSCALL); 222 223 /* 224 * Start up dynamic autoconfiguration framework (dacf). 225 */ 226 mod_hash_init(); 227 dacf_init(); 228 229 /* 230 * Start up IP policy framework (ipp). 231 */ 232 ipp_init(); 233 234 /* 235 * Allocate loadable native system call locks. 236 */ 237 for (callnum = 0, callp = sysent; callnum < NSYSCALL; 238 callnum++, callp++) { 239 if (LOADABLE_SYSCALL(callp)) { 240 if (mod_getsysname(callnum) != NULL) { 241 callp->sy_lock = 242 kobj_zalloc(sizeof (krwlock_t), KM_SLEEP); 243 rw_init(callp->sy_lock, NULL, RW_DEFAULT, NULL); 244 } else { 245 callp->sy_flags &= ~SE_LOADABLE; 246 callp->sy_callc = nosys; 247 } 248 #ifdef DEBUG 249 } else { 250 /* 251 * Do some sanity checks on the sysent table 252 */ 253 switch (callp->sy_flags & SE_RVAL_MASK) { 254 case SE_32RVAL1: 255 /* only r_val1 returned */ 256 case SE_32RVAL1 | SE_32RVAL2: 257 /* r_val1 and r_val2 returned */ 258 case SE_64RVAL: 259 /* 64-bit rval returned */ 260 break; 261 default: 262 cmn_err(CE_WARN, "sysent[%d]: bad flags %x", 263 callnum, callp->sy_flags); 264 } 265 #endif 266 } 267 } 268 269 #ifdef _SYSCALL32_IMPL 270 /* 271 * Allocate loadable system call locks for 32-bit compat syscalls 272 */ 273 for (callnum = 0, callp = sysent32; callnum < NSYSCALL; 274 callnum++, callp++) { 275 if (LOADABLE_SYSCALL(callp)) { 276 if (mod_getsysname(callnum) != NULL) { 277 callp->sy_lock = 278 kobj_zalloc(sizeof (krwlock_t), KM_SLEEP); 279 rw_init(callp->sy_lock, NULL, RW_DEFAULT, NULL); 280 } else { 281 callp->sy_flags &= ~SE_LOADABLE; 282 callp->sy_callc = nosys; 283 } 284 #ifdef DEBUG 285 } else { 286 /* 287 * Do some sanity checks on the sysent table 288 */ 289 switch (callp->sy_flags & SE_RVAL_MASK) { 290 case SE_32RVAL1: 291 /* only r_val1 returned */ 292 case SE_32RVAL1 | SE_32RVAL2: 293 /* r_val1 and r_val2 returned */ 294 case SE_64RVAL: 295 /* 64-bit rval returned */ 296 break; 297 default: 298 cmn_err(CE_WARN, "sysent32[%d]: bad flags %x", 299 callnum, callp->sy_flags); 300 goto skip; 301 } 302 303 /* 304 * Cross-check the native and compatibility tables. 305 */ 306 if (callp->sy_callc == nosys || 307 sysent[callnum].sy_callc == nosys) 308 continue; 309 /* 310 * If only one or the other slot is loadable, then 311 * there's an error -- they should match! 312 */ 313 if ((callp->sy_callc == loadable_syscall) ^ 314 (sysent[callnum].sy_callc == loadable_syscall)) { 315 cmn_err(CE_WARN, "sysent[%d] loadable?", 316 callnum); 317 } 318 /* 319 * This is more of a heuristic test -- if the 320 * system call returns two values in the 32-bit 321 * world, it should probably return two 32-bit 322 * values in the 64-bit world too. 323 */ 324 if (((callp->sy_flags & SE_32RVAL2) == 0) ^ 325 ((sysent[callnum].sy_flags & SE_32RVAL2) == 0)) { 326 cmn_err(CE_WARN, "sysent[%d] rval2 mismatch!", 327 callnum); 328 } 329 skip:; 330 #endif /* DEBUG */ 331 } 332 } 333 #endif /* _SYSCALL32_IMPL */ 334 335 /* 336 * Allocate loadable exec locks. 
(Assumes all execs are loadable) 337 */ 338 for (exectype = 0; exectype < nexectype; exectype++) { 339 execsw[exectype].exec_lock = 340 kobj_zalloc(sizeof (krwlock_t), KM_SLEEP); 341 rw_init(execsw[exectype].exec_lock, NULL, RW_DEFAULT, NULL); 342 } 343 344 read_class_file(); 345 346 /* init thread specific structure for mod_uninstall_all */ 347 tsd_create(&mod_autounload_key, NULL); 348 } 349 350 static int 351 modctl_modload(int use_path, char *filename, int *rvp) 352 { 353 struct modctl *modp; 354 int retval = 0; 355 char *filenamep; 356 int modid; 357 358 filenamep = kmem_zalloc(MOD_MAXPATH, KM_SLEEP); 359 360 if (copyinstr(filename, filenamep, MOD_MAXPATH, 0)) { 361 retval = EFAULT; 362 goto out; 363 } 364 365 filenamep[MOD_MAXPATH - 1] = 0; 366 modp = mod_hold_installed_mod(filenamep, use_path, &retval); 367 368 if (modp == NULL) 369 goto out; 370 371 modp->mod_loadflags |= MOD_NOAUTOUNLOAD; 372 modid = modp->mod_id; 373 mod_release_mod(modp); 374 CPU_STATS_ADDQ(CPU, sys, modload, 1); 375 if (rvp != NULL && copyout(&modid, rvp, sizeof (modid)) != 0) 376 retval = EFAULT; 377 out: 378 kmem_free(filenamep, MOD_MAXPATH); 379 380 return (retval); 381 } 382 383 static int 384 modctl_modunload(modid_t id) 385 { 386 int rval = 0; 387 388 if (id == 0) { 389 #ifdef DEBUG 390 /* 391 * Turn on mod_uninstall_daemon 392 */ 393 if (mod_uninstall_interval == 0) { 394 mod_uninstall_interval = 60; 395 modreap(); 396 return (rval); 397 } 398 #endif 399 mod_uninstall_all(); 400 } else { 401 rval = modunload(id); 402 } 403 return (rval); 404 } 405 406 static int 407 modctl_modinfo(modid_t id, struct modinfo *umodi) 408 { 409 int retval; 410 struct modinfo modi; 411 #if defined(_SYSCALL32_IMPL) 412 int nobase; 413 struct modinfo32 modi32; 414 #endif 415 416 if (get_udatamodel() == DATAMODEL_NATIVE) { 417 if (copyin(umodi, &modi, sizeof (struct modinfo)) != 0) 418 return (EFAULT); 419 } 420 #ifdef _SYSCALL32_IMPL 421 else { 422 bzero(&modi, sizeof (modi)); 423 if (copyin(umodi, &modi32, sizeof (struct modinfo32)) != 0) 424 return (EFAULT); 425 modi.mi_info = modi32.mi_info; 426 modi.mi_id = modi32.mi_id; 427 modi.mi_nextid = modi32.mi_nextid; 428 nobase = modi.mi_info & MI_INFO_NOBASE; 429 } 430 #endif 431 /* 432 * This flag is -only- for the kernels use. 433 */ 434 modi.mi_info &= ~MI_INFO_LINKAGE; 435 436 retval = modinfo(id, &modi); 437 if (retval) 438 return (retval); 439 440 if (get_udatamodel() == DATAMODEL_NATIVE) { 441 if (copyout(&modi, umodi, sizeof (struct modinfo)) != 0) 442 retval = EFAULT; 443 #ifdef _SYSCALL32_IMPL 444 } else { 445 int i; 446 447 if (!nobase && (uintptr_t)modi.mi_base > UINT32_MAX) 448 return (EOVERFLOW); 449 450 modi32.mi_info = modi.mi_info; 451 modi32.mi_state = modi.mi_state; 452 modi32.mi_id = modi.mi_id; 453 modi32.mi_nextid = modi.mi_nextid; 454 modi32.mi_base = (caddr32_t)(uintptr_t)modi.mi_base; 455 modi32.mi_size = modi.mi_size; 456 modi32.mi_rev = modi.mi_rev; 457 modi32.mi_loadcnt = modi.mi_loadcnt; 458 bcopy(modi.mi_name, modi32.mi_name, sizeof (modi32.mi_name)); 459 for (i = 0; i < MODMAXLINK32; i++) { 460 modi32.mi_msinfo[i].msi_p0 = modi.mi_msinfo[i].msi_p0; 461 bcopy(modi.mi_msinfo[i].msi_linkinfo, 462 modi32.mi_msinfo[i].msi_linkinfo, 463 sizeof (modi32.mi_msinfo[0].msi_linkinfo)); 464 } 465 if (copyout(&modi32, umodi, sizeof (struct modinfo32)) != 0) 466 retval = EFAULT; 467 #endif 468 } 469 470 return (retval); 471 } 472 473 /* 474 * Return the last major number in the range of permissible major numbers. 
475 */ 476 /*ARGSUSED*/ 477 static int 478 modctl_modreserve(modid_t id, int *data) 479 { 480 if (copyout(&devcnt, data, sizeof (devcnt)) != 0) 481 return (EFAULT); 482 return (0); 483 } 484 485 static int 486 modctl_add_major(int *data) 487 { 488 struct modconfig mc; 489 int i, rv; 490 struct aliases alias; 491 struct aliases *ap; 492 char name[MAXMODCONFNAME]; 493 char cname[MAXMODCONFNAME]; 494 char *drvname; 495 496 bzero(&mc, sizeof (struct modconfig)); 497 if (get_udatamodel() == DATAMODEL_NATIVE) { 498 if (copyin(data, &mc, sizeof (struct modconfig)) != 0) 499 return (EFAULT); 500 } 501 #ifdef _SYSCALL32_IMPL 502 else { 503 struct modconfig32 modc32; 504 505 if (copyin(data, &modc32, sizeof (struct modconfig32)) != 0) 506 return (EFAULT); 507 else { 508 bcopy(modc32.drvname, mc.drvname, 509 sizeof (modc32.drvname)); 510 bcopy(modc32.drvclass, mc.drvclass, 511 sizeof (modc32.drvclass)); 512 mc.major = modc32.major; 513 mc.num_aliases = modc32.num_aliases; 514 mc.ap = (struct aliases *)(uintptr_t)modc32.ap; 515 } 516 } 517 #endif 518 519 /* 520 * If the driver is already in the mb_hashtab, and the name given 521 * doesn't match that driver's name, fail. Otherwise, pass, since 522 * we may be adding aliases. 523 */ 524 if ((drvname = mod_major_to_name(mc.major)) != NULL && 525 strcmp(drvname, mc.drvname) != 0) 526 return (EINVAL); 527 528 /* 529 * Add each supplied driver alias to mb_hashtab 530 */ 531 ap = mc.ap; 532 for (i = 0; i < mc.num_aliases; i++) { 533 bzero(&alias, sizeof (struct aliases)); 534 535 if (get_udatamodel() == DATAMODEL_NATIVE) { 536 if (copyin(ap, &alias, sizeof (struct aliases)) != 0) 537 return (EFAULT); 538 539 if (alias.a_len > MAXMODCONFNAME) 540 return (EINVAL); 541 542 if (copyin(alias.a_name, name, alias.a_len) != 0) 543 return (EFAULT); 544 545 if (name[alias.a_len - 1] != '\0') 546 return (EINVAL); 547 } 548 #ifdef _SYSCALL32_IMPL 549 else { 550 struct aliases32 al32; 551 552 bzero(&al32, sizeof (struct aliases32)); 553 if (copyin(ap, &al32, sizeof (struct aliases32)) != 0) 554 return (EFAULT); 555 556 if (al32.a_len > MAXMODCONFNAME) 557 return (EINVAL); 558 559 if (copyin((void *)(uintptr_t)al32.a_name, 560 name, al32.a_len) != 0) 561 return (EFAULT); 562 563 if (name[al32.a_len - 1] != '\0') 564 return (EINVAL); 565 566 alias.a_next = (void *)(uintptr_t)al32.a_next; 567 } 568 #endif 569 check_esc_sequences(name, cname); 570 (void) make_mbind(cname, mc.major, NULL, mb_hashtab); 571 ap = alias.a_next; 572 } 573 574 /* 575 * Try to establish an mbinding for mc.drvname, and add it to devnames. 
576 * Add class if any after establishing the major number 577 */ 578 (void) make_mbind(mc.drvname, mc.major, NULL, mb_hashtab); 579 rv = make_devname(mc.drvname, mc.major); 580 581 if (rv == 0) { 582 if (mc.drvclass[0] != '\0') 583 add_class(mc.drvname, mc.drvclass); 584 (void) i_ddi_load_drvconf(mc.major); 585 i_ddi_bind_devs(); 586 i_ddi_di_cache_invalidate(KM_SLEEP); 587 } 588 return (rv); 589 } 590 591 static int 592 modctl_rem_major(major_t major) 593 { 594 struct devnames *dnp; 595 596 if (major >= devcnt) 597 return (EINVAL); 598 599 /* mark devnames as removed */ 600 dnp = &devnamesp[major]; 601 LOCK_DEV_OPS(&dnp->dn_lock); 602 if (dnp->dn_name == NULL || 603 (dnp->dn_flags & (DN_DRIVER_REMOVED | DN_TAKEN_GETUDEV))) { 604 UNLOCK_DEV_OPS(&dnp->dn_lock); 605 return (EINVAL); 606 } 607 dnp->dn_flags |= DN_DRIVER_REMOVED; 608 pm_driver_removed(major); 609 UNLOCK_DEV_OPS(&dnp->dn_lock); 610 611 (void) i_ddi_unload_drvconf(major); 612 i_ddi_unbind_devs(major); 613 i_ddi_di_cache_invalidate(KM_SLEEP); 614 return (0); 615 } 616 617 static struct vfs * 618 path_to_vfs(char *name) 619 { 620 vnode_t *vp; 621 struct vfs *vfsp; 622 623 if (lookupname(name, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) 624 return (NULL); 625 626 vfsp = vp->v_vfsp; 627 VN_RELE(vp); 628 return (vfsp); 629 } 630 631 static int 632 new_vfs_in_modpath() 633 { 634 static int n_modpath = 0; 635 static char *modpath_copy; 636 static struct pathvfs { 637 char *path; 638 struct vfs *vfsp; 639 } *pathvfs; 640 641 int i, new_vfs = 0; 642 char *tmp, *tmp1; 643 struct vfs *vfsp; 644 645 if (n_modpath != 0) { 646 for (i = 0; i < n_modpath; i++) { 647 vfsp = path_to_vfs(pathvfs[i].path); 648 if (vfsp != pathvfs[i].vfsp) { 649 pathvfs[i].vfsp = vfsp; 650 if (vfsp) 651 new_vfs = 1; 652 } 653 } 654 return (new_vfs); 655 } 656 657 /* 658 * First call, initialize the pathvfs structure 659 */ 660 modpath_copy = i_ddi_strdup(default_path, KM_SLEEP); 661 tmp = modpath_copy; 662 n_modpath = 1; 663 tmp1 = strchr(tmp, ' '); 664 while (tmp1) { 665 *tmp1 = '\0'; 666 n_modpath++; 667 tmp = tmp1 + 1; 668 tmp1 = strchr(tmp, ' '); 669 } 670 671 pathvfs = kmem_zalloc(n_modpath * sizeof (struct pathvfs), KM_SLEEP); 672 tmp = modpath_copy; 673 for (i = 0; i < n_modpath; i++) { 674 pathvfs[i].path = tmp; 675 vfsp = path_to_vfs(tmp); 676 pathvfs[i].vfsp = vfsp; 677 tmp += strlen(tmp) + 1; 678 } 679 return (1); /* always reread driver.conf the first time */ 680 } 681 682 static int modctl_load_drvconf(major_t major) 683 { 684 int ret; 685 686 if (major != (major_t)-1) { 687 ret = i_ddi_load_drvconf(major); 688 if (ret == 0) 689 i_ddi_bind_devs(); 690 return (ret); 691 } 692 693 /* 694 * We are invoked to rescan new driver.conf files. It is 695 * only necessary if a new file system was mounted in the 696 * module_path. Because rescanning driver.conf files can 697 * take some time on older platforms (sun4m), the following 698 * code skips unnecessary driver.conf rescans to optimize 699 * boot performance. 
700 */ 701 if (new_vfs_in_modpath()) { 702 (void) i_ddi_load_drvconf((major_t)-1); 703 /* 704 * If we are still initializing io subsystem, 705 * load drivers with ddi-forceattach property 706 */ 707 if (!i_ddi_io_initialized()) 708 i_ddi_forceattach_drivers(); 709 } 710 return (0); 711 } 712 713 static int 714 modctl_unload_drvconf(major_t major) 715 { 716 int ret; 717 718 if (major >= devcnt) 719 return (EINVAL); 720 721 ret = i_ddi_unload_drvconf(major); 722 if (ret != 0) 723 return (ret); 724 (void) i_ddi_unbind_devs(major); 725 726 return (0); 727 } 728 729 static void 730 check_esc_sequences(char *str, char *cstr) 731 { 732 int i; 733 size_t len; 734 char *p; 735 736 len = strlen(str); 737 for (i = 0; i < len; i++, str++, cstr++) { 738 if (*str != '\\') { 739 *cstr = *str; 740 } else { 741 p = str + 1; 742 /* 743 * we only handle octal escape sequences for SPACE 744 */ 745 if (*p++ == '0' && *p++ == '4' && *p == '0') { 746 *cstr = ' '; 747 str += 3; 748 } else { 749 *cstr = *str; 750 } 751 } 752 } 753 *cstr = 0; 754 } 755 756 static int 757 modctl_getmodpathlen(int *data) 758 { 759 int len; 760 len = strlen(default_path); 761 if (copyout(&len, data, sizeof (len)) != 0) 762 return (EFAULT); 763 return (0); 764 } 765 766 static int 767 modctl_getmodpath(char *data) 768 { 769 if (copyout(default_path, data, strlen(default_path) + 1) != 0) 770 return (EFAULT); 771 return (0); 772 } 773 774 static int 775 modctl_read_sysbinding_file(void) 776 { 777 (void) read_binding_file(sysbind, sb_hashtab, make_mbind); 778 return (0); 779 } 780 781 static int 782 modctl_getmaj(char *uname, uint_t ulen, int *umajorp) 783 { 784 char name[256]; 785 int retval; 786 major_t major; 787 788 if ((retval = copyinstr(uname, name, 789 (ulen < 256) ? ulen : 256, 0)) != 0) 790 return (retval); 791 if ((major = mod_name_to_major(name)) == (major_t)-1) 792 return (ENODEV); 793 if (copyout(&major, umajorp, sizeof (major_t)) != 0) 794 return (EFAULT); 795 return (0); 796 } 797 798 static int 799 modctl_getname(char *uname, uint_t ulen, int *umajorp) 800 { 801 char *name; 802 major_t major; 803 804 if (copyin(umajorp, &major, sizeof (major)) != 0) 805 return (EFAULT); 806 if ((name = mod_major_to_name(major)) == NULL) 807 return (ENODEV); 808 if ((strlen(name) + 1) > ulen) 809 return (ENOSPC); 810 return (copyoutstr(name, uname, ulen, NULL)); 811 } 812 813 static int 814 modctl_devt2instance(dev_t dev, int *uinstancep) 815 { 816 int instance; 817 818 if ((instance = dev_to_instance(dev)) == -1) 819 return (EINVAL); 820 821 return (copyout(&instance, uinstancep, sizeof (int))); 822 } 823 824 /* 825 * Return the sizeof of the device id. 826 */ 827 static int 828 modctl_sizeof_devid(dev_t dev, uint_t *len) 829 { 830 uint_t sz; 831 ddi_devid_t devid; 832 833 /* get device id */ 834 if (ddi_lyr_get_devid(dev, &devid) == DDI_FAILURE) 835 return (EINVAL); 836 837 sz = ddi_devid_sizeof(devid); 838 ddi_devid_free(devid); 839 840 /* copyout device id size */ 841 if (copyout(&sz, len, sizeof (sz)) != 0) 842 return (EFAULT); 843 844 return (0); 845 } 846 847 /* 848 * Return a copy of the device id. 
849 */ 850 static int 851 modctl_get_devid(dev_t dev, uint_t len, ddi_devid_t udevid) 852 { 853 uint_t sz; 854 ddi_devid_t devid; 855 int err = 0; 856 857 /* get device id */ 858 if (ddi_lyr_get_devid(dev, &devid) == DDI_FAILURE) 859 return (EINVAL); 860 861 sz = ddi_devid_sizeof(devid); 862 863 /* Error if device id is larger than space allocated */ 864 if (sz > len) { 865 ddi_devid_free(devid); 866 return (ENOSPC); 867 } 868 869 /* copy out device id */ 870 if (copyout(devid, udevid, sz) != 0) 871 err = EFAULT; 872 ddi_devid_free(devid); 873 return (err); 874 } 875 876 /* 877 * return the /devices paths associated with the specified devid and 878 * minor name. 879 */ 880 /*ARGSUSED*/ 881 static int 882 modctl_devid2paths(ddi_devid_t udevid, char *uminor_name, uint_t flag, 883 size_t *ulensp, char *upaths) 884 { 885 ddi_devid_t devid = NULL; 886 int devid_len; 887 char *minor_name = NULL; 888 dev_info_t *dip = NULL; 889 struct ddi_minor_data *dmdp; 890 char *path = NULL; 891 int ulens; 892 int lens; 893 int len; 894 dev_t *devlist = NULL; 895 int ndevs; 896 int i; 897 int ret = 0; 898 899 /* 900 * If upaths is NULL then we are only computing the amount of space 901 * needed to hold the paths and returning the value in *ulensp. If we 902 * are copying out paths then we get the amount of space allocated by 903 * the caller. If the actual space needed for paths is larger, or 904 * things are changing out from under us, then we return EAGAIN. 905 */ 906 if (upaths) { 907 if (ulensp == NULL) 908 return (EINVAL); 909 if (copyin(ulensp, &ulens, sizeof (ulens)) != 0) 910 return (EFAULT); 911 } 912 913 /* 914 * copyin enough of the devid to determine the length then 915 * reallocate and copy in the entire devid. 916 */ 917 devid_len = ddi_devid_sizeof(NULL); 918 devid = kmem_alloc(devid_len, KM_SLEEP); 919 if (copyin(udevid, devid, devid_len)) { 920 ret = EFAULT; 921 goto out; 922 } 923 len = devid_len; 924 devid_len = ddi_devid_sizeof(devid); 925 kmem_free(devid, len); 926 devid = kmem_alloc(devid_len, KM_SLEEP); 927 if (copyin(udevid, devid, devid_len)) { 928 ret = EFAULT; 929 goto out; 930 } 931 932 /* copyin the minor name if specified. */ 933 minor_name = uminor_name; 934 if ((minor_name != DEVID_MINOR_NAME_ALL) && 935 (minor_name != DEVID_MINOR_NAME_ALL_CHR) && 936 (minor_name != DEVID_MINOR_NAME_ALL_BLK)) { 937 minor_name = kmem_alloc(MAXPATHLEN, KM_SLEEP); 938 if (copyinstr(uminor_name, minor_name, MAXPATHLEN, 0)) { 939 ret = EFAULT; 940 goto out; 941 } 942 } 943 944 /* 945 * Use existing function to resolve the devid into a devlist. 946 * 947 * NOTE: there is a loss of spectype information in the current 948 * ddi_lyr_devid_to_devlist implementation. We work around this by not 949 * passing down DEVID_MINOR_NAME_ALL here, but reproducing all minor 950 * node forms in the loop processing the devlist below. It would be 951 * best if at some point the use of this interface here was replaced 952 * with a path oriented call. 953 */ 954 if (ddi_lyr_devid_to_devlist(devid, 955 (minor_name == DEVID_MINOR_NAME_ALL) ? 
956 DEVID_MINOR_NAME_ALL_CHR : minor_name, 957 &ndevs, &devlist) != DDI_SUCCESS) { 958 ret = EINVAL; 959 goto out; 960 } 961 962 /* 963 * loop over the devlist, converting each devt to a path and doing 964 * a copyout of the path and computation of the amount of space 965 * needed to hold all the paths 966 */ 967 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 968 for (i = 0, lens = 0; i < ndevs; i++) { 969 970 /* find the dip associated with the dev_t */ 971 if ((dip = e_ddi_hold_devi_by_dev(devlist[i], 0)) == NULL) 972 continue; 973 974 /* loop over all the minor nodes, skipping ones we don't want */ 975 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 976 if ((dmdp->ddm_dev != devlist[i]) || 977 (dmdp->type != DDM_MINOR)) 978 continue; 979 980 if ((minor_name != DEVID_MINOR_NAME_ALL) && 981 (minor_name != DEVID_MINOR_NAME_ALL_CHR) && 982 (minor_name != DEVID_MINOR_NAME_ALL_BLK) && 983 strcmp(minor_name, dmdp->ddm_name)) 984 continue; 985 else { 986 if ((minor_name == DEVID_MINOR_NAME_ALL_CHR) && 987 (dmdp->ddm_spec_type != S_IFCHR)) 988 continue; 989 if ((minor_name == DEVID_MINOR_NAME_ALL_BLK) && 990 (dmdp->ddm_spec_type != S_IFBLK)) 991 continue; 992 } 993 994 /* XXX need ddi_pathname_minor(dmdp, path); interface */ 995 if (ddi_dev_pathname(dmdp->ddm_dev, dmdp->ddm_spec_type, 996 path) != DDI_SUCCESS) { 997 ret = EAGAIN; 998 goto out; 999 } 1000 len = strlen(path) + 1; 1001 *(path + len) = '\0'; /* set double termination */ 1002 lens += len; 1003 1004 /* copyout the path with double terminations */ 1005 if (upaths) { 1006 if (lens > ulens) { 1007 ret = EAGAIN; 1008 goto out; 1009 } 1010 if (copyout(path, upaths, len + 1)) { 1011 ret = EFAULT; 1012 goto out; 1013 } 1014 upaths += len; 1015 } 1016 } 1017 ddi_release_devi(dip); 1018 dip = NULL; 1019 } 1020 lens++; /* add one for double termination */ 1021 1022 /* copy out the amount of space needed to hold the paths */ 1023 if (ulensp && copyout(&lens, ulensp, sizeof (lens))) { 1024 ret = EFAULT; 1025 goto out; 1026 } 1027 ret = 0; 1028 1029 out: if (dip) 1030 ddi_release_devi(dip); 1031 if (path) 1032 kmem_free(path, MAXPATHLEN); 1033 if (devlist) 1034 ddi_lyr_free_devlist(devlist, ndevs); 1035 if (minor_name && 1036 (minor_name != DEVID_MINOR_NAME_ALL) && 1037 (minor_name != DEVID_MINOR_NAME_ALL_CHR) && 1038 (minor_name != DEVID_MINOR_NAME_ALL_BLK)) 1039 kmem_free(minor_name, MAXPATHLEN); 1040 if (devid) 1041 kmem_free(devid, devid_len); 1042 return (ret); 1043 } 1044 1045 /* 1046 * Return the size of the minor name. 1047 */ 1048 static int 1049 modctl_sizeof_minorname(dev_t dev, int spectype, uint_t *len) 1050 { 1051 uint_t sz; 1052 char *name; 1053 1054 /* get the minor name */ 1055 if (ddi_lyr_get_minor_name(dev, spectype, &name) == DDI_FAILURE) 1056 return (EINVAL); 1057 1058 sz = strlen(name) + 1; 1059 kmem_free(name, sz); 1060 1061 /* copy out the size of the minor name */ 1062 if (copyout(&sz, len, sizeof (sz)) != 0) 1063 return (EFAULT); 1064 1065 return (0); 1066 } 1067 1068 /* 1069 * Return the minor name. 
1070 */ 1071 static int 1072 modctl_get_minorname(dev_t dev, int spectype, uint_t len, char *uname) 1073 { 1074 uint_t sz; 1075 char *name; 1076 int err = 0; 1077 1078 /* get the minor name */ 1079 if (ddi_lyr_get_minor_name(dev, spectype, &name) == DDI_FAILURE) 1080 return (EINVAL); 1081 1082 sz = strlen(name) + 1; 1083 1084 /* Error if the minor name is larger than the space allocated */ 1085 if (sz > len) { 1086 kmem_free(name, sz); 1087 return (ENOSPC); 1088 } 1089 1090 /* copy out the minor name */ 1091 if (copyout(name, uname, sz) != 0) 1092 err = EFAULT; 1093 kmem_free(name, sz); 1094 return (err); 1095 } 1096 1097 /* 1098 * Return the size of the devfspath name. 1099 */ 1100 static int 1101 modctl_devfspath_len(dev_t dev, int spectype, uint_t *len) 1102 { 1103 uint_t sz; 1104 char *name; 1105 1106 /* get the path name */ 1107 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1108 if (ddi_dev_pathname(dev, spectype, name) == DDI_FAILURE) { 1109 kmem_free(name, MAXPATHLEN); 1110 return (EINVAL); 1111 } 1112 1113 sz = strlen(name) + 1; 1114 kmem_free(name, MAXPATHLEN); 1115 1116 /* copy out the size of the path name */ 1117 if (copyout(&sz, len, sizeof (sz)) != 0) 1118 return (EFAULT); 1119 1120 return (0); 1121 } 1122 1123 /* 1124 * Return the devfspath name. 1125 */ 1126 static int 1127 modctl_devfspath(dev_t dev, int spectype, uint_t len, char *uname) 1128 { 1129 uint_t sz; 1130 char *name; 1131 int err = 0; 1132 1133 /* get the path name */ 1134 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1135 if (ddi_dev_pathname(dev, spectype, name) == DDI_FAILURE) { 1136 kmem_free(name, MAXPATHLEN); 1137 return (EINVAL); 1138 } 1139 1140 sz = strlen(name) + 1; 1141 1142 /* Error if the path name is larger than the space allocated */ 1143 if (sz > len) { 1144 kmem_free(name, MAXPATHLEN); 1145 return (ENOSPC); 1146 } 1147 1148 /* copy out the path name */ 1149 if (copyout(name, uname, sz) != 0) 1150 err = EFAULT; 1151 kmem_free(name, MAXPATHLEN); 1152 return (err); 1153 } 1154 1155 static int 1156 modctl_get_fbname(char *path) 1157 { 1158 extern dev_t fbdev; 1159 char *pathname = NULL; 1160 int rval = 0; 1161 1162 /* make sure fbdev is set before we plunge in */ 1163 if (fbdev == NODEV) 1164 return (ENODEV); 1165 1166 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1167 if ((rval = ddi_dev_pathname(fbdev, S_IFCHR, 1168 pathname)) == DDI_SUCCESS) { 1169 if (copyout(pathname, path, strlen(pathname)+1) != 0) { 1170 rval = EFAULT; 1171 } 1172 } 1173 kmem_free(pathname, MAXPATHLEN); 1174 return (rval); 1175 } 1176 1177 /* 1178 * modctl_reread_dacf() 1179 * Reread the dacf rules database from the named binding file. 1180 * If NULL is specified, pass along the NULL, it means 'use the default'. 
1181 */ 1182 static int 1183 modctl_reread_dacf(char *path) 1184 { 1185 int rval = 0; 1186 char *filename, *filenamep; 1187 1188 filename = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1189 1190 if (path == NULL) { 1191 filenamep = NULL; 1192 } else { 1193 if (copyinstr(path, filename, MAXPATHLEN, 0) != 0) { 1194 rval = EFAULT; 1195 goto out; 1196 } 1197 filenamep = filename; 1198 filenamep[MAXPATHLEN - 1] = '\0'; 1199 } 1200 1201 rval = read_dacf_binding_file(filenamep); 1202 out: 1203 kmem_free(filename, MAXPATHLEN); 1204 return (rval); 1205 } 1206 1207 /*ARGSUSED*/ 1208 static int 1209 modctl_modevents(int subcmd, uintptr_t a2, uintptr_t a3, uintptr_t a4, 1210 uint_t flag) 1211 { 1212 int error = 0; 1213 char *filenamep; 1214 1215 switch (subcmd) { 1216 1217 case MODEVENTS_FLUSH: 1218 /* flush all currently queued events */ 1219 log_sysevent_flushq(subcmd, flag); 1220 break; 1221 1222 case MODEVENTS_SET_DOOR_UPCALL_FILENAME: 1223 /* 1224 * bind door_upcall to filename 1225 * this should only be done once per invocation 1226 * of the event daemon. 1227 */ 1228 1229 filenamep = kmem_zalloc(MOD_MAXPATH, KM_SLEEP); 1230 1231 if (copyinstr((char *)a2, filenamep, MOD_MAXPATH, 0)) { 1232 error = EFAULT; 1233 } else { 1234 error = log_sysevent_filename(filenamep); 1235 } 1236 kmem_free(filenamep, MOD_MAXPATH); 1237 break; 1238 1239 case MODEVENTS_GETDATA: 1240 error = log_sysevent_copyout_data((sysevent_id_t *)a2, 1241 (size_t)a3, (caddr_t)a4); 1242 break; 1243 1244 case MODEVENTS_FREEDATA: 1245 error = log_sysevent_free_data((sysevent_id_t *)a2); 1246 break; 1247 case MODEVENTS_POST_EVENT: 1248 error = log_usr_sysevent((sysevent_t *)a2, (uint32_t)a3, 1249 (sysevent_id_t *)a4); 1250 break; 1251 case MODEVENTS_REGISTER_EVENT: 1252 error = log_sysevent_register((char *)a2, (char *)a3, 1253 (se_pubsub_t *)a4); 1254 break; 1255 default: 1256 error = EINVAL; 1257 } 1258 1259 return (error); 1260 } 1261 1262 static void 1263 free_mperm(mperm_t *mp) 1264 { 1265 int len; 1266 1267 if (mp->mp_minorname) { 1268 len = strlen(mp->mp_minorname) + 1; 1269 kmem_free(mp->mp_minorname, len); 1270 } 1271 kmem_free(mp, sizeof (mperm_t)); 1272 } 1273 1274 #define MP_NO_DRV_ERR \ 1275 "/etc/minor_perm: no driver for %s\n" 1276 1277 #define MP_EMPTY_MINOR \ 1278 "/etc/minor_perm: empty minor name for driver %s\n" 1279 1280 #define MP_NO_MINOR \ 1281 "/etc/minor_perm: no minor matching %s for driver %s\n" 1282 1283 /* 1284 * Remove mperm entry with matching minorname 1285 */ 1286 static void 1287 rem_minorperm(major_t major, char *drvname, mperm_t *mp, int is_clone) 1288 { 1289 mperm_t **mp_head; 1290 mperm_t *freemp = NULL; 1291 struct devnames *dnp = &devnamesp[major]; 1292 mperm_t **wildmp; 1293 1294 ASSERT(mp->mp_minorname && strlen(mp->mp_minorname) > 0); 1295 1296 LOCK_DEV_OPS(&dnp->dn_lock); 1297 if (strcmp(mp->mp_minorname, "*") == 0) { 1298 wildmp = ((is_clone == 0) ? 
1299 &dnp->dn_mperm_wild : &dnp->dn_mperm_clone); 1300 if (*wildmp) 1301 freemp = *wildmp; 1302 *wildmp = NULL; 1303 } else { 1304 mp_head = &dnp->dn_mperm; 1305 while (*mp_head) { 1306 if (strcmp((*mp_head)->mp_minorname, 1307 mp->mp_minorname) != 0) { 1308 mp_head = &(*mp_head)->mp_next; 1309 continue; 1310 } 1311 /* remove the entry */ 1312 freemp = *mp_head; 1313 *mp_head = freemp->mp_next; 1314 break; 1315 } 1316 } 1317 if (freemp) { 1318 if (moddebug & MODDEBUG_MINORPERM) { 1319 cmn_err(CE_CONT, "< %s %s 0%o %d %d\n", 1320 drvname, freemp->mp_minorname, 1321 freemp->mp_mode & 0777, 1322 freemp->mp_uid, freemp->mp_gid); 1323 } 1324 free_mperm(freemp); 1325 } else { 1326 if (moddebug & MODDEBUG_MINORPERM) { 1327 cmn_err(CE_CONT, MP_NO_MINOR, 1328 drvname, mp->mp_minorname); 1329 } 1330 } 1331 1332 UNLOCK_DEV_OPS(&dnp->dn_lock); 1333 } 1334 1335 /* 1336 * Add minor perm entry 1337 */ 1338 static void 1339 add_minorperm(major_t major, char *drvname, mperm_t *mp, int is_clone) 1340 { 1341 mperm_t **mp_head; 1342 mperm_t *freemp = NULL; 1343 struct devnames *dnp = &devnamesp[major]; 1344 mperm_t **wildmp; 1345 1346 ASSERT(mp->mp_minorname && strlen(mp->mp_minorname) > 0); 1347 1348 /* 1349 * Note that update_drv replace semantics require 1350 * replacing matching entries with the new permissions. 1351 */ 1352 LOCK_DEV_OPS(&dnp->dn_lock); 1353 if (strcmp(mp->mp_minorname, "*") == 0) { 1354 wildmp = ((is_clone == 0) ? 1355 &dnp->dn_mperm_wild : &dnp->dn_mperm_clone); 1356 if (*wildmp) 1357 freemp = *wildmp; 1358 *wildmp = mp; 1359 } else { 1360 mperm_t *p, *v = NULL; 1361 for (p = dnp->dn_mperm; p; v = p, p = p->mp_next) { 1362 if (strcmp(p->mp_minorname, mp->mp_minorname) == 0) { 1363 if (v == NULL) 1364 dnp->dn_mperm = mp; 1365 else 1366 v->mp_next = mp; 1367 mp->mp_next = p->mp_next; 1368 freemp = p; 1369 goto replaced; 1370 } 1371 } 1372 if (p == NULL) { 1373 mp_head = &dnp->dn_mperm; 1374 if (*mp_head == NULL) { 1375 *mp_head = mp; 1376 } else { 1377 mp->mp_next = *mp_head; 1378 *mp_head = mp; 1379 } 1380 } 1381 } 1382 replaced: 1383 if (freemp) { 1384 if (moddebug & MODDEBUG_MINORPERM) { 1385 cmn_err(CE_CONT, "< %s %s 0%o %d %d\n", 1386 drvname, freemp->mp_minorname, 1387 freemp->mp_mode & 0777, 1388 freemp->mp_uid, freemp->mp_gid); 1389 } 1390 free_mperm(freemp); 1391 } 1392 if (moddebug & MODDEBUG_MINORPERM) { 1393 cmn_err(CE_CONT, "> %s %s 0%o %d %d\n", 1394 drvname, mp->mp_minorname, mp->mp_mode & 0777, 1395 mp->mp_uid, mp->mp_gid); 1396 } 1397 UNLOCK_DEV_OPS(&dnp->dn_lock); 1398 } 1399 1400 1401 static int 1402 process_minorperm(int cmd, nvlist_t *nvl) 1403 { 1404 char *minor; 1405 major_t major; 1406 mperm_t *mp; 1407 nvpair_t *nvp; 1408 char *name; 1409 int is_clone; 1410 major_t minmaj; 1411 1412 ASSERT(cmd == MODLOADMINORPERM || 1413 cmd == MODADDMINORPERM || cmd == MODREMMINORPERM); 1414 1415 nvp = NULL; 1416 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { 1417 name = nvpair_name(nvp); 1418 1419 is_clone = 0; 1420 (void) nvpair_value_string(nvp, &minor); 1421 major = ddi_name_to_major(name); 1422 if (major != (major_t)-1) { 1423 mp = kmem_zalloc(sizeof (*mp), KM_SLEEP); 1424 if (minor == NULL || strlen(minor) == 0) { 1425 if (moddebug & MODDEBUG_MINORPERM) { 1426 cmn_err(CE_CONT, MP_EMPTY_MINOR, name); 1427 } 1428 minor = "*"; 1429 } 1430 1431 /* 1432 * The minor name of a node using the clone 1433 * driver must be the driver name. To avoid 1434 * multiple searches, we map entries in the form 1435 * clone:<driver> to <driver>:*. 
This also allows us 1436 * to filter out some of the litter in /etc/minor_perm. 1437 * Minor perm alias entries where the name is not 1438 * the driver kept on the clone list itself. 1439 * This all seems very fragile as a driver could 1440 * be introduced with an existing alias name. 1441 */ 1442 if (strcmp(name, "clone") == 0) { 1443 minmaj = ddi_name_to_major(minor); 1444 if (minmaj != (major_t)-1) { 1445 if (moddebug & MODDEBUG_MINORPERM) { 1446 cmn_err(CE_CONT, 1447 "mapping %s:%s to %s:*\n", 1448 name, minor, minor); 1449 } 1450 major = minmaj; 1451 name = minor; 1452 minor = "*"; 1453 is_clone = 1; 1454 } 1455 } 1456 1457 if (mp) { 1458 mp->mp_minorname = 1459 i_ddi_strdup(minor, KM_SLEEP); 1460 } 1461 } else { 1462 mp = NULL; 1463 if (moddebug & MODDEBUG_MINORPERM) { 1464 cmn_err(CE_CONT, MP_NO_DRV_ERR, name); 1465 } 1466 } 1467 1468 /* mode */ 1469 nvp = nvlist_next_nvpair(nvl, nvp); 1470 ASSERT(strcmp(nvpair_name(nvp), "mode") == 0); 1471 if (mp) 1472 (void) nvpair_value_int32(nvp, (int *)&mp->mp_mode); 1473 /* uid */ 1474 nvp = nvlist_next_nvpair(nvl, nvp); 1475 ASSERT(strcmp(nvpair_name(nvp), "uid") == 0); 1476 if (mp) 1477 (void) nvpair_value_int32(nvp, &mp->mp_uid); 1478 /* gid */ 1479 nvp = nvlist_next_nvpair(nvl, nvp); 1480 ASSERT(strcmp(nvpair_name(nvp), "gid") == 0); 1481 if (mp) { 1482 (void) nvpair_value_int32(nvp, &mp->mp_gid); 1483 1484 if (cmd == MODREMMINORPERM) { 1485 rem_minorperm(major, name, mp, is_clone); 1486 free_mperm(mp); 1487 } else { 1488 add_minorperm(major, name, mp, is_clone); 1489 } 1490 } 1491 } 1492 1493 if (cmd == MODLOADMINORPERM) 1494 minorperm_loaded = 1; 1495 1496 /* 1497 * Reset permissions of cached dv_nodes 1498 */ 1499 (void) devfs_reset_perm(DV_RESET_PERM); 1500 1501 return (0); 1502 } 1503 1504 static int 1505 modctl_minorperm(int cmd, char *usrbuf, size_t buflen) 1506 { 1507 int error; 1508 nvlist_t *nvl; 1509 char *buf = kmem_alloc(buflen, KM_SLEEP); 1510 1511 if ((error = ddi_copyin(usrbuf, buf, buflen, 0)) != 0) { 1512 kmem_free(buf, buflen); 1513 return (error); 1514 } 1515 1516 error = nvlist_unpack(buf, buflen, &nvl, KM_SLEEP); 1517 kmem_free(buf, buflen); 1518 if (error) 1519 return (error); 1520 1521 error = process_minorperm(cmd, nvl); 1522 nvlist_free(nvl); 1523 return (error); 1524 } 1525 1526 struct walk_args { 1527 char *wa_drvname; 1528 list_t wa_pathlist; 1529 }; 1530 1531 struct path_elem { 1532 char *pe_dir; 1533 char *pe_nodename; 1534 list_node_t pe_node; 1535 int pe_dirlen; 1536 }; 1537 1538 /*ARGSUSED*/ 1539 static int 1540 modctl_inst_walker(const char *path, in_node_t *np, in_drv_t *dp, void *arg) 1541 { 1542 struct walk_args *wargs = (struct walk_args *)arg; 1543 struct path_elem *pe; 1544 char *nodename; 1545 1546 if (strcmp(dp->ind_driver_name, wargs->wa_drvname) != 0) 1547 return (INST_WALK_CONTINUE); 1548 1549 pe = kmem_zalloc(sizeof (*pe), KM_SLEEP); 1550 pe->pe_dir = i_ddi_strdup((char *)path, KM_SLEEP); 1551 pe->pe_dirlen = strlen(pe->pe_dir) + 1; 1552 ASSERT(strrchr(pe->pe_dir, '/') != NULL); 1553 nodename = strrchr(pe->pe_dir, '/'); 1554 *nodename++ = 0; 1555 pe->pe_nodename = nodename; 1556 list_insert_tail(&wargs->wa_pathlist, pe); 1557 1558 return (INST_WALK_CONTINUE); 1559 } 1560 1561 static int 1562 modctl_remdrv_cleanup(const char *u_drvname) 1563 { 1564 struct walk_args *wargs; 1565 struct path_elem *pe; 1566 char *drvname; 1567 int err, rval = 0; 1568 1569 drvname = kmem_alloc(MAXMODCONFNAME, KM_SLEEP); 1570 if ((err = copyinstr(u_drvname, drvname, MAXMODCONFNAME, 0))) { 1571 kmem_free(drvname, 
MAXMODCONFNAME); 1572 return (err); 1573 } 1574 1575 /* 1576 * First go through the instance database. For each 1577 * instance of a device bound to the driver being 1578 * removed, remove any underlying devfs attribute nodes. 1579 * 1580 * This is a two-step process. First we go through 1581 * the instance data itself, constructing a list of 1582 * the nodes discovered. The second step is then 1583 * to find and remove any devfs attribute nodes 1584 * for the instances discovered in the first step. 1585 * The two-step process avoids any difficulties 1586 * which could arise by holding the instance data 1587 * lock with simultaneous devfs operations. 1588 */ 1589 wargs = kmem_zalloc(sizeof (*wargs), KM_SLEEP); 1590 1591 wargs->wa_drvname = drvname; 1592 list_create(&wargs->wa_pathlist, 1593 sizeof (struct path_elem), offsetof(struct path_elem, pe_node)); 1594 1595 (void) e_ddi_walk_instances(modctl_inst_walker, (void *)wargs); 1596 1597 for (pe = list_head(&wargs->wa_pathlist); pe != NULL; 1598 pe = list_next(&wargs->wa_pathlist, pe)) { 1599 err = devfs_remdrv_cleanup((const char *)pe->pe_dir, 1600 (const char *)pe->pe_nodename); 1601 if (rval == 0) 1602 rval = err; 1603 } 1604 1605 while ((pe = list_head(&wargs->wa_pathlist)) != NULL) { 1606 list_remove(&wargs->wa_pathlist, pe); 1607 kmem_free(pe->pe_dir, pe->pe_dirlen); 1608 kmem_free(pe, sizeof (*pe)); 1609 } 1610 kmem_free(wargs, sizeof (*wargs)); 1611 1612 /* 1613 * Pseudo nodes aren't recorded in the instance database 1614 * so any such nodes need to be handled separately. 1615 */ 1616 err = devfs_remdrv_cleanup("pseudo", (const char *)drvname); 1617 if (rval == 0) 1618 rval = err; 1619 1620 kmem_free(drvname, MAXMODCONFNAME); 1621 return (rval); 1622 } 1623 1624 static int 1625 modctl_allocpriv(const char *name) 1626 { 1627 char *pstr = kmem_alloc(PRIVNAME_MAX, KM_SLEEP); 1628 int error; 1629 1630 if ((error = copyinstr(name, pstr, PRIVNAME_MAX, 0))) { 1631 kmem_free(pstr, PRIVNAME_MAX); 1632 return (error); 1633 } 1634 error = priv_getbyname(pstr, PRIV_ALLOC); 1635 if (error < 0) 1636 error = -error; 1637 else 1638 error = 0; 1639 kmem_free(pstr, PRIVNAME_MAX); 1640 return (error); 1641 } 1642 1643 /*ARGSUSED5*/ 1644 int 1645 modctl(int cmd, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, 1646 uintptr_t a5) 1647 { 1648 int error = EINVAL; 1649 dev_t dev; 1650 1651 if (secpolicy_modctl(CRED(), cmd) != 0) 1652 return (set_errno(EPERM)); 1653 1654 switch (cmd) { 1655 case MODLOAD: /* load a module */ 1656 error = modctl_modload((int)a1, (char *)a2, (int *)a3); 1657 break; 1658 1659 case MODUNLOAD: /* unload a module */ 1660 error = modctl_modunload((modid_t)a1); 1661 break; 1662 1663 case MODINFO: /* get module status */ 1664 error = modctl_modinfo((modid_t)a1, (struct modinfo *)a2); 1665 break; 1666 1667 case MODRESERVED: /* get last major number in range */ 1668 error = modctl_modreserve((modid_t)a1, (int *)a2); 1669 break; 1670 1671 case MODSETMINIROOT: /* we are running in miniroot */ 1672 isminiroot = 1; 1673 error = 0; 1674 break; 1675 1676 case MODADDMAJBIND: /* read major binding file */ 1677 error = modctl_add_major((int *)a2); 1678 break; 1679 1680 case MODGETPATHLEN: /* get modpath length */ 1681 error = modctl_getmodpathlen((int *)a2); 1682 break; 1683 1684 case MODGETPATH: /* get modpath */ 1685 error = modctl_getmodpath((char *)a2); 1686 break; 1687 1688 case MODREADSYSBIND: /* read system call binding file */ 1689 error = modctl_read_sysbinding_file(); 1690 break; 1691 1692 case MODGETMAJBIND: /* get major 
number for named device */ 1693 error = modctl_getmaj((char *)a1, (uint_t)a2, (int *)a3); 1694 break; 1695 1696 case MODGETNAME: /* get name of device given major number */ 1697 error = modctl_getname((char *)a1, (uint_t)a2, (int *)a3); 1698 break; 1699 1700 case MODDEVT2INSTANCE: 1701 if (get_udatamodel() == DATAMODEL_NATIVE) { 1702 dev = (dev_t)a1; 1703 } 1704 #ifdef _SYSCALL32_IMPL 1705 else { 1706 dev = expldev(a1); 1707 } 1708 #endif 1709 error = modctl_devt2instance(dev, (int *)a2); 1710 break; 1711 1712 case MODSIZEOF_DEVID: /* sizeof device id of device given dev_t */ 1713 if (get_udatamodel() == DATAMODEL_NATIVE) { 1714 dev = (dev_t)a1; 1715 } 1716 #ifdef _SYSCALL32_IMPL 1717 else { 1718 dev = expldev(a1); 1719 } 1720 #endif 1721 error = modctl_sizeof_devid(dev, (uint_t *)a2); 1722 break; 1723 1724 case MODGETDEVID: /* get device id of device given dev_t */ 1725 if (get_udatamodel() == DATAMODEL_NATIVE) { 1726 dev = (dev_t)a1; 1727 } 1728 #ifdef _SYSCALL32_IMPL 1729 else { 1730 dev = expldev(a1); 1731 } 1732 #endif 1733 error = modctl_get_devid(dev, (uint_t)a2, (ddi_devid_t)a3); 1734 break; 1735 1736 case MODSIZEOF_MINORNAME: /* sizeof minor nm of dev_t/spectype */ 1737 if (get_udatamodel() == DATAMODEL_NATIVE) { 1738 error = modctl_sizeof_minorname((dev_t)a1, (int)a2, 1739 (uint_t *)a3); 1740 } 1741 #ifdef _SYSCALL32_IMPL 1742 else { 1743 error = modctl_sizeof_minorname(expldev(a1), (int)a2, 1744 (uint_t *)a3); 1745 } 1746 1747 #endif 1748 break; 1749 1750 case MODGETMINORNAME: /* get minor name of dev_t and spec type */ 1751 if (get_udatamodel() == DATAMODEL_NATIVE) { 1752 error = modctl_get_minorname((dev_t)a1, (int)a2, 1753 (uint_t)a3, (char *)a4); 1754 } 1755 #ifdef _SYSCALL32_IMPL 1756 else { 1757 error = modctl_get_minorname(expldev(a1), (int)a2, 1758 (uint_t)a3, (char *)a4); 1759 } 1760 #endif 1761 break; 1762 1763 case MODGETDEVFSPATH_LEN: /* sizeof path nm of dev_t/spectype */ 1764 if (get_udatamodel() == DATAMODEL_NATIVE) { 1765 error = modctl_devfspath_len((dev_t)a1, (int)a2, 1766 (uint_t *)a3); 1767 } 1768 #ifdef _SYSCALL32_IMPL 1769 else { 1770 error = modctl_devfspath_len(expldev(a1), (int)a2, 1771 (uint_t *)a3); 1772 } 1773 1774 #endif 1775 break; 1776 1777 case MODGETDEVFSPATH: /* get path name of dev_t and spec type */ 1778 if (get_udatamodel() == DATAMODEL_NATIVE) { 1779 error = modctl_devfspath((dev_t)a1, (int)a2, 1780 (uint_t)a3, (char *)a4); 1781 } 1782 #ifdef _SYSCALL32_IMPL 1783 else { 1784 error = modctl_devfspath(expldev(a1), (int)a2, 1785 (uint_t)a3, (char *)a4); 1786 } 1787 #endif 1788 break; 1789 1790 1791 case MODEVENTS: 1792 error = modctl_modevents((int)a1, a2, a3, a4, (uint_t)a5); 1793 break; 1794 1795 case MODGETFBNAME: /* get the framebuffer name */ 1796 error = modctl_get_fbname((char *)a1); 1797 break; 1798 1799 case MODREREADDACF: /* reread dacf rule database from given file */ 1800 error = modctl_reread_dacf((char *)a1); 1801 break; 1802 1803 case MODLOADDRVCONF: /* load driver.conf file for major */ 1804 error = modctl_load_drvconf((major_t)a1); 1805 break; 1806 1807 case MODUNLOADDRVCONF: /* unload driver.conf file for major */ 1808 error = modctl_unload_drvconf((major_t)a1); 1809 break; 1810 1811 case MODREMMAJBIND: /* remove a major binding */ 1812 error = modctl_rem_major((major_t)a1); 1813 break; 1814 1815 case MODDEVID2PATHS: /* get paths given devid */ 1816 error = modctl_devid2paths((ddi_devid_t)a1, (char *)a2, 1817 (uint_t)a3, (size_t *)a4, (char *)a5); 1818 break; 1819 1820 case MODSETDEVPOLICY: /* establish device policy */ 1821 
error = devpolicy_load((int)a1, (size_t)a2, (devplcysys_t *)a3); 1822 break; 1823 1824 case MODGETDEVPOLICY: /* get device policy */ 1825 error = devpolicy_get((int *)a1, (size_t)a2, 1826 (devplcysys_t *)a3); 1827 break; 1828 1829 case MODALLOCPRIV: 1830 error = modctl_allocpriv((const char *)a1); 1831 break; 1832 1833 case MODGETDEVPOLICYBYNAME: 1834 error = devpolicy_getbyname((size_t)a1, 1835 (devplcysys_t *)a2, (char *)a3); 1836 break; 1837 1838 case MODLOADMINORPERM: 1839 case MODADDMINORPERM: 1840 case MODREMMINORPERM: 1841 error = modctl_minorperm(cmd, (char *)a1, (size_t)a2); 1842 break; 1843 1844 case MODREMDRVCLEANUP: 1845 error = modctl_remdrv_cleanup((const char *)a1); 1846 break; 1847 1848 default: 1849 error = EINVAL; 1850 break; 1851 } 1852 1853 return (error ? set_errno(error) : 0); 1854 } 1855 1856 /* 1857 * Calls to kobj_load_module()() are handled off to this routine in a 1858 * separate thread. 1859 */ 1860 static void 1861 modload_thread(struct loadmt *ltp) 1862 { 1863 /* load the module and signal the creator of this thread */ 1864 kmutex_t cpr_lk; 1865 callb_cpr_t cpr_i; 1866 1867 mutex_init(&cpr_lk, NULL, MUTEX_DEFAULT, NULL); 1868 CALLB_CPR_INIT(&cpr_i, &cpr_lk, callb_generic_cpr, "modload"); 1869 /* borrow the devi lock from thread which invoked us */ 1870 pm_borrow_lock(ltp->owner); 1871 ltp->retval = kobj_load_module(ltp->mp, ltp->usepath); 1872 pm_return_lock(); 1873 sema_v(<p->sema); 1874 mutex_enter(&cpr_lk); 1875 CALLB_CPR_EXIT(&cpr_i); 1876 mutex_destroy(&cpr_lk); 1877 thread_exit(); 1878 } 1879 1880 /* 1881 * load a module, adding a reference if caller specifies rmodp. If rmodp 1882 * is specified then an errno is returned, otherwise a module index is 1883 * returned (-1 on error). 1884 */ 1885 static int 1886 modrload(char *subdir, char *filename, struct modctl **rmodp) 1887 { 1888 struct modctl *modp; 1889 size_t size; 1890 char *fullname; 1891 int retval = EINVAL; 1892 int id = -1; 1893 1894 if (rmodp) 1895 *rmodp = NULL; /* avoid garbage */ 1896 1897 if (subdir != NULL) { 1898 /* 1899 * refuse / in filename to prevent "../" escapes. 1900 */ 1901 if (strchr(filename, '/') != NULL) 1902 return (rmodp ? retval : id); 1903 1904 /* 1905 * allocate enough space for <subdir>/<filename><NULL> 1906 */ 1907 size = strlen(subdir) + strlen(filename) + 2; 1908 fullname = kmem_zalloc(size, KM_SLEEP); 1909 (void) sprintf(fullname, "%s/%s", subdir, filename); 1910 } else { 1911 fullname = filename; 1912 } 1913 1914 modp = mod_hold_installed_mod(fullname, 1, &retval); 1915 if (modp != NULL) { 1916 id = modp->mod_id; 1917 if (rmodp) { 1918 /* add mod_ref and return *rmodp */ 1919 mutex_enter(&mod_lock); 1920 modp->mod_ref++; 1921 mutex_exit(&mod_lock); 1922 *rmodp = modp; 1923 } 1924 mod_release_mod(modp); 1925 CPU_STATS_ADDQ(CPU, sys, modload, 1); 1926 } 1927 1928 done: if (subdir != NULL) 1929 kmem_free(fullname, size); 1930 return (rmodp ? retval : id); 1931 } 1932 1933 /* 1934 * This is the primary kernel interface to load a module. It loads and 1935 * installs the named module. It does not hold mod_ref of the module, so 1936 * a module unload attempt can occur at any time - it is up to the 1937 * _fini/mod_remove implementation to determine if unload will succeed. 1938 */ 1939 int 1940 modload(char *subdir, char *filename) 1941 { 1942 return (modrload(subdir, filename, NULL)); 1943 } 1944 1945 /* 1946 * Load a module. 
1947 */ 1948 int 1949 modloadonly(char *subdir, char *filename) 1950 { 1951 struct modctl *modp; 1952 char *fullname; 1953 size_t size; 1954 int id, retval; 1955 1956 if (subdir != NULL) { 1957 /* 1958 * allocate enough space for <subdir>/<filename><NULL> 1959 */ 1960 size = strlen(subdir) + strlen(filename) + 2; 1961 fullname = kmem_zalloc(size, KM_SLEEP); 1962 (void) sprintf(fullname, "%s/%s", subdir, filename); 1963 } else { 1964 fullname = filename; 1965 } 1966 1967 modp = mod_hold_loaded_mod(NULL, fullname, &retval); 1968 if (modp) { 1969 id = modp->mod_id; 1970 mod_release_mod(modp); 1971 } 1972 1973 if (subdir != NULL) 1974 kmem_free(fullname, size); 1975 1976 if (retval == 0) 1977 return (id); 1978 return (-1); 1979 } 1980 1981 /* 1982 * Try to uninstall and unload a module, removing a reference if caller 1983 * specifies rmodp. 1984 */ 1985 static int 1986 modunrload(modid_t id, struct modctl **rmodp, int unload) 1987 { 1988 struct modctl *modp; 1989 int retval; 1990 1991 if (rmodp) 1992 *rmodp = NULL; /* avoid garbage */ 1993 1994 if ((modp = mod_hold_by_id((modid_t)id)) == NULL) 1995 return (EINVAL); 1996 1997 if (rmodp) { 1998 mutex_enter(&mod_lock); 1999 modp->mod_ref--; 2000 mutex_exit(&mod_lock); 2001 *rmodp = modp; 2002 } 2003 2004 if (unload) { 2005 retval = moduninstall(modp); 2006 if (retval == 0) { 2007 mod_unload(modp); 2008 CPU_STATS_ADDQ(CPU, sys, modunload, 1); 2009 } else if (retval == EALREADY) 2010 retval = 0; /* already unloaded, not an error */ 2011 } else 2012 retval = 0; 2013 2014 mod_release_mod(modp); 2015 return (retval); 2016 } 2017 2018 /* 2019 * Uninstall and unload a module. 2020 */ 2021 int 2022 modunload(modid_t id) 2023 { 2024 int retval; 2025 2026 /* synchronize with any active modunload_disable() */ 2027 modunload_begin(); 2028 if (ddi_root_node()) 2029 (void) devfs_clean(ddi_root_node(), NULL, 0); 2030 retval = modunrload(id, NULL, 1); 2031 modunload_end(); 2032 return (retval); 2033 } 2034 2035 /* 2036 * Return status of a loaded module. 
2037 */ 2038 static int 2039 modinfo(modid_t id, struct modinfo *modinfop) 2040 { 2041 struct modctl *modp; 2042 modid_t mid; 2043 int i; 2044 2045 mid = modinfop->mi_id; 2046 if (modinfop->mi_info & MI_INFO_ALL) { 2047 while ((modp = mod_hold_next_by_id(mid++)) != NULL) { 2048 if ((modinfop->mi_info & MI_INFO_CNT) || 2049 modp->mod_installed) 2050 break; 2051 mod_release_mod(modp); 2052 } 2053 if (modp == NULL) 2054 return (EINVAL); 2055 } else { 2056 modp = mod_hold_by_id(id); 2057 if (modp == NULL) 2058 return (EINVAL); 2059 if (!(modinfop->mi_info & MI_INFO_CNT) && 2060 (modp->mod_installed == 0)) { 2061 mod_release_mod(modp); 2062 return (EINVAL); 2063 } 2064 } 2065 2066 modinfop->mi_rev = 0; 2067 modinfop->mi_state = 0; 2068 for (i = 0; i < MODMAXLINK; i++) { 2069 modinfop->mi_msinfo[i].msi_p0 = -1; 2070 modinfop->mi_msinfo[i].msi_linkinfo[0] = 0; 2071 } 2072 if (modp->mod_loaded) { 2073 modinfop->mi_state = MI_LOADED; 2074 kobj_getmodinfo(modp->mod_mp, modinfop); 2075 } 2076 if (modp->mod_installed) { 2077 modinfop->mi_state |= MI_INSTALLED; 2078 2079 (void) mod_getinfo(modp, modinfop); 2080 } 2081 2082 modinfop->mi_id = modp->mod_id; 2083 modinfop->mi_loadcnt = modp->mod_loadcnt; 2084 (void) strcpy(modinfop->mi_name, modp->mod_modname); 2085 2086 mod_release_mod(modp); 2087 return (0); 2088 } 2089 2090 static char mod_stub_err[] = "mod_hold_stub: Couldn't load stub module %s"; 2091 static char no_err[] = "No error function for weak stub %s"; 2092 2093 /* 2094 * used by the stubs themselves to load and hold a module. 2095 * Returns 0 if the module is successfully held; 2096 * the stub needs to call mod_release_stub(). 2097 * -1 if the stub should just call the err_fcn. 2098 * Note that this code is stretched out so that we avoid subroutine calls 2099 * and optimize for the most likely case. That is, the case where the 2100 * module is loaded and installed and not held. In that case we just inc 2101 * the mod_ref count and continue. 2102 */ 2103 int 2104 mod_hold_stub(struct mod_stub_info *stub) 2105 { 2106 struct modctl *mp; 2107 struct mod_modinfo *mip; 2108 2109 mip = stub->mods_modinfo; 2110 2111 mutex_enter(&mod_lock); 2112 2113 /* we do mod_hold_by_modctl inline for speed */ 2114 2115 mod_check_again: 2116 if ((mp = mip->mp) != NULL) { 2117 if (mp->mod_busy == 0) { 2118 if (mp->mod_installed) { 2119 /* increment the reference count */ 2120 mp->mod_ref++; 2121 ASSERT(mp->mod_ref && mp->mod_installed); 2122 mutex_exit(&mod_lock); 2123 return (0); 2124 } else { 2125 mp->mod_busy = 1; 2126 mp->mod_inprogress_thread = 2127 (curthread == NULL ? 2128 (kthread_id_t)-1 : curthread); 2129 } 2130 } else { 2131 /* 2132 * wait one time and then go see if someone 2133 * else has resolved the stub (set mip->mp). 2134 */ 2135 if (mod_hold_by_modctl(mp, 2136 MOD_WAIT_ONCE | MOD_LOCK_HELD)) 2137 goto mod_check_again; 2138 2139 /* 2140 * what we have now may have been unloaded!, in 2141 * that case, mip->mp will be NULL, we'll hit this 2142 * module and load again.. 2143 */ 2144 cmn_err(CE_PANIC, "mod_hold_stub should have blocked"); 2145 } 2146 mutex_exit(&mod_lock); 2147 } else { 2148 /* first time we've hit this module */ 2149 mutex_exit(&mod_lock); 2150 mp = mod_hold_by_name(mip->modm_module_name); 2151 mip->mp = mp; 2152 } 2153 2154 /* 2155 * If we are here, it means that the following conditions 2156 * are satisfied. 
2157 * 2158 * mip->mp != NULL 2159 * this thread has set the mp->mod_busy = 1 2160 * mp->mod_installed = 0 2161 * 2162 */ 2163 ASSERT(mp != NULL); 2164 ASSERT(mp->mod_busy == 1); 2165 2166 if (mp->mod_installed == 0) { 2167 /* Module not loaded, if weak stub don't load it */ 2168 if (stub->mods_flag & MODS_WEAK) { 2169 if (stub->mods_errfcn == NULL) { 2170 mod_release_mod(mp); 2171 cmn_err(CE_PANIC, no_err, 2172 mip->modm_module_name); 2173 } 2174 } else { 2175 /* Not a weak stub so load the module */ 2176 2177 if (mod_load(mp, 1) != 0 || modinstall(mp) != 0) { 2178 /* 2179 * If mod_load() was successful 2180 * and modinstall() failed, then 2181 * unload the module. 2182 */ 2183 if (mp->mod_loaded) 2184 mod_unload(mp); 2185 2186 mod_release_mod(mp); 2187 if (stub->mods_errfcn == NULL) { 2188 cmn_err(CE_PANIC, mod_stub_err, 2189 mip->modm_module_name); 2190 } else { 2191 return (-1); 2192 } 2193 } 2194 } 2195 } 2196 2197 /* 2198 * At this point module is held and loaded. Release 2199 * the mod_busy and mod_inprogress_thread before 2200 * returning. We actually call mod_release() here so 2201 * that if another stub wants to access this module, 2202 * it can do so. mod_ref is incremented before mod_release() 2203 * is called to prevent someone else from snatching the 2204 * module from this thread. 2205 */ 2206 mutex_enter(&mod_lock); 2207 mp->mod_ref++; 2208 ASSERT(mp->mod_ref && 2209 (mp->mod_loaded || (stub->mods_flag & MODS_WEAK))); 2210 mod_release(mp); 2211 mutex_exit(&mod_lock); 2212 return (0); 2213 } 2214 2215 void 2216 mod_release_stub(struct mod_stub_info *stub) 2217 { 2218 struct modctl *mp = stub->mods_modinfo->mp; 2219 2220 /* inline mod_release_mod */ 2221 mutex_enter(&mod_lock); 2222 ASSERT(mp->mod_ref && 2223 (mp->mod_loaded || (stub->mods_flag & MODS_WEAK))); 2224 mp->mod_ref--; 2225 if (mp->mod_want) { 2226 mp->mod_want = 0; 2227 cv_broadcast(&mod_cv); 2228 } 2229 mutex_exit(&mod_lock); 2230 } 2231 2232 static struct modctl * 2233 mod_hold_loaded_mod(struct modctl *dep, char *filename, int *status) 2234 { 2235 struct modctl *modp; 2236 int retval; 2237 2238 /* 2239 * Hold the module. 2240 */ 2241 modp = mod_hold_by_name_requisite(dep, filename); 2242 if (modp) { 2243 retval = mod_load(modp, 1); 2244 if (retval != 0) { 2245 mod_release_mod(modp); 2246 modp = NULL; 2247 } 2248 *status = retval; 2249 } else { 2250 *status = ENOSPC; 2251 } 2252 2253 /* 2254 * if dep is not NULL, clear the module dependency information. 2255 * This information is set in mod_hold_by_name_common(). 2256 */ 2257 if (dep != NULL && dep->mod_requisite_loading != NULL) { 2258 ASSERT(dep->mod_busy); 2259 dep->mod_requisite_loading = NULL; 2260 } 2261 2262 return (modp); 2263 } 2264 2265 /* 2266 * hold, load, and install the named module 2267 */ 2268 static struct modctl * 2269 mod_hold_installed_mod(char *name, int usepath, int *r) 2270 { 2271 struct modctl *modp; 2272 int retval; 2273 2274 /* 2275 * Verify that the module in question actually exists on disk 2276 * before allocation of module structure by mod_hold_by_name. 2277 */ 2278 if (modrootloaded && swaploaded) { 2279 if (!kobj_path_exists(name, usepath)) { 2280 *r = ENOENT; 2281 return (NULL); 2282 } 2283 } 2284 2285 /* 2286 * Hold the module. 2287 */ 2288 modp = mod_hold_by_name(name); 2289 if (modp) { 2290 retval = mod_load(modp, usepath); 2291 if (retval != 0) { 2292 mod_release_mod(modp); 2293 modp = NULL; 2294 *r = retval; 2295 } else { 2296 if ((*r = modinstall(modp)) != 0) { 2297 /* 2298 * We loaded it, but failed to _init() it.
2299 * Be kind to developers -- force it 2300 * out of memory now so that the next 2301 * attempt to use the module will cause 2302 * a reload. See 1093793. 2303 */ 2304 mod_unload(modp); 2305 mod_release_mod(modp); 2306 modp = NULL; 2307 } 2308 } 2309 } else { 2310 *r = ENOSPC; 2311 } 2312 return (modp); 2313 } 2314 2315 static char mod_excl_msg[] = 2316 "module %s(%s) is EXCLUDED and will not be loaded\n"; 2317 static char mod_init_msg[] = "loadmodule:%s(%s): _init() error %d\n"; 2318 2319 /* 2320 * This routine is needed for dependencies. Users specify dependencies 2321 * by declaring a character array initialized to filenames of dependents. 2322 * So the code that handles dependents deals with filenames (and not 2323 * module names) because that's all it has. We load by filename and once 2324 * we've loaded a file we can get the module name. 2325 * Unfortunately there isn't a single unified filename/modulename namespace. 2326 * C'est la vie. 2327 * 2328 * We allow the name being looked up to be prepended by an optional 2329 * subdirectory e.g. we can lookup (NULL, "fs/ufs") or ("fs", "ufs") 2330 */ 2331 struct modctl * 2332 mod_find_by_filename(char *subdir, char *filename) 2333 { 2334 struct modctl *mp; 2335 size_t sublen; 2336 2337 ASSERT(!MUTEX_HELD(&mod_lock)); 2338 if (subdir != NULL) 2339 sublen = strlen(subdir); 2340 else 2341 sublen = 0; 2342 2343 mutex_enter(&mod_lock); 2344 mp = &modules; 2345 do { 2346 if (sublen) { 2347 char *mod_filename = mp->mod_filename; 2348 2349 if (strncmp(subdir, mod_filename, sublen) == 0 && 2350 mod_filename[sublen] == '/' && 2351 strcmp(filename, &mod_filename[sublen + 1]) == 0) { 2352 mutex_exit(&mod_lock); 2353 return (mp); 2354 } 2355 } else if (strcmp(filename, mp->mod_filename) == 0) { 2356 mutex_exit(&mod_lock); 2357 return (mp); 2358 } 2359 } while ((mp = mp->mod_next) != &modules); 2360 mutex_exit(&mod_lock); 2361 return (NULL); 2362 } 2363 2364 /* 2365 * Check for circular dependencies. This is called from do_dependents() 2366 * in kobj.c. If we are the thread already loading this module, then 2367 * we're trying to load a dependent that we're already loading which 2368 * means the user specified circular dependencies. 2369 */ 2370 static int 2371 mod_circdep(struct modctl *modp) 2372 { 2373 struct modctl *rmod; 2374 2375 ASSERT(MUTEX_HELD(&mod_lock)); 2376 2377 /* 2378 * Check the mod_inprogress_thread first. 2379 * mod_inprogress_thread is used in mod_hold_stub() 2380 * directly to improve performance. 2381 */ 2382 if (modp->mod_inprogress_thread == curthread) 2383 return (1); 2384 2385 /* 2386 * Check the module circular dependencies. 2387 */ 2388 for (rmod = modp; rmod != NULL; rmod = rmod->mod_requisite_loading) { 2389 /* 2390 * Check if there is a module circular dependency. 2391 */ 2392 if (rmod->mod_requisite_loading == modp) 2393 return (1); 2394 } 2395 return (0); 2396 } 2397 2398 static int 2399 mod_getinfo(struct modctl *modp, struct modinfo *modinfop) 2400 { 2401 int (*func)(struct modinfo *); 2402 int retval; 2403 2404 ASSERT(modp->mod_busy); 2405 2406 /* primary modules don't do getinfo */ 2407 if (modp->mod_prim) 2408 return (0); 2409 2410 func = (int (*)(struct modinfo *))kobj_lookup(modp->mod_mp, "_info"); 2411 2412 if (kobj_addrcheck(modp->mod_mp, (caddr_t)func)) { 2413 cmn_err(CE_WARN, "_info() not defined properly in %s", 2414 modp->mod_filename); 2415 /* 2416 * The semantics of mod_info(9F) are that 0 is failure 2417 * and non-zero is success. 
2418 */ 2419 retval = 0; 2420 } else 2421 retval = (*func)(modinfop); /* call _info() function */ 2422 2423 if (moddebug & MODDEBUG_USERDEBUG) 2424 printf("Returned from _info, retval = %x\n", retval); 2425 2426 return (retval); 2427 } 2428 2429 static void 2430 modadd(struct modctl *mp) 2431 { 2432 ASSERT(MUTEX_HELD(&mod_lock)); 2433 2434 mp->mod_id = last_module_id++; 2435 mp->mod_next = &modules; 2436 mp->mod_prev = modules.mod_prev; 2437 modules.mod_prev->mod_next = mp; 2438 modules.mod_prev = mp; 2439 } 2440 2441 /*ARGSUSED*/ 2442 static struct modctl * 2443 allocate_modp(char *filename, char *modname) 2444 { 2445 struct modctl *mp; 2446 2447 mp = kobj_zalloc(sizeof (*mp), KM_SLEEP); 2448 mp->mod_modname = kobj_zalloc(strlen(modname) + 1, KM_SLEEP); 2449 (void) strcpy(mp->mod_modname, modname); 2450 return (mp); 2451 } 2452 2453 /* 2454 * Get the value of a symbol. This is a wrapper routine that 2455 * calls kobj_getsymvalue(). kobj_getsymvalue() may go away but this 2456 * wrapper will prevent callers from noticing. 2457 */ 2458 uintptr_t 2459 modgetsymvalue(char *name, int kernelonly) 2460 { 2461 return (kobj_getsymvalue(name, kernelonly)); 2462 } 2463 2464 /* 2465 * Get the symbol nearest an address. This is a wrapper routine that 2466 * calls kobj_getsymname(). kobj_getsymname() may go away but this 2467 * wrapper will prevent callers from noticing. 2468 */ 2469 char * 2470 modgetsymname(uintptr_t value, ulong_t *offset) 2471 { 2472 return (kobj_getsymname(value, offset)); 2473 } 2474 2475 /* 2476 * Look up a symbol in a specified module. This is a wrapper routine that 2477 * calls kobj_lookup(). kobj_lookup() may go away but this 2478 * wrapper will prevent callers from noticing. 2479 */ 2480 uintptr_t 2481 modlookup(char *modname, char *symname) 2482 { 2483 struct modctl *modp; 2484 uintptr_t val; 2485 2486 if ((modp = mod_hold_by_name(modname)) == NULL) 2487 return (0); 2488 val = kobj_lookup(modp->mod_mp, symname); 2489 mod_release_mod(modp); 2490 return (val); 2491 } 2492 2493 /* 2494 * Ask the user for the name of the system file and the default path 2495 * for modules. 2496 */ 2497 void 2498 mod_askparams() 2499 { 2500 static char s0[64]; 2501 intptr_t fd; 2502 2503 if ((fd = kobj_open(systemfile)) != -1L) 2504 kobj_close(fd); 2505 else 2506 systemfile = NULL; 2507 2508 /*CONSTANTCONDITION*/ 2509 while (1) { 2510 printf("Name of system file [%s]: ", 2511 systemfile ? systemfile : "/dev/null"); 2512 2513 console_gets(s0, sizeof (s0)); 2514 2515 if (s0[0] == '\0') 2516 break; 2517 else if (strcmp(s0, "/dev/null") == 0) { 2518 systemfile = NULL; 2519 break; 2520 } else { 2521 if ((fd = kobj_open(s0)) != -1L) { 2522 kobj_close(fd); 2523 systemfile = s0; 2524 break; 2525 } 2526 } 2527 printf("can't find file %s\n", s0); 2528 } 2529 } 2530 2531 static char loading_msg[] = "loading '%s' id %d\n"; 2532 static char load_msg[] = "load '%s' id %d loaded @ 0x%p/0x%p size %d/%d\n"; 2533 2534 /* 2535 * Common code for loading a module (but not installing it). 2536 * Hand off the task of module loading to a separate thread 2537 * with a large stack if possible, since this code may recurse a few times. 2538 * Return zero if there are no errors, or an errno value otherwise.
2539 */ 2540 static int 2541 mod_load(struct modctl *mp, int usepath) 2542 { 2543 int retval; 2544 struct modinfo *modinfop = NULL; 2545 struct loadmt lt; 2546 2547 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 2548 ASSERT(mp->mod_busy); 2549 2550 if (mp->mod_loaded) 2551 return (0); 2552 2553 if (mod_sysctl(SYS_CHECK_EXCLUDE, mp->mod_modname) != 0 || 2554 mod_sysctl(SYS_CHECK_EXCLUDE, mp->mod_filename) != 0) { 2555 if (moddebug & MODDEBUG_LOADMSG) { 2556 printf(mod_excl_msg, mp->mod_filename, 2557 mp->mod_modname); 2558 } 2559 return (ENXIO); 2560 } 2561 if (moddebug & MODDEBUG_LOADMSG2) 2562 printf(loading_msg, mp->mod_filename, mp->mod_id); 2563 2564 if (curthread != &t0) { 2565 lt.mp = mp; 2566 lt.usepath = usepath; 2567 lt.owner = curthread; 2568 sema_init(&lt.sema, 0, NULL, SEMA_DEFAULT, NULL); 2569 2570 /* create a thread to hand the call off to */ 2571 (void) thread_create(NULL, DEFAULTSTKSZ * 2, 2572 modload_thread, &lt, 0, &p0, TS_RUN, maxclsyspri); 2573 2574 /* wait for the thread to complete kobj_load_module */ 2575 sema_p(&lt.sema); 2576 2577 sema_destroy(&lt.sema); 2578 retval = lt.retval; 2579 } else 2580 retval = kobj_load_module(mp, usepath); 2581 2582 if (mp->mod_mp) { 2583 ASSERT(retval == 0); 2584 mp->mod_loaded = 1; 2585 mp->mod_loadcnt++; 2586 if (moddebug & MODDEBUG_LOADMSG) { 2587 printf(load_msg, mp->mod_filename, mp->mod_id, 2588 (void *)((struct module *)mp->mod_mp)->text, 2589 (void *)((struct module *)mp->mod_mp)->data, 2590 ((struct module *)mp->mod_mp)->text_size, 2591 ((struct module *)mp->mod_mp)->data_size); 2592 } 2593 2594 /* 2595 * XXX - There should be a better way to get this. 2596 */ 2597 modinfop = kmem_zalloc(sizeof (struct modinfo), KM_SLEEP); 2598 modinfop->mi_info = MI_INFO_LINKAGE; 2599 if (mod_getinfo(mp, modinfop) == 0) 2600 mp->mod_linkage = NULL; 2601 else { 2602 mp->mod_linkage = (void *)modinfop->mi_base; 2603 ASSERT(mp->mod_linkage->ml_rev == MODREV_1); 2604 } 2605 2606 /* 2607 * DCS: bootstrapping code. If the driver is loaded 2608 * before root mount, it is assumed that the driver 2609 * may be used before mounting root. In order to 2610 * access mappings of global to local minor no.'s 2611 * during installation/open of the driver, we load 2612 * them into memory here while the BOP_interfaces 2613 * are still up. 2614 */ 2615 if ((cluster_bootflags & CLUSTER_BOOTED) && !modrootloaded) { 2616 retval = clboot_modload(mp); 2617 } 2618 2619 kmem_free(modinfop, sizeof (struct modinfo)); 2620 (void) mod_sysctl(SYS_SET_MVAR, (void *)mp); 2621 retval = install_stubs_by_name(mp, mp->mod_modname); 2622 2623 /* 2624 * Now that the module is loaded, we need to give DTrace 2625 * a chance to notify its providers. This is done via 2626 * the dtrace_modload function pointer. 2627 */ 2628 if (strcmp(mp->mod_modname, "dtrace") != 0) { 2629 struct modctl *dmp = mod_hold_by_name("dtrace"); 2630 2631 if (dmp != NULL && dtrace_modload != NULL) 2632 (*dtrace_modload)(mp); 2633 2634 mod_release_mod(dmp); 2635 } 2636 2637 } else { 2638 /* 2639 * If the load failed then we need to release any requisites 2640 * that we had established.
2641 */ 2642 ASSERT(retval); 2643 mod_release_requisites(mp); 2644 2645 if (moddebug & MODDEBUG_ERRMSG) 2646 printf("error loading '%s', error %d\n", 2647 mp->mod_filename, retval); 2648 } 2649 return (retval); 2650 } 2651 2652 static char unload_msg[] = "unloading %s, module id %d, loadcnt %d.\n"; 2653 2654 static void 2655 mod_unload(struct modctl *mp) 2656 { 2657 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 2658 ASSERT(mp->mod_busy); 2659 ASSERT((mp->mod_loaded && (mp->mod_installed == 0)) && 2660 ((mp->mod_prim == 0) && (mp->mod_ref >= 0))); 2661 2662 if (moddebug & MODDEBUG_LOADMSG) 2663 printf(unload_msg, mp->mod_modname, 2664 mp->mod_id, mp->mod_loadcnt); 2665 2666 /* 2667 * If mod_ref is not zero, some other modules may still refer to 2668 * this module, so it cannot be unloaded right now. Instead, set 2669 * mod_delay_unload to 1 so that the system unloads this module 2670 * later, once it is no longer required. 2671 */ 2672 if (mp->mod_ref > 0) { 2673 mp->mod_delay_unload = 1; 2674 if (moddebug & MODDEBUG_LOADMSG2) { 2675 printf("module %s not unloaded," 2676 " non-zero reference count (%d)\n", 2677 mp->mod_modname, mp->mod_ref); 2678 } 2679 return; 2680 } 2681 2682 if (((mp->mod_loaded == 0) || mp->mod_installed) || 2683 (mp->mod_ref || mp->mod_prim)) { 2684 /* 2685 * A DEBUG kernel would have panicked on the ASSERTs above; 2686 * the code is broken if we get this warning. 2687 */ 2688 cmn_err(CE_WARN, "mod_unload: %s in incorrect state: %d %d %d", 2689 mp->mod_filename, mp->mod_installed, mp->mod_loaded, 2690 mp->mod_ref); 2691 return; 2692 } 2693 2694 /* reset stub functions to call the binder again */ 2695 reset_stubs(mp); 2696 2697 /* 2698 * Mark the module as unloaded before its memory is freed, so that 2699 * the modctl structure cannot be reused before the module has been 2700 * marked as unloaded. 2701 */ 2702 mp->mod_loaded = 0; 2703 mp->mod_linkage = NULL; 2704 2705 /* free the memory */ 2706 kobj_unload_module(mp); 2707 2708 if (mp->mod_delay_unload) { 2709 mp->mod_delay_unload = 0; 2710 if (moddebug & MODDEBUG_LOADMSG2) { 2711 printf("deferred unload of module %s" 2712 " (id %d) successful\n", 2713 mp->mod_modname, mp->mod_id); 2714 } 2715 } 2716 2717 /* release hold on requisites */ 2718 mod_release_requisites(mp); 2719 2720 /* 2721 * Now that the module is gone, we need to give DTrace a chance to 2722 * remove any probes that it may have had in the module. This is 2723 * done via the dtrace_modunload function pointer. 2724 */ 2725 if (strcmp(mp->mod_modname, "dtrace") != 0) { 2726 struct modctl *dmp = mod_hold_by_name("dtrace"); 2727 2728 if (dmp != NULL && dtrace_modunload != NULL) 2729 (*dtrace_modunload)(mp); 2730 2731 mod_release_mod(dmp); 2732 } 2733 } 2734 2735 static int 2736 modinstall(struct modctl *mp) 2737 { 2738 int val; 2739 int (*func)(void); 2740 2741 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 2742 ASSERT(mp->mod_busy && mp->mod_loaded); 2743 2744 if (mp->mod_installed) 2745 return (0); 2746 /* 2747 * If mod_delay_unload is set, the system has chosen deferred 2748 * unload for this module. It cannot be installed again until 2749 * that unload has completed.
2750 */ 2751 if (mp->mod_delay_unload) 2752 return (ENXIO); 2753 2754 if (moddebug & MODDEBUG_LOADMSG) 2755 printf("installing %s, module id %d.\n", 2756 mp->mod_modname, mp->mod_id); 2757 2758 ASSERT(mp->mod_mp != NULL); 2759 if (mod_install_requisites(mp) != 0) { 2760 /* 2761 * Note that we can't call mod_unload(mp) here since 2762 * if modinstall() was called by mod_install_requisites(), 2763 * we won't be able to hold the dependent modules 2764 * (otherwise there would be a deadlock). 2765 */ 2766 return (ENXIO); 2767 } 2768 2769 if (moddebug & MODDEBUG_ERRMSG) { 2770 printf("init '%s' id %d loaded @ 0x%p/0x%p size %lu/%lu\n", 2771 mp->mod_filename, mp->mod_id, 2772 (void *)((struct module *)mp->mod_mp)->text, 2773 (void *)((struct module *)mp->mod_mp)->data, 2774 ((struct module *)mp->mod_mp)->text_size, 2775 ((struct module *)mp->mod_mp)->data_size); 2776 } 2777 2778 func = (int (*)())kobj_lookup(mp->mod_mp, "_init"); 2779 2780 if (kobj_addrcheck(mp->mod_mp, (caddr_t)func)) { 2781 cmn_err(CE_WARN, "_init() not defined properly in %s", 2782 mp->mod_filename); 2783 return (EFAULT); 2784 } 2785 2786 if (moddebug & MODDEBUG_USERDEBUG) { 2787 printf("breakpoint before calling %s:_init()\n", 2788 mp->mod_modname); 2789 if (DEBUGGER_PRESENT) 2790 debug_enter("_init"); 2791 } 2792 2793 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 2794 ASSERT(mp->mod_busy && mp->mod_loaded); 2795 val = (*func)(); /* call _init */ 2796 2797 if (moddebug & MODDEBUG_USERDEBUG) 2798 printf("Returned from _init, val = %x\n", val); 2799 2800 if (val == 0) { 2801 /* 2802 * Set the MODS_INSTALLED flag to enable this module 2803 * being called now. 2804 */ 2805 install_stubs(mp); 2806 mp->mod_installed = 1; 2807 } else if (moddebug & MODDEBUG_ERRMSG) 2808 printf(mod_init_msg, mp->mod_filename, mp->mod_modname, val); 2809 2810 return (val); 2811 } 2812 2813 int detach_driver_unconfig = 0; 2814 2815 static int 2816 detach_driver(char *name) 2817 { 2818 major_t major; 2819 int error; 2820 2821 /* 2822 * If being called from mod_uninstall_all() then the appropriate 2823 * driver detaches (leaf only) have already been done. 2824 */ 2825 if (mod_in_autounload()) 2826 return (0); 2827 2828 major = ddi_name_to_major(name); 2829 if (major == (major_t)-1) 2830 return (0); 2831 2832 error = ndi_devi_unconfig_driver(ddi_root_node(), 2833 NDI_DETACH_DRIVER | detach_driver_unconfig, major); 2834 return (error == NDI_SUCCESS ? 0 : -1); 2835 } 2836 2837 static char finiret_msg[] = "Returned from _fini for %s, status = %x\n"; 2838 2839 static int 2840 moduninstall(struct modctl *mp) 2841 { 2842 int status = 0; 2843 int (*func)(void); 2844 2845 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 2846 ASSERT(mp->mod_busy); 2847 2848 /* 2849 * Verify that we need to do something and can uninstall the module. 2850 * 2851 * If we should not uninstall the module or if the module is not in 2852 * the correct state to start an uninstall we return EBUSY to prevent 2853 * us from progressing to mod_unload. If the module has already been 2854 * uninstalled and unloaded we return EALREADY. 2855 */ 2856 if (mp->mod_prim || mp->mod_ref || mp->mod_nenabled != 0) 2857 return (EBUSY); 2858 if ((mp->mod_installed == 0) || (mp->mod_loaded == 0)) 2859 return (EALREADY); 2860 2861 /* 2862 * To avoid devinfo / module deadlock we must release this module 2863 * prior to initiating the detach_driver, otherwise the detach_driver 2864 * might deadlock on a devinfo node held by another thread 2865 * coming top down and involving the module we have locked. 
2866 * 2867 * When we regrab the module we must reverify that it is OK 2868 * to proceed with the uninstall operation. 2869 */ 2870 mod_release_mod(mp); 2871 status = detach_driver(mp->mod_modname); 2872 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_NOT_HELD); 2873 2874 /* check detach status and reverify state with lock */ 2875 mutex_enter(&mod_lock); 2876 if ((status != 0) || mp->mod_prim || mp->mod_ref) { 2877 mutex_exit(&mod_lock); 2878 return (EBUSY); 2879 } 2880 if ((mp->mod_installed == 0) || (mp->mod_loaded == 0)) { 2881 mutex_exit(&mod_lock); 2882 return (EALREADY); 2883 } 2884 mutex_exit(&mod_lock); 2885 2886 if (moddebug & MODDEBUG_LOADMSG2) 2887 printf("uninstalling %s\n", mp->mod_modname); 2888 2889 /* 2890 * Look up _fini; return EBUSY if it is not defined. 2891 * 2892 * The MODDEBUG_FINI_EBUSY flag is useful in resolving leaks in 2893 * detach(9E) - it allows bufctl addresses to be resolved. 2894 */ 2895 func = (int (*)())kobj_lookup(mp->mod_mp, "_fini"); 2896 if ((func == NULL) || (mp->mod_loadflags & MOD_NOUNLOAD) || 2897 (moddebug & MODDEBUG_FINI_EBUSY)) 2898 return (EBUSY); 2899 2900 /* verify that _fini is in this module */ 2901 if (kobj_addrcheck(mp->mod_mp, (caddr_t)func)) { 2902 cmn_err(CE_WARN, "_fini() not defined properly in %s", 2903 mp->mod_filename); 2904 return (EFAULT); 2905 } 2906 2907 /* call _fini() */ 2908 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 2909 ASSERT(mp->mod_busy && mp->mod_loaded && mp->mod_installed); 2910 2911 status = (*func)(); 2912 2913 if (status == 0) { 2914 /* _fini returned success, the module is no longer installed */ 2915 if (moddebug & MODDEBUG_LOADMSG) 2916 printf("uninstalled %s\n", mp->mod_modname); 2917 2918 /* 2919 * Even though we only set mod_installed to zero here, a zero 2920 * return value means we are committed to a code path where 2921 * mod_loaded will also end up as zero - we have no other 2922 * way to get the module data and bss back to the pre _init 2923 * state except a reload. To ensure this, after return, 2924 * mod_busy must stay set until mod_loaded is cleared. 2925 */ 2926 mp->mod_installed = 0; 2927 2928 /* 2929 * Clear the MODS_INSTALLED flag so that functions in the 2930 * module are no longer called directly from now on. 2931 */ 2932 uninstall_stubs(mp); 2933 } else { 2934 if (moddebug & MODDEBUG_USERDEBUG) 2935 printf(finiret_msg, mp->mod_filename, status); 2936 /* 2937 * By definition _fini is only allowed to return EBUSY or the 2938 * result of mod_remove (EBUSY or EINVAL). In the off chance 2939 * that a driver returns EALREADY we convert this to EINVAL 2940 * since to our caller EALREADY means the module was already 2941 * removed. 2942 */ 2943 if (status == EALREADY) 2944 status = EINVAL; 2945 } 2946 2947 return (status); 2948 } 2949 2950 /* 2951 * Uninstall all modules.
2952 */ 2953 static void 2954 mod_uninstall_all(void) 2955 { 2956 struct modctl *mp; 2957 modid_t modid = 0; 2958 2959 /* synchronize with any active modunload_disable() */ 2960 modunload_begin(); 2961 2962 /* mark this thread as doing autounloading */ 2963 (void) tsd_set(mod_autounload_key, (void *)1); 2964 2965 (void) devfs_clean(ddi_root_node(), NULL, 0); 2966 (void) ndi_devi_unconfig(ddi_root_node(), NDI_AUTODETACH); 2967 2968 while ((mp = mod_hold_next_by_id(modid)) != NULL) { 2969 modid = mp->mod_id; 2970 /* 2971 * Skip modules with the MOD_NOAUTOUNLOAD flag set 2972 */ 2973 if (mp->mod_loadflags & MOD_NOAUTOUNLOAD) { 2974 mod_release_mod(mp); 2975 continue; 2976 } 2977 2978 if (moduninstall(mp) == 0) { 2979 mod_unload(mp); 2980 CPU_STATS_ADDQ(CPU, sys, modunload, 1); 2981 } 2982 mod_release_mod(mp); 2983 } 2984 2985 (void) tsd_set(mod_autounload_key, NULL); 2986 modunload_end(); 2987 } 2988 2989 /* wait for unloads that have begun before registering disable */ 2990 void 2991 modunload_disable(void) 2992 { 2993 mutex_enter(&modunload_wait_mutex); 2994 while (modunload_active_count) { 2995 modunload_wait++; 2996 cv_wait(&modunload_wait_cv, &modunload_wait_mutex); 2997 modunload_wait--; 2998 } 2999 modunload_disable_count++; 3000 mutex_exit(&modunload_wait_mutex); 3001 } 3002 3003 /* mark end of disable and signal waiters */ 3004 void 3005 modunload_enable(void) 3006 { 3007 mutex_enter(&modunload_wait_mutex); 3008 modunload_disable_count--; 3009 if ((modunload_disable_count == 0) && modunload_wait) 3010 cv_broadcast(&modunload_wait_cv); 3011 mutex_exit(&modunload_wait_mutex); 3012 } 3013 3014 /* wait for disables to complete before beginning unload */ 3015 void 3016 modunload_begin() 3017 { 3018 mutex_enter(&modunload_wait_mutex); 3019 while (modunload_disable_count) { 3020 modunload_wait++; 3021 cv_wait(&modunload_wait_cv, &modunload_wait_mutex); 3022 modunload_wait--; 3023 } 3024 modunload_active_count++; 3025 mutex_exit(&modunload_wait_mutex); 3026 } 3027 3028 /* mark end of unload and signal waiters */ 3029 void 3030 modunload_end() 3031 { 3032 mutex_enter(&modunload_wait_mutex); 3033 modunload_active_count--; 3034 if ((modunload_active_count == 0) && modunload_wait) 3035 cv_broadcast(&modunload_wait_cv); 3036 mutex_exit(&modunload_wait_mutex); 3037 } 3038 3039 void 3040 mod_uninstall_daemon(void) 3041 { 3042 callb_cpr_t cprinfo; 3043 clock_t ticks = 0; 3044 3045 mod_aul_thread = curthread; 3046 3047 CALLB_CPR_INIT(&cprinfo, &mod_uninstall_lock, callb_generic_cpr, "mud"); 3048 for (;;) { 3049 mutex_enter(&mod_uninstall_lock); 3050 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3051 /* 3052 * In DEBUG kernels, unheld drivers are uninstalled periodically, 3053 * every mod_uninstall_interval seconds. Periodic uninstall can 3054 * be disabled by setting mod_uninstall_interval to 0, which is 3055 * the default for a non-DEBUG kernel. 3056 */ 3057 if (mod_uninstall_interval) { 3058 ticks = ddi_get_lbolt() + 3059 drv_usectohz(mod_uninstall_interval * 1000000); 3060 (void) cv_timedwait(&mod_uninstall_cv, 3061 &mod_uninstall_lock, ticks); 3062 } else { 3063 cv_wait(&mod_uninstall_cv, &mod_uninstall_lock); 3064 } 3065 /* 3066 * The whole daemon is safe for CPR except we don't want 3067 * the daemon to run if FREEZE is issued and this daemon 3068 * wakes up from the cv_wait above. In this case, it'll be 3069 * blocked in CALLB_CPR_SAFE_END until THAW is issued.
3070 * 3071 * The reason for calling CALLB_CPR_SAFE_BEGIN twice is that 3072 * mod_uninstall_lock is used to protect cprinfo and 3073 * CALLB_CPR_SAFE_BEGIN assumes that this lock is held when 3074 * called. 3075 */ 3076 CALLB_CPR_SAFE_END(&cprinfo, &mod_uninstall_lock); 3077 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3078 mutex_exit(&mod_uninstall_lock); 3079 if ((modunload_disable_count == 0) && 3080 ((moddebug & MODDEBUG_NOAUTOUNLOAD) == 0)) { 3081 mod_uninstall_all(); 3082 } 3083 } 3084 } 3085 3086 /* 3087 * Unload all uninstalled modules. 3088 */ 3089 void 3090 modreap(void) 3091 { 3092 mutex_enter(&mod_uninstall_lock); 3093 cv_broadcast(&mod_uninstall_cv); 3094 mutex_exit(&mod_uninstall_lock); 3095 } 3096 3097 /* 3098 * Hold the specified module. This is the module holding primitive. 3099 * 3100 * If MOD_LOCK_HELD then the caller already holds the mod_lock. 3101 * 3102 * Return values: 3103 * 0 ==> the module is held 3104 * 1 ==> the module is not held and the MOD_WAIT_ONCE caller needs 3105 * to determine how to retry. 3106 */ 3107 int 3108 mod_hold_by_modctl(struct modctl *mp, int f) 3109 { 3110 ASSERT((f & (MOD_WAIT_ONCE | MOD_WAIT_FOREVER)) && 3111 ((f & (MOD_WAIT_ONCE | MOD_WAIT_FOREVER)) != 3112 (MOD_WAIT_ONCE | MOD_WAIT_FOREVER))); 3113 ASSERT((f & (MOD_LOCK_HELD | MOD_LOCK_NOT_HELD)) && 3114 ((f & (MOD_LOCK_HELD | MOD_LOCK_NOT_HELD)) != 3115 (MOD_LOCK_HELD | MOD_LOCK_NOT_HELD))); 3116 ASSERT((f & MOD_LOCK_NOT_HELD) || MUTEX_HELD(&mod_lock)); 3117 3118 if (f & MOD_LOCK_NOT_HELD) 3119 mutex_enter(&mod_lock); 3120 3121 while (mp->mod_busy) { 3122 mp->mod_want = 1; 3123 cv_wait(&mod_cv, &mod_lock); 3124 /* 3125 * The module may have been unloaded by the daemon. 3126 * Nevertheless, the modctl structure is still on the linked 3127 * list (i.e., off &modules), not freed! 3128 * The caller is not supposed to assume "mp" is still valid, 3129 * but there is no reasonable way to detect this other than 3130 * a check such as mp->mod_modinfo->mp == NULL (following the 3131 * back pointer), or a similar check depending on the calling 3132 * context. DON'T free the modctl structure; that would be 3133 * very problematic. 3134 */ 3135 if (f & MOD_WAIT_ONCE) { 3136 if (f & MOD_LOCK_NOT_HELD) 3137 mutex_exit(&mod_lock); 3138 return (1); /* caller decides how to retry */ 3139 } 3140 } 3141 3142 mp->mod_busy = 1; 3143 mp->mod_inprogress_thread = 3144 (curthread == NULL ? (kthread_id_t)-1 : curthread); 3145 3146 if (f & MOD_LOCK_NOT_HELD) 3147 mutex_exit(&mod_lock); 3148 return (0); 3149 } 3150 3151 static struct modctl * 3152 mod_hold_by_name_common(struct modctl *dep, char *filename) 3153 { 3154 char *modname; 3155 struct modctl *mp; 3156 char *curname, *newname; 3157 int found = 0; 3158 3159 mutex_enter(&mod_lock); 3160 3161 if ((modname = strrchr(filename, '/')) == NULL) 3162 modname = filename; 3163 else 3164 modname++; 3165 3166 mp = &modules; 3167 do { 3168 if (strcmp(modname, mp->mod_modname) == 0) { 3169 found = 1; 3170 break; 3171 } 3172 } while ((mp = mp->mod_next) != &modules); 3173 3174 if (found == 0) { 3175 mp = allocate_modp(filename, modname); 3176 modadd(mp); 3177 } 3178 3179 /* 3180 * If dep is not NULL, record mp in mod_requisite_loading for 3181 * the module circular dependency check. This field is used in 3182 * mod_circdep(), but it's cleared in mod_hold_loaded_mod(). 3183 */ 3184 if (dep != NULL) { 3185 ASSERT(dep->mod_busy && dep->mod_requisite_loading == NULL); 3186 dep->mod_requisite_loading = mp; 3187 } 3188 3189 /* 3190 * If the module was held, then it must be us who has it held.
3191 */ 3192 if (mod_circdep(mp)) 3193 mp = NULL; 3194 else { 3195 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_HELD); 3196 3197 /* 3198 * If the name hadn't been set or has changed, allocate 3199 * space and set it. Free space used by previous name. 3200 * 3201 * Do not change the name of primary modules, for primary 3202 * modules the mod_filename was allocated in standalone mode: 3203 * it is illegal to kobj_alloc in standalone mode and kobj_free 3204 * in non-standalone mode. 3205 */ 3206 curname = mp->mod_filename; 3207 if (curname == NULL || 3208 ((mp->mod_prim == 0) && 3209 (curname != filename) && 3210 (modname != filename) && 3211 (strcmp(curname, filename) != 0))) { 3212 newname = kobj_zalloc(strlen(filename) + 1, KM_SLEEP); 3213 (void) strcpy(newname, filename); 3214 mp->mod_filename = newname; 3215 if (curname != NULL) 3216 kobj_free(curname, strlen(curname) + 1); 3217 } 3218 } 3219 3220 mutex_exit(&mod_lock); 3221 if (mp && moddebug & MODDEBUG_LOADMSG2) 3222 printf("Holding %s\n", mp->mod_filename); 3223 if (mp == NULL && moddebug & MODDEBUG_LOADMSG2) 3224 printf("circular dependency loading %s\n", filename); 3225 return (mp); 3226 } 3227 3228 static struct modctl * 3229 mod_hold_by_name_requisite(struct modctl *dep, char *filename) 3230 { 3231 return (mod_hold_by_name_common(dep, filename)); 3232 } 3233 3234 struct modctl * 3235 mod_hold_by_name(char *filename) 3236 { 3237 return (mod_hold_by_name_common(NULL, filename)); 3238 } 3239 3240 static struct modctl * 3241 mod_hold_by_id(modid_t modid) 3242 { 3243 struct modctl *mp; 3244 int found = 0; 3245 3246 mutex_enter(&mod_lock); 3247 mp = &modules; 3248 do { 3249 if (mp->mod_id == modid) { 3250 found = 1; 3251 break; 3252 } 3253 } while ((mp = mp->mod_next) != &modules); 3254 3255 if ((found == 0) || mod_circdep(mp)) 3256 mp = NULL; 3257 else 3258 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_HELD); 3259 3260 mutex_exit(&mod_lock); 3261 return (mp); 3262 } 3263 3264 static struct modctl * 3265 mod_hold_next_by_id(modid_t modid) 3266 { 3267 struct modctl *mp; 3268 int found = 0; 3269 3270 if (modid < -1) 3271 return (NULL); 3272 3273 mutex_enter(&mod_lock); 3274 3275 mp = &modules; 3276 do { 3277 if (mp->mod_id > modid) { 3278 found = 1; 3279 break; 3280 } 3281 } while ((mp = mp->mod_next) != &modules); 3282 3283 if ((found == 0) || mod_circdep(mp)) 3284 mp = NULL; 3285 else 3286 (void) mod_hold_by_modctl(mp, MOD_WAIT_FOREVER | MOD_LOCK_HELD); 3287 3288 mutex_exit(&mod_lock); 3289 return (mp); 3290 } 3291 3292 static void 3293 mod_release(struct modctl *mp) 3294 { 3295 ASSERT(MUTEX_HELD(&mod_lock)); 3296 ASSERT(mp->mod_busy); 3297 3298 mp->mod_busy = 0; 3299 mp->mod_inprogress_thread = NULL; 3300 if (mp->mod_want) { 3301 mp->mod_want = 0; 3302 cv_broadcast(&mod_cv); 3303 } 3304 } 3305 3306 void 3307 mod_release_mod(struct modctl *mp) 3308 { 3309 if (moddebug & MODDEBUG_LOADMSG2) 3310 printf("Releasing %s\n", mp->mod_filename); 3311 mutex_enter(&mod_lock); 3312 mod_release(mp); 3313 mutex_exit(&mod_lock); 3314 } 3315 3316 modid_t 3317 mod_name_to_modid(char *filename) 3318 { 3319 char *modname; 3320 struct modctl *mp; 3321 3322 mutex_enter(&mod_lock); 3323 3324 if ((modname = strrchr(filename, '/')) == NULL) 3325 modname = filename; 3326 else 3327 modname++; 3328 3329 mp = &modules; 3330 do { 3331 if (strcmp(modname, mp->mod_modname) == 0) { 3332 mutex_exit(&mod_lock); 3333 return (mp->mod_id); 3334 } 3335 } while ((mp = mp->mod_next) != &modules); 3336 3337 mutex_exit(&mod_lock); 3338 return (-1); 3339 
} 3340 3341 3342 int 3343 mod_remove_by_name(char *name) 3344 { 3345 struct modctl *mp; 3346 int retval; 3347 3348 mp = mod_hold_by_name(name); 3349 3350 if (mp == NULL) 3351 return (EINVAL); 3352 3353 if (mp->mod_loadflags & MOD_NOAUTOUNLOAD) { 3354 /* 3355 * Do not unload forceloaded modules 3356 */ 3357 mod_release_mod(mp); 3358 return (0); 3359 } 3360 3361 if ((retval = moduninstall(mp)) == 0) { 3362 mod_unload(mp); 3363 CPU_STATS_ADDQ(CPU, sys, modunload, 1); 3364 } else if (retval == EALREADY) 3365 retval = 0; /* already unloaded, not an error */ 3366 mod_release_mod(mp); 3367 return (retval); 3368 } 3369 3370 /* 3371 * Record that module "dep" is dependent on module "on_mod." 3372 */ 3373 static void 3374 mod_make_requisite(struct modctl *dependent, struct modctl *on_mod) 3375 { 3376 struct modctl_list **pmlnp; /* previous next pointer */ 3377 struct modctl_list *mlp; 3378 struct modctl_list *new; 3379 3380 ASSERT(dependent->mod_busy && on_mod->mod_busy); 3381 mutex_enter(&mod_lock); 3382 3383 /* 3384 * Search dependent's requisite list to see if on_mod is recorded. 3385 * List is ordered by id. 3386 */ 3387 for (pmlnp = &dependent->mod_requisites, mlp = *pmlnp; 3388 mlp; pmlnp = &mlp->modl_next, mlp = *pmlnp) 3389 if (mlp->modl_modp->mod_id >= on_mod->mod_id) 3390 break; 3391 3392 /* Create and insert if not already recorded */ 3393 if ((mlp == NULL) || (mlp->modl_modp->mod_id != on_mod->mod_id)) { 3394 new = kobj_zalloc(sizeof (*new), KM_SLEEP); 3395 new->modl_modp = on_mod; 3396 new->modl_next = mlp; 3397 *pmlnp = new; 3398 3399 /* 3400 * Increment the mod_ref count in our new requisite module. 3401 * This is what keeps a module that has other modules 3402 * which are dependent on it from being uninstalled and 3403 * unloaded. "on_mod"'s mod_ref count is decremented in 3404 * mod_release_requisites when the "dependent" module 3405 * unload is complete. "on_mod" must be loaded, but may not 3406 * yet be installed. 3407 */ 3408 on_mod->mod_ref++; 3409 ASSERT(on_mod->mod_ref && on_mod->mod_loaded); 3410 } 3411 3412 mutex_exit(&mod_lock); 3413 } 3414 3415 /* 3416 * Release the hold associated with the mod_make_requisite mod_ref++ 3417 * as part of unload. 3418 */ 3419 void 3420 mod_release_requisites(struct modctl *modp) 3421 { 3422 struct modctl_list *modl; 3423 struct modctl_list *next; 3424 struct modctl *req; 3425 struct modctl_list *start = NULL, *mod_garbage; 3426 3427 ASSERT(modp->mod_busy); 3428 ASSERT(!MUTEX_HELD(&mod_lock)); 3429 3430 mutex_enter(&mod_lock); /* needed for manipulation of req */ 3431 for (modl = modp->mod_requisites; modl; modl = next) { 3432 next = modl->modl_next; 3433 req = modl->modl_modp; 3434 ASSERT(req->mod_ref >= 1 && req->mod_loaded); 3435 req->mod_ref--; 3436 3437 /* 3438 * Check if the module has to be unloaded or not. 3439 */ 3440 if (req->mod_ref == 0 && req->mod_delay_unload) { 3441 struct modctl_list *new; 3442 /* 3443 * Allocate the modctl_list holding the garbage 3444 * module which should be unloaded later. 3445 */ 3446 new = kobj_zalloc(sizeof (struct modctl_list), 3447 KM_SLEEP); 3448 new->modl_modp = req; 3449 3450 if (start == NULL) 3451 mod_garbage = start = new; 3452 else { 3453 mod_garbage->modl_next = new; 3454 mod_garbage = new; 3455 } 3456 } 3457 3458 /* free the list as we go */ 3459 kobj_free(modl, sizeof (*modl)); 3460 } 3461 modp->mod_requisites = NULL; 3462 mutex_exit(&mod_lock); 3463 3464 /* 3465 * Unload the garbage modules.
3466 */ 3467 for (mod_garbage = start; mod_garbage != NULL; /* nothing */) { 3468 struct modctl_list *old = mod_garbage; 3469 struct modctl *mp = mod_garbage->modl_modp; 3470 ASSERT(mp != NULL); 3471 3472 /* 3473 * Hold this module until it's unloaded completely. 3474 */ 3475 (void) mod_hold_by_modctl(mp, 3476 MOD_WAIT_FOREVER | MOD_LOCK_NOT_HELD); 3477 /* 3478 * Check if the module is not unloaded yet and nobody requires 3479 * the module. If it's unloaded already or somebody still 3480 * requires the module, don't unload it now. 3481 */ 3482 if (mp->mod_loaded && mp->mod_ref == 0) 3483 mod_unload(mp); 3484 ASSERT((mp->mod_loaded == 0 && mp->mod_delay_unload == 0) || 3485 (mp->mod_ref > 0)); 3486 mod_release_mod(mp); 3487 3488 mod_garbage = mod_garbage->modl_next; 3489 kobj_free(old, sizeof (struct modctl_list)); 3490 } 3491 } 3492 3493 /* 3494 * Process dependency of the module represented by "dep" on the 3495 * module named by "on." 3496 * 3497 * Called from kobj_do_dependents() to load a module "on" on which 3498 * "dep" depends. 3499 */ 3500 struct modctl * 3501 mod_load_requisite(struct modctl *dep, char *on) 3502 { 3503 struct modctl *on_mod; 3504 int retval; 3505 3506 if ((on_mod = mod_hold_loaded_mod(dep, on, &retval)) != NULL) { 3507 mod_make_requisite(dep, on_mod); 3508 } else if (moddebug & MODDEBUG_ERRMSG) { 3509 printf("error processing %s on which module %s depends\n", 3510 on, dep->mod_modname); 3511 } 3512 return (on_mod); 3513 } 3514 3515 static int 3516 mod_install_requisites(struct modctl *modp) 3517 { 3518 struct modctl_list *modl; 3519 struct modctl *req; 3520 int status = 0; 3521 3522 ASSERT(MUTEX_NOT_HELD(&mod_lock)); 3523 ASSERT(modp->mod_busy); 3524 3525 for (modl = modp->mod_requisites; modl; modl = modl->modl_next) { 3526 req = modl->modl_modp; 3527 (void) mod_hold_by_modctl(req, 3528 MOD_WAIT_FOREVER | MOD_LOCK_NOT_HELD); 3529 status = modinstall(req); 3530 mod_release_mod(req); 3531 3532 if (status != 0) 3533 break; 3534 } 3535 return (status); 3536 } 3537 3538 /* 3539 * returns 1 if this thread is doing autounload, 0 otherwise. 3540 * see mod_uninstall_all. 3541 */ 3542 int 3543 mod_in_autounload() 3544 { 3545 return ((int)(uintptr_t)tsd_get(mod_autounload_key)); 3546 } 3547 3548 /* 3549 * gmatch adapted from libc, stripping the wchar stuff 3550 */ 3551 #define popchar(p, c) \ 3552 c = *p++; \ 3553 if (c == 0) \ 3554 return (0); 3555 3556 static int 3557 gmatch(const char *s, const char *p) 3558 { 3559 int c, sc; 3560 int ok, lc, notflag; 3561 3562 sc = *s++; 3563 c = *p++; 3564 if (c == 0) 3565 return (sc == c); /* nothing matches nothing */ 3566 3567 switch (c) { 3568 case '\\': 3569 /* skip to quoted character */ 3570 popchar(p, c) 3571 /*FALLTHRU*/ 3572 3573 default: 3574 /* straight comparison */ 3575 if (c != sc) 3576 return (0); 3577 /*FALLTHRU*/ 3578 3579 case '?': 3580 /* first char matches, move to remainder */ 3581 return (sc != '\0' ? 
gmatch(s, p) : 0); 3582 3583 3584 case '*': 3585 while (*p == '*') 3586 p++; 3587 3588 /* * matches everything */ 3589 if (*p == 0) 3590 return (1); 3591 3592 /* undo skip at the beginning & iterate over substrings */ 3593 --s; 3594 while (*s) { 3595 if (gmatch(s, p)) 3596 return (1); 3597 s++; 3598 } 3599 return (0); 3600 3601 case '[': 3602 /* match any char within [] */ 3603 if (sc == 0) 3604 return (0); 3605 3606 ok = lc = notflag = 0; 3607 3608 if (*p == '!') { 3609 notflag = 1; 3610 p++; 3611 } 3612 popchar(p, c) 3613 3614 do { 3615 if (c == '-' && lc && *p != ']') { 3616 /* test sc against range [c1-c2] */ 3617 popchar(p, c) 3618 if (c == '\\') { 3619 popchar(p, c) 3620 } 3621 3622 if (notflag) { 3623 /* return 0 on mismatch */ 3624 if (lc <= sc && sc <= c) 3625 return (0); 3626 ok++; 3627 } else if (lc <= sc && sc <= c) { 3628 ok++; 3629 } 3630 /* keep going, may get a match next */ 3631 } else if (c == '\\') { 3632 /* skip to quoted character */ 3633 popchar(p, c) 3634 } 3635 lc = c; 3636 if (notflag) { 3637 if (sc == lc) 3638 return (0); 3639 ok++; 3640 } else if (sc == lc) { 3641 ok++; 3642 } 3643 popchar(p, c) 3644 } while (c != ']'); 3645 3646 /* recurse on remainder of string */ 3647 return (ok ? gmatch(s, p) : 0); 3648 } 3649 /*NOTREACHED*/ 3650 } 3651 3652 3653 /* 3654 * Get default perm for device from /etc/minor_perm. Return 0 if match found. 3655 * 3656 * Pure wild-carded patterns are handled separately so the ordering of 3657 * these patterns doesn't matter. We're still dependent on ordering 3658 * however as the first matching entry is the one returned. 3659 * Not ideal but all existing examples and usage do imply this 3660 * ordering implicitly. 3661 * 3662 * Drivers using the clone driver are always good for some entertainment. 3663 * Clone nodes under pseudo have the form clone@0:<driver>. Some minor 3664 * perm entries have the form clone:<driver>, others use <driver>:* 3665 * Examples are clone:llc1 vs. llc2:*. 3666 * 3667 * Minor perms in the clone:<driver> form are mapped to the driver's 3668 * mperm list, not the clone driver, as wildcard entries for clone 3669 * reference only. In other words, a clone wildcard will match 3670 * references for clone@0:<driver> but never <driver>@<minor>. 3671 * 3672 * Additional minor perms in the standard form are also supported, 3673 * for mixed usage, i.e. a node with an entry clone:<driver> could 3674 * provide further entries <driver>:<minor>. 3675 * 3676 * Finally, some uses of clone use an alias as the minor name rather 3677 * than the driver name, with the alias as the minor perm entry. 3678 * This case is handled by attaching the driver to bring its 3679 * minor list into existence, then discovering the alias via DDI_ALIAS. 3680 * The clone device's minor perm list can then be searched for 3681 * that alias. 3682 */ 3683 3684 static int 3685 dev_alias_minorperm(dev_info_t *dip, char *minor_name, mperm_t *rmp) 3686 { 3687 major_t major; 3688 struct devnames *dnp; 3689 mperm_t *mp; 3690 char *alias = NULL; 3691 dev_info_t *cdevi; 3692 struct ddi_minor_data *dmd; 3693 3694 major = ddi_name_to_major(minor_name); 3695 3696 ASSERT(dip == clone_dip); 3697 ASSERT(major != (major_t)-1); 3698 3699 /* 3700 * Attach the driver named by the minor node, then 3701 * search its first instance's minor list for an 3702 * alias node.
3703 */ 3704 if (ddi_hold_installed_driver(major) == NULL) 3705 return (1); 3706 3707 dnp = &devnamesp[major]; 3708 LOCK_DEV_OPS(&dnp->dn_lock); 3709 3710 if ((cdevi = dnp->dn_head) != NULL) { 3711 mutex_enter(&DEVI(cdevi)->devi_lock); 3712 for (dmd = DEVI(cdevi)->devi_minor; dmd; dmd = dmd->next) { 3713 if (dmd->type == DDM_ALIAS) { 3714 alias = i_ddi_strdup(dmd->ddm_name, KM_SLEEP); 3715 break; 3716 } 3717 } 3718 mutex_exit(&DEVI(cdevi)->devi_lock); 3719 } 3720 3721 UNLOCK_DEV_OPS(&dnp->dn_lock); 3722 ddi_rele_driver(major); 3723 3724 if (alias == NULL) { 3725 if (moddebug & MODDEBUG_MINORPERM) 3726 cmn_err(CE_CONT, "dev_minorperm: " 3727 "no alias for %s\n", minor_name); 3728 return (1); 3729 } 3730 3731 major = ddi_driver_major(clone_dip); 3732 dnp = &devnamesp[major]; 3733 LOCK_DEV_OPS(&dnp->dn_lock); 3734 3735 /* 3736 * Go through the clone driver's mperm list looking 3737 * for a match for the specified alias. 3738 */ 3739 for (mp = dnp->dn_mperm; mp; mp = mp->mp_next) { 3740 if (strcmp(alias, mp->mp_minorname) == 0) { 3741 break; 3742 } 3743 } 3744 3745 if (mp) { 3746 if (moddebug & MODDEBUG_MP_MATCH) { 3747 cmn_err(CE_CONT, 3748 "minor perm defaults: %s %s 0%o %d %d (aliased)\n", 3749 minor_name, alias, mp->mp_mode, 3750 mp->mp_uid, mp->mp_gid); 3751 } 3752 rmp->mp_uid = mp->mp_uid; 3753 rmp->mp_gid = mp->mp_gid; 3754 rmp->mp_mode = mp->mp_mode; 3755 } 3756 UNLOCK_DEV_OPS(&dnp->dn_lock); 3757 3758 kmem_free(alias, strlen(alias)+1); 3759 3760 return (mp == NULL); 3761 } 3762 3763 int 3764 dev_minorperm(dev_info_t *dip, char *name, mperm_t *rmp) 3765 { 3766 major_t major; 3767 char *minor_name; 3768 struct devnames *dnp; 3769 mperm_t *mp; 3770 int is_clone = 0; 3771 3772 if (!minorperm_loaded) { 3773 if (moddebug & MODDEBUG_MINORPERM) 3774 cmn_err(CE_CONT, 3775 "%s: minor perm not yet loaded\n", name); 3776 return (1); 3777 } 3778 3779 minor_name = strchr(name, ':'); 3780 if (minor_name == NULL) 3781 return (1); 3782 minor_name++; 3783 3784 /* 3785 * If it's the clone driver, search the driver as named 3786 * by the minor. All clone minor perm entries other than 3787 * alias nodes are actually installed on the real driver's list. 3788 */ 3789 if (dip == clone_dip) { 3790 major = ddi_name_to_major(minor_name); 3791 if (major == (major_t)-1) { 3792 if (moddebug & MODDEBUG_MINORPERM) 3793 cmn_err(CE_CONT, "dev_minorperm: " 3794 "%s: no such driver\n", minor_name); 3795 return (1); 3796 } 3797 is_clone = 1; 3798 } else { 3799 major = ddi_driver_major(dip); 3800 ASSERT(major != (major_t)-1); 3801 } 3802 3803 dnp = &devnamesp[major]; 3804 LOCK_DEV_OPS(&dnp->dn_lock); 3805 3806 /* 3807 * Go through the driver's mperm list looking for 3808 * a match for the specified minor. If there's 3809 * no matching pattern, use the wild card. 3810 * Defer to the clone wild for clone if specified, 3811 * otherwise fall back to the normal form. 
*/ 3813 for (mp = dnp->dn_mperm; mp; mp = mp->mp_next) { 3814 if (gmatch(minor_name, mp->mp_minorname) != 0) { 3815 break; 3816 } 3817 } 3818 if (mp == NULL) { 3819 if (is_clone) 3820 mp = dnp->dn_mperm_clone; 3821 if (mp == NULL) 3822 mp = dnp->dn_mperm_wild; 3823 } 3824 3825 if (mp) { 3826 if (moddebug & MODDEBUG_MP_MATCH) { 3827 cmn_err(CE_CONT, 3828 "minor perm defaults: %s %s 0%o %d %d\n", 3829 name, mp->mp_minorname, mp->mp_mode, 3830 mp->mp_uid, mp->mp_gid); 3831 } 3832 rmp->mp_uid = mp->mp_uid; 3833 rmp->mp_gid = mp->mp_gid; 3834 rmp->mp_mode = mp->mp_mode; 3835 } 3836 UNLOCK_DEV_OPS(&dnp->dn_lock); 3837 3838 /* 3839 * If no match can be found for a clone node, 3840 * search for a possible match for an alias. 3841 * One such example is /dev/ptmx -> /devices/pseudo/clone@0:ptm, 3842 * with minor perm entry clone:ptmx. 3843 */ 3844 if (mp == NULL && is_clone) { 3845 return (dev_alias_minorperm(dip, minor_name, rmp)); 3846 } 3847 3848 return (mp == NULL); 3849 } 3850 3851 /* 3852 * Dynamically reference-load a dl module/library, returning a handle. 3853 */ 3854 /*ARGSUSED*/ 3855 ddi_modhandle_t 3856 ddi_modopen(const char *modname, int mode, int *errnop) 3857 { 3858 char *subdir; 3859 char *mod; 3860 int subdirlen; 3861 struct modctl *hmodp = NULL; 3862 int retval = EINVAL; 3863 3864 ASSERT(modname && (mode == KRTLD_MODE_FIRST)); 3865 if ((modname == NULL) || (mode != KRTLD_MODE_FIRST)) 3866 goto out; 3867 3868 /* find optional first '/' in modname */ 3869 mod = strchr(modname, '/'); 3870 if (mod != strrchr(modname, '/')) 3871 goto out; /* only one '/' is legal */ 3872 3873 if (mod) { 3874 /* form the subdir string without modifying the argument */ 3875 mod++; 3876 subdirlen = mod - modname; 3877 subdir = kmem_alloc(subdirlen, KM_SLEEP); 3878 (void) strlcpy(subdir, modname, subdirlen); 3879 } else { 3880 subdirlen = 0; 3881 subdir = "misc"; 3882 mod = (char *)modname; 3883 } 3884 3885 /* reference load with errno return value */ 3886 retval = modrload(subdir, mod, &hmodp); 3887 3888 if (subdirlen) 3889 kmem_free(subdir, subdirlen); 3890 3891 out: if (errnop) 3892 *errnop = retval; 3893 3894 if (moddebug & MODDEBUG_DDI_MOD) 3895 printf("ddi_modopen %s mode %x: %s %p %d\n", 3896 modname ? modname : "<unknown>", mode, 3897 hmodp ? hmodp->mod_filename : "<unknown>", 3898 (void *)hmodp, retval); 3899 3900 return ((ddi_modhandle_t)hmodp); 3901 } 3902 3903 /* look up "name" in an open dl module/library */ 3904 void * 3905 ddi_modsym(ddi_modhandle_t h, const char *name, int *errnop) 3906 { 3907 struct modctl *hmodp = (struct modctl *)h; 3908 void *f; 3909 int retval; 3910 3911 ASSERT(hmodp && name && hmodp->mod_installed && (hmodp->mod_ref >= 1)); 3912 if ((hmodp == NULL) || (name == NULL) || 3913 (hmodp->mod_installed == 0) || (hmodp->mod_ref < 1)) { 3914 f = NULL; 3915 retval = EINVAL; 3916 } else { 3917 f = (void *)kobj_lookup(hmodp->mod_mp, (char *)name); 3918 if (f) 3919 retval = 0; 3920 else 3921 retval = ENOTSUP; 3922 } 3923 3924 if (moddebug & MODDEBUG_DDI_MOD) 3925 printf("ddi_modsym in %s of %s: %d %p\n", 3926 hmodp ? hmodp->mod_modname : "<unknown>", 3927 name ?
name : "<unknown>", retval, f); 3928 3929 if (errnop) 3930 *errnop = retval; 3931 return (f); 3932 } 3933 3934 /* dynamic (un)reference unload of an open dl module/library */ 3935 int 3936 ddi_modclose(ddi_modhandle_t h) 3937 { 3938 struct modctl *hmodp = (struct modctl *)h; 3939 struct modctl *modp = NULL; 3940 int retval; 3941 3942 ASSERT(hmodp && hmodp->mod_installed && (hmodp->mod_ref >= 1)); 3943 if ((hmodp == NULL) || 3944 (hmodp->mod_installed == 0) || (hmodp->mod_ref < 1)) { 3945 retval = EINVAL; 3946 goto out; 3947 } 3948 3949 retval = modunrload(hmodp->mod_id, &modp, ddi_modclose_unload); 3950 if (retval == EBUSY) 3951 retval = 0; /* EBUSY is not an error */ 3952 3953 if (retval == 0) { 3954 ASSERT(hmodp == modp); 3955 if (hmodp != modp) 3956 retval = EINVAL; 3957 } 3958 3959 out: if (moddebug & MODDEBUG_DDI_MOD) 3960 printf("ddi_modclose %s: %d\n", 3961 hmodp ? hmodp->mod_modname : "<unknown>", retval); 3962 3963 return (retval); 3964 } 3965