/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Driver-side functions for loading and unloading dmods.
 */

#include <sys/types.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/modctl.h>
#include <sys/systm.h>
#include <sys/ctf_api.h>
#include <sys/kmdb.h>

#include <kmdb/kctl/kctl.h>
#include <kmdb/kctl/kctl_wr.h>
#include <kmdb/kmdb_wr_impl.h>
#include <kmdb/kmdb_kdi.h>
#include <mdb/mdb_errno.h>

struct modctl *kdi_dmods;

/*
 * When a load is attempted, a check is first made of the modules on the
 * kctl_dmods list.  If a module is found, the load will not proceed.
 * kctl_dmods_lock must be held while traversing kctl_dmods, and while adding
 * to and subtracting from it.
 */
static struct modctl kctl_dmods;
static kmutex_t kctl_dmods_lock;

static kmdb_wr_path_t *kctl_dmod_path;

/*
 * Used to track outstanding driver-initiated load notifications.  These
 * notifications have been allocated by the driver, and thus must be freed by
 * the driver in the event of an emergency unload.  If we don't free them
 * ourselves, they'll leak.  Granted, the world is probably melting down at
 * that point, but there's no reason why we shouldn't tidy up the deck chairs
 * before we go.
 */
static kmdb_wr_load_t *kctl_dmod_loads;
static kmutex_t kctl_dmod_loads_lock;
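
/*
 * A minimal sketch of the life cycle of a driver-initiated load record
 * (kmdb_wr_load_t), as implemented by the functions below.  This is
 * illustrative only; locking and error handling are elided:
 *
 *	dlr = kobj_zalloc(sizeof (kmdb_wr_load_t), KM_SLEEP);
 *	dlr->dlr_node.wn_task = WNTASK_DMOD_LOAD;
 *	dlr->dlr_fname = kctl_strdup(fname);
 *	(void) kctl_dmod_load(dlr);	- create a modctl, load the module
 *	kmdb_wr_debugger_notify(dlr);	- announce the load to the debugger
 *	...
 *	kctl_dmod_load_ack(dlr);	- record returned; unlink and free
 */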

static int
kctl_find_module(char *modname, char *fullname, size_t fullnamelen)
{
	intptr_t fd;
	int i;

	/* If they gave us an absolute path, we don't need to search */
	if (modname[0] == '/') {
		if (strlen(modname) + 1 > fullnamelen) {
			cmn_err(CE_WARN, "Can't load dmod %s - name too long",
			    modname);
			return (0);
		}

		if ((fd = kobj_open(modname)) == -1)
			return (0);
		kobj_close(fd);

		(void) strcpy(fullname, modname);

		return (1);
	}

	for (i = 0; kctl_dmod_path->dpth_path[i] != NULL; i++) {
		const char *path = kctl_dmod_path->dpth_path[i];

		if (strlen(path) + 1 + strlen(modname) + 1 > fullnamelen) {
			kctl_dprintf("Can't load dmod from %s/%s - "
			    "name too long", path, modname);
			continue;
		}

		(void) snprintf(fullname, fullnamelen, "%s/%s", path, modname);

		if ((fd = kobj_open(fullname)) == -1)
			continue;

		kobj_close(fd);

		kctl_dprintf("kobj_open %s found", fullname);

		/* Found it */
		return (1);
	}

	/* No luck */
	return (0);
}

static void
kctl_dlr_free(kmdb_wr_load_t *dlr)
{
	if (dlr->dlr_node.wn_flags & WNFLAGS_NOFREE)
		return;

	kctl_strfree(dlr->dlr_fname);
	kmem_free(dlr, sizeof (kmdb_wr_load_t));
}

int
kctl_dmod_load(kmdb_wr_load_t *dlr)
{
	struct modctl *modp;
	char modpath[MAXPATHLEN];
	const char *modname = kctl_basename(dlr->dlr_fname);
	int rc;

	mutex_enter(&kctl_dmods_lock);

	/* Have we already loaded this dmod? */
	for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
	    modp = modp->mod_next) {
		if (strcmp(modname, modp->mod_modname) == 0) {
			mutex_exit(&kctl_dmods_lock);
			dlr->dlr_errno = EEXIST;
			return (-1);
		}
	}

	/*
	 * If we find something that looks like a dmod, create a modctl for
	 * it, and add said modctl to our dmods list.  This will allow us to
	 * drop the dmods lock, while still preventing duplicate loads.  If we
	 * aren't able to actually load the dmod, we can always remove the
	 * modctl later.
	 */
	if (!kctl_find_module(dlr->dlr_fname, modpath, sizeof (modpath))) {
		mutex_exit(&kctl_dmods_lock);
		dlr->dlr_errno = ENOENT;
		return (-1);
	}

	modp = kobj_zalloc(sizeof (struct modctl), KM_SLEEP);

	modp->mod_filename = kctl_strdup(modpath);
	modp->mod_modname = kctl_basename(modp->mod_filename);
	modp->mod_busy = 1;
	modp->mod_loadflags |= MOD_NOAUTOUNLOAD | MOD_NONOTIFY;
	modp->mod_next = &kctl_dmods;
	modp->mod_prev = kctl_dmods.mod_prev;
	modp->mod_prev->mod_next = modp;
	kctl_dmods.mod_prev = modp;

	mutex_exit(&kctl_dmods_lock);

	if (kctl.kctl_boot_ops == NULL)
		rc = kobj_load_module(modp, 0);
	else
		rc = kobj_load_primary_module(modp);

	if (rc != 0) {
		kctl_warn("failed to load dmod %s", modp->mod_modname);

		if (kctl.kctl_boot_ops == NULL)
			mod_release_requisites(modp);

		mutex_enter(&kctl_dmods_lock);
		modp->mod_next->mod_prev = modp->mod_prev;
		modp->mod_prev->mod_next = modp->mod_next;
		mutex_exit(&kctl_dmods_lock);

		kctl_strfree(modp->mod_filename);
		kobj_free(modp, sizeof (struct modctl));

		dlr->dlr_errno = EMDB_NOMOD;
		return (-1);
	}

	/*
	 * It worked!  If the module has any CTF data, decompress it, and make
	 * a note of the load.
	 */
	mutex_enter(&mod_lock);
	if ((rc = kctl_mod_decompress(modp)) != 0) {
		kctl_warn("failed to decompress CTF data for dmod %s: %s",
		    modpath, ctf_errmsg(rc));
	}
	mutex_exit(&mod_lock);

	kctl_dprintf("loaded dmod %s at %p", modpath, modp);

	modp->mod_ref = 1;
	modp->mod_loaded = 1;

	dlr->dlr_modctl = modp;

	return (0);
}
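
/*
 * A note on the list manipulation above: kctl_dmods is a circular,
 * doubly-linked list anchored by a static sentinel node, so tail insertion
 * and removal need no NULL checks.  The idiom, as used in kctl_dmod_load():
 *
 *	modp->mod_next = &kctl_dmods;			- insert at tail
 *	modp->mod_prev = kctl_dmods.mod_prev;
 *	modp->mod_prev->mod_next = modp;
 *	kctl_dmods.mod_prev = modp;
 *
 *	modp->mod_next->mod_prev = modp->mod_prev;	- unlink
 *	modp->mod_prev->mod_next = modp->mod_next;
 */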

/*
 * Driver-initiated loads.  Load the module and announce it to the debugger.
 */
void
kctl_dmod_autoload(const char *fname)
{
	kmdb_wr_load_t *dlr;

	dlr = kobj_zalloc(sizeof (kmdb_wr_load_t), KM_SLEEP);
	dlr->dlr_node.wn_task = WNTASK_DMOD_LOAD;
	dlr->dlr_fname = kctl_strdup(fname);

	/*
	 * If we're loading at boot, the kmdb_wr_load_t will have been
	 * "allocated" by krtld, and will thus not be under the control of
	 * kmem.  We need to ensure that we don't attempt to free it when
	 * we get it back from the debugger.
	 */
	if (kctl.kctl_boot_ops != NULL)
		dlr->dlr_node.wn_flags |= WNFLAGS_NOFREE;

	if (kctl_dmod_load(dlr) < 0) {
		kctl_dlr_free(dlr);
		return;
	}

	/*
	 * Add to the list of open driver-initiated loads.  We need to track
	 * these so we can free them (and thus avoid leaks) in the event that
	 * the debugger needs to be blown away before it can return them.
	 */
	mutex_enter(&kctl_dmod_loads_lock);
	dlr->dlr_next = kctl_dmod_loads;
	if (kctl_dmod_loads != NULL)
		kctl_dmod_loads->dlr_prev = dlr;
	kctl_dmod_loads = dlr;
	mutex_exit(&kctl_dmod_loads_lock);

	kmdb_wr_debugger_notify(dlr);
}

void
kctl_dmod_load_all(void)
{
	/*
	 * The standard list of modules isn't populated until the tail end of
	 * kobj_init().  Prior to that point, the only available list is that
	 * of primaries.  We'll use that if the normal list isn't ready yet.
	 */
	if (modules.mod_mp == NULL) {
		/* modules hasn't been initialized yet -- use primaries */
		struct modctl_list *ml;

		for (ml = kobj_linkmaps[KOBJ_LM_PRIMARY]; ml != NULL;
		    ml = ml->modl_next)
			kctl_dmod_autoload(ml->modl_modp->mod_modname);

	} else {
		struct modctl *modp = &modules;

		do {
			if (modp->mod_mp != NULL)
				kctl_dmod_autoload(modp->mod_modname);
		} while ((modp = modp->mod_next) != &modules);
	}
}
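
/*
 * Usage sketch for the autoload path (the module name here is hypothetical;
 * any name resolvable via kctl_dmod_path would do):
 *
 *	kctl_dmod_autoload("specfs");
 *
 * This allocates a load record, loads the matching dmod, links the record
 * onto kctl_dmod_loads, and queues it for the debugger, exactly as
 * kctl_dmod_load_all() does for every loaded kernel module.
 */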

void
kctl_dmod_load_ack(kmdb_wr_load_t *dlr)
{
	/* Remove from the list of open driver-initiated requests */
	mutex_enter(&kctl_dmod_loads_lock);
	if (dlr->dlr_prev == NULL)
		kctl_dmod_loads = dlr->dlr_next;
	else
		dlr->dlr_prev->dlr_next = dlr->dlr_next;

	if (dlr->dlr_next != NULL)
		dlr->dlr_next->dlr_prev = dlr->dlr_prev;
	mutex_exit(&kctl_dmod_loads_lock);

	kctl_dlr_free(dlr);
}

static int
kctl_dmod_unload_common(struct modctl *modp)
{
	struct modctl *m;

	kctl_dprintf("unloading dmod %s", modp->mod_modname);

	mutex_enter(&kctl_dmods_lock);
	for (m = kctl_dmods.mod_next; m != &kctl_dmods; m = m->mod_next) {
		if (m == modp)
			break;
	}
	mutex_exit(&kctl_dmods_lock);

	if (m != modp)
		return (ENOENT);

	/* Found it */
	modp->mod_ref = 0;
	modp->mod_loaded = 0;

	kobj_unload_module(modp);

	mod_release_requisites(modp);

	/* Remove it from our dmods list */
	mutex_enter(&kctl_dmods_lock);
	modp->mod_next->mod_prev = modp->mod_prev;
	modp->mod_prev->mod_next = modp->mod_next;
	mutex_exit(&kctl_dmods_lock);

	kctl_strfree(modp->mod_filename);
	kmem_free(modp, sizeof (struct modctl));

	return (0);
}

void
kctl_dmod_unload(kmdb_wr_unload_t *dur)
{
	int rc;

	if ((rc = kctl_dmod_unload_common(dur->dur_modctl)) != 0) {
		cmn_err(CE_WARN, "unexpected dmod unload failure: %d", rc);
		dur->dur_errno = rc;
	}
}
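
/*
 * Note the error-reporting convention shared with kctl_dmod_load(): a
 * failure is recorded in the work record itself (dur_errno here, dlr_errno
 * for loads), presumably so that the originator of the request can observe
 * the result when the record is handed back.
 */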

/*
 * This will be called during shutdown.  The debugger has been stopped, we're
 * off the module notification list, and we've already processed everything
 * in the driver's work queue.  We should have received (and processed)
 * unload requests for each of the dmods we've loaded.  To be safe, however,
 * we'll double-check.
 *
 * If we're doing an emergency shutdown, there may be outstanding
 * driver-initiated messages that haven't been returned to us.  The debugger
 * is dead, so it's not going to be returning them.  We'll leak them unless
 * we find and free them ourselves.
 */
void
kctl_dmod_unload_all(void)
{
	kmdb_wr_load_t *dlr;
	struct modctl *modp;

	while ((modp = kctl_dmods.mod_next) != &kctl_dmods)
		(void) kctl_dmod_unload_common(modp);

	while ((dlr = kctl_dmod_loads) != NULL) {
		kctl_dmod_loads = dlr->dlr_next;

		kctl_dprintf("freed orphan load notification for %s",
		    dlr->dlr_fname);
		kctl_dlr_free(dlr);
	}
}

kmdb_wr_path_t *
kctl_dmod_path_set(kmdb_wr_path_t *pth)
{
	kmdb_wr_path_t *opth;

	if (kctl.kctl_flags & KMDB_F_DRV_DEBUG) {
		if (pth != NULL) {
			int i;
			kctl_dprintf("changing dmod path to: %p", pth);
			for (i = 0; pth->dpth_path[i] != NULL; i++)
				kctl_dprintf(" %s", pth->dpth_path[i]);
		} else {
			kctl_dprintf("changing dmod path to NULL");
		}
	}

	opth = kctl_dmod_path;
	kctl_dmod_path = pth;

	return (opth);
}

void
kctl_dmod_path_reset(void)
{
	kmdb_wr_path_t *pth;

	if ((pth = kctl_dmod_path_set(NULL)) != NULL) {
		WR_ACK(pth);
		kmdb_wr_debugger_notify(pth);
	}
}

void
kctl_dmod_sync(void)
{
	struct modctl *modp;

	/*
	 * kobj_sync() has no visibility into our dmods, so we need to
	 * explicitly tell krtld to export the portions of our dmods that
	 * were allocated using boot scratch memory.
	 */
	for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
	    modp = modp->mod_next)
		kobj_export_module(modp->mod_mp);
}

void
kctl_dmod_init(void)
{
	mutex_init(&kctl_dmod_loads_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&kctl_dmods_lock, NULL, MUTEX_DRIVER, NULL);

	bzero(&kctl_dmods, sizeof (struct modctl));
	kctl_dmods.mod_next = kctl_dmods.mod_prev = &kctl_dmods;
	kdi_dmods = &kctl_dmods;
}

void
kctl_dmod_fini(void)
{
	mutex_destroy(&kctl_dmods_lock);
	mutex_destroy(&kctl_dmod_loads_lock);
	kdi_dmods = NULL;
}
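
/*
 * A usage sketch of the module-level life cycle implemented in this file,
 * assuming the caller is the kctl driver (illustrative only; the actual
 * call sites live elsewhere in kctl):
 *
 *	kctl_dmod_init();	- set up locks, sentinel list, kdi_dmods
 *	kctl_dmod_load_all();	- queue loads for all loaded kernel modules
 *	...			- debugger session runs
 *	kctl_dmod_unload_all();	- unload dmods, free orphaned records
 *	kctl_dmod_fini();	- destroy locks, clear kdi_dmods
 */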