/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Driver-side functions for loading and unloading dmods.
 */

#include <sys/types.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/modctl.h>
#include <sys/systm.h>
#include <sys/ctf_api.h>
#include <sys/kmdb.h>

#include <kmdb/kctl/kctl.h>
#include <kmdb/kctl/kctl_wr.h>
#include <kmdb/kmdb_wr_impl.h>
#include <kmdb/kmdb_kdi.h>
#include <mdb/mdb_errno.h>

struct modctl		*kdi_dmods;

/*
 * When a load is attempted, a check is first made of the modules on the
 * kctl_dmods list.  If a module with the same name is found, the load will
 * not proceed.  kctl_dmods_lock must be held while traversing kctl_dmods,
 * and while adding to and subtracting from it.
 */
static struct modctl	kctl_dmods;
static kmutex_t		kctl_dmods_lock;

static kmdb_wr_path_t	*kctl_dmod_path;

/*
 * Used to track outstanding driver-initiated load notifications.  These
 * notifications have been allocated by the driver, and thus must be freed
 * by the driver in the event of an emergency unload.  If we don't free
 * them ourselves, they'll leak.  Granted, the world is probably melting
 * down at that point, but there's no reason why we shouldn't tidy up the
 * deck chairs before we go.
 */
static kmdb_wr_load_t	*kctl_dmod_loads;
static kmutex_t		kctl_dmod_loads_lock;

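/*
 * Locate the dmod named by modname, either directly (if an absolute path
 * was supplied) or by searching the dmod path.  The full pathname of the
 * dmod is copied into fullname.  Returns 1 if the dmod was found, and 0
 * if it wasn't.
 */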
static int
kctl_find_module(char *modname, char *fullname, size_t fullnamelen)
{
	intptr_t fd;
	int i;

	/* If they gave us an absolute path, we don't need to search */
	if (modname[0] == '/') {
		if (strlen(modname) + 1 > fullnamelen) {
			cmn_err(CE_WARN,
			    "Can't load dmod %s - name too long\n", modname);
			return (0);
		}

		if ((fd = kobj_open(modname)) == -1)
			return (0);
		kobj_close(fd);

		(void) strcpy(fullname, modname);

		return (1);
	}

	for (i = 0; kctl_dmod_path->dpth_path[i] != NULL; i++) {
		const char *path = kctl_dmod_path->dpth_path[i];

		if (strlen(path) + 1 + strlen(modname) + 1 > fullnamelen) {
			kctl_dprintf("Can't load dmod from %s/%s - "
			    "name too long", path, modname);
			continue;
		}

		(void) snprintf(fullname, fullnamelen, "%s/%s", path,
		    modname);

		if ((fd = kobj_open(fullname)) == -1)
			continue;

		kobj_close(fd);

		kctl_dprintf("kobj_open %s found", fullname);

		/* Found it */
		return (1);
	}

	/* No luck */
	return (0);
}

static void
kctl_dlr_free(kmdb_wr_load_t *dlr)
{
	if (dlr->dlr_node.wn_flags & WNFLAGS_NOFREE)
		return;

	kctl_strfree(dlr->dlr_fname);
	kmem_free(dlr, sizeof (kmdb_wr_load_t));
}

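/*
 * Load the dmod named by dlr->dlr_fname.  On success, the new dmod's
 * modctl is returned via dlr->dlr_modctl.  On failure, -1 is returned and
 * dlr->dlr_errno is set to indicate the reason.
 */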
int
kctl_dmod_load(kmdb_wr_load_t *dlr)
{
	struct modctl *modp;
	char modpath[MAXPATHLEN];
	const char *modname = kctl_basename(dlr->dlr_fname);
	int rc;

	mutex_enter(&kctl_dmods_lock);

	/* Have we already loaded this dmod? */
	for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
	    modp = modp->mod_next) {
		if (strcmp(modname, modp->mod_modname) == 0) {
			mutex_exit(&kctl_dmods_lock);
			dlr->dlr_errno = EEXIST;
			return (-1);
		}
	}

	/*
	 * If we find something that looks like a dmod, create a modctl for
	 * it, and add said modctl to our dmods list.  This will allow us to
	 * drop the dmods lock, while still preventing duplicate loads.  If
	 * we aren't able to actually load the dmod, we can always remove
	 * the modctl later.
	 */
	if (!kctl_find_module(dlr->dlr_fname, modpath, sizeof (modpath))) {
		mutex_exit(&kctl_dmods_lock);
		dlr->dlr_errno = ENOENT;
		return (-1);
	}

	modp = kobj_zalloc(sizeof (struct modctl), KM_SLEEP);

	modp->mod_filename = kctl_strdup(modpath);
	modp->mod_modname = kctl_basename(modp->mod_filename);
	modp->mod_busy = 1;
	modp->mod_loadflags |= MOD_NOAUTOUNLOAD | MOD_NONOTIFY;
	modp->mod_next = &kctl_dmods;
	modp->mod_prev = kctl_dmods.mod_prev;
	modp->mod_prev->mod_next = modp;
	kctl_dmods.mod_prev = modp;

	mutex_exit(&kctl_dmods_lock);

	if (kctl.kctl_boot_ops == NULL)
		rc = kobj_load_module(modp, 0);
	else
		rc = kobj_load_primary_module(modp);

	if (rc < 0) {
		kctl_warn("failed to load dmod %s", modp->mod_modname);

		if (kctl.kctl_boot_ops == NULL)
			mod_release_requisites(modp);

		mutex_enter(&kctl_dmods_lock);
		modp->mod_next->mod_prev = modp->mod_prev;
		modp->mod_prev->mod_next = modp->mod_next;
		mutex_exit(&kctl_dmods_lock);

		kctl_strfree(modp->mod_filename);
		kobj_free(modp, sizeof (struct modctl));

		dlr->dlr_errno = EMDB_NOMOD;
		return (-1);
	}

	/*
	 * It worked!  If the module has any CTF data, decompress it, and
	 * make a note of the load.
	 */
	mutex_enter(&mod_lock);
	if ((rc = kctl_mod_decompress(modp)) != 0) {
		kctl_warn("failed to decompress CTF data for dmod %s: %s",
		    modpath, ctf_errmsg(rc));
	}
	mutex_exit(&mod_lock);

	kctl_dprintf("loaded dmod %s at %p", modpath, modp);

	modp->mod_ref = 1;
	modp->mod_loaded = 1;

	dlr->dlr_modctl = modp;

	return (0);
}

/*
 * Driver-initiated loads.  Load the module and announce it to the debugger.
 */
void
kctl_dmod_autoload(const char *fname)
{
	kmdb_wr_load_t *dlr;

	dlr = kobj_zalloc(sizeof (kmdb_wr_load_t), KM_SLEEP);
	dlr->dlr_node.wn_task = WNTASK_DMOD_LOAD;
	dlr->dlr_fname = kctl_strdup(fname);

	/*
	 * If we're loading at boot, the kmdb_wr_load_t will have been
	 * "allocated" by krtld, and will thus not be under the control of
	 * kmem.  We need to ensure that we don't attempt to free it when
	 * we get it back from the debugger.
	 */
	if (kctl.kctl_boot_ops != NULL)
		dlr->dlr_node.wn_flags |= WNFLAGS_NOFREE;

	if (kctl_dmod_load(dlr) < 0) {
		kctl_dlr_free(dlr);
		return;
	}

	/*
	 * Add to the list of open driver-initiated loads.  We need to track
	 * these so we can free them (and thus avoid leaks) in the event
	 * that the debugger needs to be blown away before it can return
	 * them.
	 */
	mutex_enter(&kctl_dmod_loads_lock);
	dlr->dlr_next = kctl_dmod_loads;
	if (kctl_dmod_loads != NULL)
		kctl_dmod_loads->dlr_prev = dlr;
	kctl_dmod_loads = dlr;
	mutex_exit(&kctl_dmod_loads_lock);

	kmdb_wr_debugger_notify(dlr);
}

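/*
 * Attempt a driver-initiated load of a matching dmod for every module
 * currently known to krtld.
 */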
void
kctl_dmod_load_all(void)
{
	/*
	 * The standard list of modules isn't populated until the tail end
	 * of kobj_init().  Prior to that point, the only available list is
	 * that of primaries.  We'll use that if the normal list isn't
	 * ready yet.
	 */
	if (modules.mod_mp == NULL) {
		/* modules hasn't been initialized yet -- use primaries */
		struct modctl_list *ml;

		for (ml = kobj_linkmaps[KOBJ_LM_PRIMARY]; ml != NULL;
		    ml = ml->modl_next)
			kctl_dmod_autoload(ml->modl_modp->mod_modname);

	} else {
		struct modctl *modp = &modules;

		do {
			if (modp->mod_mp != NULL)
				kctl_dmod_autoload(modp->mod_modname);
		} while ((modp = modp->mod_next) != &modules);
	}
}

void
kctl_dmod_load_ack(kmdb_wr_load_t *dlr)
{
	/* Remove from the list of open driver-initiated requests */
	mutex_enter(&kctl_dmod_loads_lock);
	if (dlr->dlr_prev == NULL)
		kctl_dmod_loads = dlr->dlr_next;
	else
		dlr->dlr_prev->dlr_next = dlr->dlr_next;

	if (dlr->dlr_next != NULL)
		dlr->dlr_next->dlr_prev = dlr->dlr_prev;
	mutex_exit(&kctl_dmod_loads_lock);

	kctl_dlr_free(dlr);
}

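/*
 * Unload a dmod and remove it from the kctl_dmods list.  Returns ENOENT if
 * modp isn't on the list, and 0 upon success.
 */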
static int
kctl_dmod_unload_common(struct modctl *modp)
{
	struct modctl *m;

	kctl_dprintf("unloading dmod %s", modp->mod_modname);

	mutex_enter(&kctl_dmods_lock);
	for (m = kctl_dmods.mod_next; m != &kctl_dmods; m = m->mod_next) {
		if (m == modp)
			break;
	}
	mutex_exit(&kctl_dmods_lock);

	if (m != modp)
		return (ENOENT);

	/* Found it */
	modp->mod_ref = 0;
	modp->mod_loaded = 0;

	kobj_unload_module(modp);

	mod_release_requisites(modp);

	/* Remove it from our dmods list */
	mutex_enter(&kctl_dmods_lock);
	modp->mod_next->mod_prev = modp->mod_prev;
	modp->mod_prev->mod_next = modp->mod_next;
	mutex_exit(&kctl_dmods_lock);

	kctl_strfree(modp->mod_filename);
	kmem_free(modp, sizeof (struct modctl));

	return (0);
}

void
kctl_dmod_unload(kmdb_wr_unload_t *dur)
{
	int rc;

	if ((rc = kctl_dmod_unload_common(dur->dur_modctl)) != 0) {
		cmn_err(CE_WARN, "unexpected dmod unload failure: %d\n", rc);
		dur->dur_errno = rc;
	}
}

/*
 * This will be called during shutdown.  The debugger has been stopped,
 * we're off the module notification list, and we've already processed
 * everything in the driver's work queue.  We should have received (and
 * processed) unload requests for each of the dmods we've loaded.  To be
 * safe, however, we'll double-check.
 *
 * If we're doing an emergency shutdown, there may be outstanding
 * driver-initiated messages that haven't been returned to us.  The
 * debugger is dead, so it's not going to be returning them.  We'll leak
 * them unless we find and free them ourselves.
 */
void
kctl_dmod_unload_all(void)
{
	kmdb_wr_load_t *dlr;
	struct modctl *modp;

	while ((modp = kctl_dmods.mod_next) != &kctl_dmods)
		(void) kctl_dmod_unload_common(modp);

	while ((dlr = kctl_dmod_loads) != NULL) {
		kctl_dmod_loads = dlr->dlr_next;

		kctl_dprintf("freed orphan load notification for %s",
		    dlr->dlr_fname);
		kctl_dlr_free(dlr);
	}
}

kmdb_wr_path_t *
kctl_dmod_path_set(kmdb_wr_path_t *pth)
{
	kmdb_wr_path_t *opth;

	/* pth may be NULL -- kctl_dmod_path_reset passes NULL to clear it */
	if ((kctl.kctl_flags & KMDB_F_DRV_DEBUG) && pth != NULL) {
		int i;

		kctl_dprintf("changing dmod path to: %p", pth);
		for (i = 0; pth->dpth_path[i] != NULL; i++)
			kctl_dprintf(" %s", pth->dpth_path[i]);
	}

	opth = kctl_dmod_path;
	kctl_dmod_path = pth;

	return (opth);
}

void
kctl_dmod_path_reset(void)
{
	kmdb_wr_path_t *pth;

	if ((pth = kctl_dmod_path_set(NULL)) != NULL) {
		WR_ACK(pth);
		kmdb_wr_debugger_notify(pth);
	}
}

void
kctl_dmod_sync(void)
{
	struct modctl *modp;

	/*
	 * kobj_sync() has no visibility into our dmods, so we need to
	 * explicitly tell krtld to export the portions of our dmods that
	 * were allocated using boot scratch memory.
	 */
	for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
	    modp = modp->mod_next)
		kobj_export_module(modp->mod_mp);
}

void
kctl_dmod_init(void)
{
	mutex_init(&kctl_dmod_loads_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&kctl_dmods_lock, NULL, MUTEX_DRIVER, NULL);

	bzero(&kctl_dmods, sizeof (struct modctl));
	kctl_dmods.mod_next = kctl_dmods.mod_prev = &kctl_dmods;
	kdi_dmods = &kctl_dmods;
}

void
kctl_dmod_fini(void)
{
	mutex_destroy(&kctl_dmods_lock);
	mutex_destroy(&kctl_dmod_loads_lock);
	kdi_dmods = NULL;
}