1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * Driver-side functions for loading and unloading dmods.
28 */
29
30 #include <sys/types.h>
31 #include <sys/kobj.h>
32 #include <sys/kobj_impl.h>
33 #include <sys/modctl.h>
34 #include <sys/systm.h>
35 #include <sys/ctf_api.h>
36 #include <sys/kmdb.h>
37
38 #include <kmdb/kctl/kctl.h>
39 #include <kmdb/kctl/kctl_wr.h>
40 #include <kmdb/kmdb_wr_impl.h>
41 #include <kmdb/kmdb_kdi.h>
42 #include <mdb/mdb_errno.h>
43
44 struct modctl *kdi_dmods;
45
46 /*
47 * When a load is attempted, a check is first made of the modules on the
48 * kctl_dmods list. If a module is found, the load will not proceed.
49 * kctl_dmods_lock must be held while traversing kctl_dmods, and while adding
50 * to and subtracting from it.
51 */
52 static struct modctl kctl_dmods;
53 static kmutex_t kctl_dmods_lock;
54
55 static kmdb_wr_path_t *kctl_dmod_path;
56
57 /*
58 * Used to track outstanding driver-initiated load notifications. These
59 * notifications have been allocated by driver, and thus must be freed by the
60 * driver in the event of an emergency unload. If we don't free them free
61 * them ourselves, they'll leak. Granted, the world is probably melting down
62 * at that point, but there's no reason why we shouldn't tidy up the deck
63 * chairs before we go.
64 */
65 static kmdb_wr_load_t *kctl_dmod_loads;
66 static kmutex_t kctl_dmod_loads_lock;
67
68 static int
kctl_find_module(char * modname,char * fullname,size_t fullnamelen)69 kctl_find_module(char *modname, char *fullname, size_t fullnamelen)
70 {
71 intptr_t fd;
72 int i;
73
74 /* If they gave us an absolute path, we don't need to search */
75 if (modname[0] == '/') {
76 if (strlen(modname) + 1 > fullnamelen) {
77 cmn_err(CE_WARN, "Can't load dmod %s - name too long",
78 modname);
79 return (0);
80 }
81
82 if ((fd = kobj_open(modname)) == -1)
83 return (0);
84 kobj_close(fd);
85
86 (void) strcpy(fullname, modname);
87
88 return (1);
89 }
90
91 for (i = 0; kctl_dmod_path->dpth_path[i] != NULL; i++) {
92 const char *path = kctl_dmod_path->dpth_path[i];
93
94 if (strlen(path) + 1 + strlen(modname) + 1 > fullnamelen) {
95 kctl_dprintf("Can't load dmod from %s/%s - "
96 "name too long", path, modname);
97 continue;
98 }
99
100 (void) snprintf(fullname, fullnamelen, "%s/%s", path, modname);
101
102 if ((fd = kobj_open(fullname)) == -1)
103 continue;
104
105 kobj_close(fd);
106
107 kctl_dprintf("kobj_open %s found", fullname);
108
109 /* Found it */
110 return (1);
111 }
112
113 /* No luck */
114 return (0);
115 }
116
117 static void
kctl_dlr_free(kmdb_wr_load_t * dlr)118 kctl_dlr_free(kmdb_wr_load_t *dlr)
119 {
120 if (dlr->dlr_node.wn_flags & WNFLAGS_NOFREE)
121 return;
122
123 kctl_strfree(dlr->dlr_fname);
124 kmem_free(dlr, sizeof (kmdb_wr_load_t));
125 }
126
/*
 * Load the dmod named by dlr->dlr_fname.  On success, the new modctl is
 * stored in dlr->dlr_modctl and 0 is returned.  On failure, -1 is returned
 * and dlr->dlr_errno is set to EEXIST (already loaded), ENOENT (not found),
 * or EMDB_NOMOD (found but failed to load).
 */
int
kctl_dmod_load(kmdb_wr_load_t *dlr)
{
	struct modctl *modp;
	char modpath[MAXPATHLEN];
	const char *modname = kctl_basename(dlr->dlr_fname);
	int rc;

	mutex_enter(&kctl_dmods_lock);

	/* Have we already loaded this dmod? */
	for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
	    modp = modp->mod_next) {
		if (strcmp(modname, modp->mod_modname) == 0) {
			mutex_exit(&kctl_dmods_lock);
			dlr->dlr_errno = EEXIST;
			return (-1);
		}
	}

	/*
	 * If we find something that looks like a dmod, create a modctl for it,
	 * and add said modctl to our dmods list.  This will allow us to drop
	 * the dmods lock, while still preventing duplicate loads.  If we aren't
	 * able to actually load the dmod, we can always remove the modctl
	 * later.
	 */
	if (!kctl_find_module(dlr->dlr_fname, modpath, sizeof (modpath))) {
		mutex_exit(&kctl_dmods_lock);
		dlr->dlr_errno = ENOENT;
		return (-1);
	}

	modp = kobj_zalloc(sizeof (struct modctl), KM_SLEEP);

	modp->mod_filename = kctl_strdup(modpath);
	modp->mod_modname = kctl_basename(modp->mod_filename);
	modp->mod_busy = 1;
	modp->mod_loadflags |= MOD_NOAUTOUNLOAD | MOD_NONOTIFY;
	/* Splice onto the tail of the circular kctl_dmods list. */
	modp->mod_next = &kctl_dmods;
	modp->mod_prev = kctl_dmods.mod_prev;
	modp->mod_prev->mod_next = modp;
	kctl_dmods.mod_prev = modp;

	mutex_exit(&kctl_dmods_lock);

	/* During boot (kctl_boot_ops set), use krtld's primary-module path. */
	if (kctl.kctl_boot_ops == NULL)
		rc = kobj_load_module(modp, 0);
	else
		rc = kobj_load_primary_module(modp);

	if (rc != 0) {
		kctl_warn("failed to load dmod %s", modp->mod_modname);

		if (kctl.kctl_boot_ops == NULL)
			mod_release_requisites(modp);

		/* Undo the list insertion made above. */
		mutex_enter(&kctl_dmods_lock);
		modp->mod_next->mod_prev = modp->mod_prev;
		modp->mod_prev->mod_next = modp->mod_next;
		mutex_exit(&kctl_dmods_lock);

		kctl_strfree(modp->mod_filename);
		kobj_free(modp, sizeof (struct modctl));

		dlr->dlr_errno = EMDB_NOMOD;
		return (-1);
	}

	/*
	 * It worked!  If the module has any CTF data, decompress it, and make a
	 * note of the load.  CTF decompression failure is non-fatal: we warn
	 * and keep the dmod loaded.
	 */
	mutex_enter(&mod_lock);
	if ((rc = kctl_mod_decompress(modp)) != 0) {
		kctl_warn("failed to decompress CTF data for dmod %s: %s",
		    modpath, ctf_errmsg(rc));
	}
	mutex_exit(&mod_lock);

	kctl_dprintf("loaded dmod %s at %p", modpath, modp);

	modp->mod_ref = 1;
	modp->mod_loaded = 1;

	dlr->dlr_modctl = modp;

	return (0);
}
216
/*
 * Driver-initiated loads.  Load the module and announce it to the debugger.
 * On load failure the notification is silently discarded; kctl_dmod_load()
 * has already emitted any warnings.
 */
void
kctl_dmod_autoload(const char *fname)
{
	kmdb_wr_load_t *dlr;

	/* Build a load request as if the debugger had asked for it. */
	dlr = kobj_zalloc(sizeof (kmdb_wr_load_t), KM_SLEEP);
	dlr->dlr_node.wn_task = WNTASK_DMOD_LOAD;
	dlr->dlr_fname = kctl_strdup(fname);

	/*
	 * If we're loading at boot, the kmdb_wr_load_t will have been
	 * "allocated" by krtld, and will thus not be under the control of
	 * kmem.  We need to ensure that we don't attempt to free it when
	 * we get it back from the debugger.
	 */
	if (kctl.kctl_boot_ops != NULL)
		dlr->dlr_node.wn_flags |= WNFLAGS_NOFREE;

	if (kctl_dmod_load(dlr) < 0) {
		kctl_dlr_free(dlr);
		return;
	}

	/*
	 * Add to the list of open driver-initiated loads.  We need to track
	 * these so we can free them (and thus avoid leaks) in the event that
	 * the debugger needs to be blown away before it can return them.
	 */
	mutex_enter(&kctl_dmod_loads_lock);
	dlr->dlr_next = kctl_dmod_loads;
	if (kctl_dmod_loads != NULL)
		kctl_dmod_loads->dlr_prev = dlr;
	kctl_dmod_loads = dlr;
	mutex_exit(&kctl_dmod_loads_lock);

	/* Hand the notification to the debugger; it is returned via _ack(). */
	kmdb_wr_debugger_notify(dlr);
}
257
258 void
kctl_dmod_load_all(void)259 kctl_dmod_load_all(void)
260 {
261 /*
262 * The standard list of modules isn't populated until the tail end of
263 * kobj_init(). Prior to that point, the only available list is that of
264 * primaries. We'll use that if the normal list isn't ready yet.
265 */
266 if (modules.mod_mp == NULL) {
267 /* modules hasn't been initialized yet -- use primaries */
268 struct modctl_list *ml;
269
270 for (ml = kobj_linkmaps[KOBJ_LM_PRIMARY]; ml != NULL;
271 ml = ml->modl_next)
272 kctl_dmod_autoload(ml->modl_modp->mod_modname);
273
274 } else {
275 struct modctl *modp = &modules;
276
277 do {
278 if (modp->mod_mp != NULL)
279 kctl_dmod_autoload(modp->mod_modname);
280 } while ((modp = modp->mod_next) != &modules);
281 }
282 }
283
284 void
kctl_dmod_load_ack(kmdb_wr_load_t * dlr)285 kctl_dmod_load_ack(kmdb_wr_load_t *dlr)
286 {
287 /* Remove from the list of open driver-initiated requests */
288 mutex_enter(&kctl_dmod_loads_lock);
289 if (dlr->dlr_prev == NULL)
290 kctl_dmod_loads = dlr->dlr_next;
291 else
292 dlr->dlr_prev->dlr_next = dlr->dlr_next;
293
294 if (dlr->dlr_next != NULL)
295 dlr->dlr_next->dlr_prev = dlr->dlr_prev;
296 mutex_exit(&kctl_dmod_loads_lock);
297
298 kctl_dlr_free(dlr);
299 }
300
/*
 * Unload a single dmod: verify it is on the kctl_dmods list, unload its
 * object, release its requisites, unlink it, and free its modctl.  Returns
 * 0 on success or ENOENT if modp is not a dmod we loaded.
 *
 * NOTE(review): kctl_dmods_lock is dropped between the membership check and
 * the unload/unlink below — presumably safe because unloads are serialized
 * by the work-queue/shutdown context of the callers; confirm before reuse.
 */
static int
kctl_dmod_unload_common(struct modctl *modp)
{
	struct modctl *m;

	kctl_dprintf("unloading dmod %s", modp->mod_modname);

	/* Confirm that modp is actually one of ours. */
	mutex_enter(&kctl_dmods_lock);
	for (m = kctl_dmods.mod_next; m != &kctl_dmods; m = m->mod_next) {
		if (m == modp)
			break;
	}
	mutex_exit(&kctl_dmods_lock);

	if (m != modp)
		return (ENOENT);

	/* Found it */
	modp->mod_ref = 0;
	modp->mod_loaded = 0;

	kobj_unload_module(modp);

	mod_release_requisites(modp);

	/* Remove it from our dmods list */
	mutex_enter(&kctl_dmods_lock);
	modp->mod_next->mod_prev = modp->mod_prev;
	modp->mod_prev->mod_next = modp->mod_next;
	mutex_exit(&kctl_dmods_lock);

	kctl_strfree(modp->mod_filename);
	kmem_free(modp, sizeof (struct modctl));

	return (0);
}
337
338 void
kctl_dmod_unload(kmdb_wr_unload_t * dur)339 kctl_dmod_unload(kmdb_wr_unload_t *dur)
340 {
341 int rc;
342
343 if ((rc = kctl_dmod_unload_common(dur->dur_modctl)) != 0) {
344 cmn_err(CE_WARN, "unexpected dmod unload failure: %d", rc);
345 dur->dur_errno = rc;
346 }
347 }
348
349 /*
350 * This will be called during shutdown. The debugger has been stopped, we're
351 * off the module notification list, and we've already processed everything in
352 * the driver's work queue. We should have received (and processed) unload
353 * requests for each of the dmods we've loaded. To be safe, however, we'll
354 * double-check.
355 *
356 * If we're doing an emergency shutdown, there may be outstanding
357 * driver-initiated messages that haven't been returned to us. The debugger is
358 * dead, so it's not going to be returning them. We'll leak them unless we
359 * find and free them ourselves.
360 */
361 void
kctl_dmod_unload_all(void)362 kctl_dmod_unload_all(void)
363 {
364 kmdb_wr_load_t *dlr;
365 struct modctl *modp;
366
367 while ((modp = kctl_dmods.mod_next) != &kctl_dmods)
368 (void) kctl_dmod_unload_common(modp);
369
370 while ((dlr = kctl_dmod_loads) != NULL) {
371 kctl_dmod_loads = dlr->dlr_next;
372
373 kctl_dprintf("freed orphan load notification for %s",
374 dlr->dlr_fname);
375 kctl_dlr_free(dlr);
376 }
377 }
378
379 kmdb_wr_path_t *
kctl_dmod_path_set(kmdb_wr_path_t * pth)380 kctl_dmod_path_set(kmdb_wr_path_t *pth)
381 {
382 kmdb_wr_path_t *opth;
383
384 if (kctl.kctl_flags & KMDB_F_DRV_DEBUG) {
385 if (pth != NULL) {
386 int i;
387 kctl_dprintf("changing dmod path to: %p", pth);
388 for (i = 0; pth->dpth_path[i] != NULL; i++)
389 kctl_dprintf(" %s", pth->dpth_path[i]);
390 } else {
391 kctl_dprintf("changing dmod path to NULL");
392 }
393 }
394
395 opth = kctl_dmod_path;
396 kctl_dmod_path = pth;
397
398 return (opth);
399 }
400
401 void
kctl_dmod_path_reset(void)402 kctl_dmod_path_reset(void)
403 {
404 kmdb_wr_path_t *pth;
405
406 if ((pth = kctl_dmod_path_set(NULL)) != NULL) {
407 WR_ACK(pth);
408 kmdb_wr_debugger_notify(pth);
409 }
410 }
411
412 void
kctl_dmod_sync(void)413 kctl_dmod_sync(void)
414 {
415 struct modctl *modp;
416
417 /*
418 * kobj_sync() has no visibility into our dmods, so we need to
419 * explicitly tell krtld to export the portions of our dmods that were
420 * allocated using boot scratch memory.
421 */
422 for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
423 modp = modp->mod_next)
424 kobj_export_module(modp->mod_mp);
425 }
426
427 void
kctl_dmod_init(void)428 kctl_dmod_init(void)
429 {
430 mutex_init(&kctl_dmod_loads_lock, NULL, MUTEX_DRIVER, NULL);
431 mutex_init(&kctl_dmods_lock, NULL, MUTEX_DRIVER, NULL);
432
433 bzero(&kctl_dmods, sizeof (struct modctl));
434 kctl_dmods.mod_next = kctl_dmods.mod_prev = &kctl_dmods;
435 kdi_dmods = &kctl_dmods;
436 }
437
438 void
kctl_dmod_fini(void)439 kctl_dmod_fini(void)
440 {
441 mutex_destroy(&kctl_dmods_lock);
442 mutex_destroy(&kctl_dmod_loads_lock);
443 kdi_dmods = NULL;
444 }
445