Lines Matching defs:kd

68 static int kvm_nlist_core(kvm_t *kd, struct nlist nl[], const char *err);
71 fail(kvm_t *kd, const char *err, const char *message, ...)
76 if (err || (kd && kd->kvm_debug)) {
82 if (kd != NULL)
83 (void) kvm_close(kd);
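
The fragments above are libkvm's internal fail() helper: it prints a diagnostic only when the caller passed an err prefix or KVM_DEBUG is set, tears down the partially constructed handle with kvm_close(), and returns NULL so that kvm_open() can bail out with a single return statement. A minimal sketch of that error-path pattern, using hypothetical names in place of the real kvm_t and kvm_close():

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for kvm_t; only the error-path shape matters here. */
typedef struct handle { int debug; } handle_t;

static void *
open_fail(handle_t *h, const char *err, const char *fmt, ...)
{
        va_list ap;

        if (err != NULL || (h != NULL && h->debug)) {
                (void) fprintf(stderr, "%s: ", err != NULL ? err : "debug");
                va_start(ap, fmt);
                (void) vfprintf(stderr, fmt, ap);
                va_end(ap);
                (void) fprintf(stderr, "\n");
        }
        if (h != NULL)
                free(h);                /* the real helper calls kvm_close(kd) */
        return (NULL);
}
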
92 kvm_t *kd;
96 if ((kd = calloc(1, sizeof (kvm_t))) == NULL)
99 kd->kvm_corefd = kd->kvm_kmemfd = kd->kvm_memfd = -1;
100 kd->kvm_debug = getenv("KVM_DEBUG");
102 if ((kd->kvm_openflag = flag) != O_RDONLY && flag != O_RDWR)
103 return (fail(kd, err, "illegal flag 0x%x to kvm_open()", flag));
109 return (fail(kd, err, "cannot stat %s", corefile));
113 return (fail(kd, err, "cannot stat /dev/mem"));
116 return (fail(kd, err, "cannot stat /dev/kmem"));
119 return (fail(kd, err, "cannot stat /dev/allkmem"));
126 if ((kd->kvm_kmemfd = open64(kmem, flag)) == -1)
127 return (fail(kd, err, "cannot open %s", kmem));
128 if ((kd->kvm_memfd = open64("/dev/mem", flag)) == -1)
129 return (fail(kd, err, "cannot open /dev/mem"));
132 if ((kd->kvm_corefd = open64(corefile, flag)) == -1)
133 return (fail(kd, err, "cannot open %s", corefile));
134 if (pread64(kd->kvm_corefd, &kd->kvm_dump,
135 sizeof (kd->kvm_dump), 0) != sizeof (kd->kvm_dump))
136 return (fail(kd, err, "cannot read dump header"));
137 if (kd->kvm_dump.dump_magic != DUMP_MAGIC)
138 return (fail(kd, err, "%s is not a kernel core file "
140 kd->kvm_dump.dump_magic));
141 if (kd->kvm_dump.dump_version != DUMP_VERSION)
142 return (fail(kd, err,
144 DUMP_VERSION, kd->kvm_dump.dump_version));
145 if (kd->kvm_dump.dump_wordsize != DUMP_WORDSIZE)
146 return (fail(kd, err, "%s is a %d-bit core file - "
148 kd->kvm_dump.dump_wordsize, DUMP_WORDSIZE));
154 kd->kvm_coremapsize = (size_t)corestat.st_size;
156 (kd->kvm_core = mmap64(0, kd->kvm_coremapsize,
157 PROT_READ, MAP_SHARED, kd->kvm_corefd, 0)) == MAP_FAILED) {
158 kd->kvm_coremapsize = kd->kvm_dump.dump_data;
159 if ((kd->kvm_core = mmap64(0, kd->kvm_coremapsize,
160 PROT_READ, MAP_SHARED, kd->kvm_corefd, 0)) ==
162 return (fail(kd, err, "cannot mmap corefile"));
164 kd->kvm_map = (void *)(kd->kvm_core + kd->kvm_dump.dump_map);
165 kd->kvm_pfn = (void *)(kd->kvm_core + kd->kvm_dump.dump_pfn);
171 (void) strncpy(kd->kvm_namelist, namelist, MAXNAMELEN);
173 if (kvm_nlist(kd, nl) == -1) {
174 if (kd->kvm_corefd == -1) {
175 return (fail(kd, err, "%s is not a %d-bit "
179 if (kvm_nlist_core(kd, nl, err) == -1)
183 kd->kvm_kas = (struct as *)nl[0].n_value;
184 kd->kvm_practive = (proc_t *)nl[1].n_value;
186 (void) kvm_setproc(kd);
187 return (kd);
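
Lines 134-148 show the crash-dump path of kvm_open(): a dumphdr_t is read from offset 0 of the core file and rejected unless its magic number, dump format version, and word size all match what this build of the library expects. A hedged sketch of those three checks, assuming <sys/dumphdr.h> supplies dumphdr_t, DUMP_MAGIC, DUMP_VERSION, and DUMP_WORDSIZE as the listing implies:

#include <sys/dumphdr.h>

/*
 * Sketch: validate a dump header already read from the start of the
 * core file.  Returns 0 if it looks like a usable kernel core.
 */
static int
check_dump_header(const dumphdr_t *dump)
{
        if (dump->dump_magic != DUMP_MAGIC)
                return (-1);    /* not a kernel core file at all */
        if (dump->dump_version != DUMP_VERSION)
                return (-1);    /* written by a different dump format */
        if (dump->dump_wordsize != DUMP_WORDSIZE)
                return (-1);    /* e.g. 32-bit dump read by 64-bit libkvm */
        return (0);
}

The word-size check is what produces the "%s is a %d-bit core file" message at line 146: a core saved by a kernel of one data model cannot be interpreted by a libkvm built for the other.
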
191 kvm_close(kvm_t *kd)
193 if (kd->kvm_core != NULL && kd->kvm_core != MAP_FAILED)
194 (void) munmap(kd->kvm_core, kd->kvm_coremapsize);
195 if (kd->kvm_corefd != -1)
196 (void) close(kd->kvm_corefd);
197 if (kd->kvm_kmemfd != -1)
198 (void) close(kd->kvm_kmemfd);
199 if (kd->kvm_memfd != -1)
200 (void) close(kd->kvm_memfd);
201 if (kd->kvm_namelist_core)
202 (void) unlink(kd->kvm_namelist);
203 free(kd);
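
kvm_open() and kvm_close() bracket every libkvm session; kvm_close() unmaps the core image, closes whichever of the core, /dev/kmem and /dev/mem descriptors were opened, unlinks the temporary namelist if kvm_nlist_core() created one, and frees the handle. A usage sketch of the lifecycle, assuming the kvm_open(3KVM) signature (namelist, corefile, swapfile, flag, errstr); passing NULL for the first three selects the live kernel's defaults, and a non-NULL errstr is the err prefix threaded through fail() above:

#include <kvm.h>
#include <fcntl.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
        char *corefile = (argc > 1) ? argv[1] : NULL;   /* dump, or NULL for live kernel */
        kvm_t *kd;

        if ((kd = kvm_open(NULL, corefile, NULL, O_RDONLY, "example")) == NULL) {
                (void) fprintf(stderr, "example: kvm_open failed\n");
                return (1);
        }
        /* ... kvm_nlist(), kvm_kread(), kvm_setproc() work goes here ... */
        (void) kvm_close(kd);
        return (0);
}
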
208 kvm_namelist(kvm_t *kd)
210 return (kd->kvm_namelist);
214 kvm_nlist(kvm_t *kd, struct nlist nl[])
216 return (nlist(kd->kvm_namelist, nl));
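
kvm_namelist() reports which symbol file the handle ended up with, and kvm_nlist() simply runs nlist(3ELF) against it, whether that is the namelist passed to kvm_open() or the temporary file extracted from the dump by kvm_nlist_core(). Lines 183-184 show kvm_open() itself consuming a two-entry nlist array for the kernel's address space and process list head. A hedged sketch of the same pattern for a single caller-chosen symbol; the helper name and the null-terminated-array convention are drawn from ordinary nlist(3ELF) usage:

#include <kvm.h>
#include <nlist.h>
#include <string.h>
#include <sys/types.h>

/*
 * Resolve one kernel symbol through the handle's namelist.  The array
 * must end with an entry whose n_name is NULL.
 */
static int
lookup_symbol(kvm_t *kd, const char *name, uintptr_t *addrp)
{
        struct nlist nl[2];

        (void) memset(nl, 0, sizeof (nl));
        nl[0].n_name = (char *)name;
        nl[1].n_name = NULL;
        if (kvm_nlist(kd, nl) == -1 || nl[0].n_value == 0)
                return (-1);
        *addrp = (uintptr_t)nl[0].n_value;
        return (0);
}
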
226 kvm_nlist_core(kvm_t *kd, struct nlist nl[], const char *err)
228 dumphdr_t *dump = &kd->kvm_dump;
234 (void) fail(kd, err, "%s: kernel symbols are compressed", msg);
238 if (dump->dump_ksyms + dump->dump_ksyms_size > kd->kvm_coremapsize) {
239 (void) fail(kd, err, "%s: kernel symbols not mapped", msg);
248 (void) snprintf(kd->kvm_namelist, MAXNAMELEN, template, getpid());
250 if ((fd = mkstemp(kd->kvm_namelist)) == -1) {
251 (void) fail(kd, err, "%s: couldn't create temporary "
256 kd->kvm_namelist_core = B_TRUE;
259 rval = write(fd, (caddr_t)((uintptr_t)kd->kvm_core +
264 (void) fail(kd, err, "%s: couldn't write to temporary "
272 if (kvm_nlist(kd, nl) == -1) {
273 (void) fail(kd, err, "%s: symbols not valid", msg);
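
kvm_nlist_core() covers dumps opened without a usable symbol file on disk: the kernel's symbol table is embedded in the dump itself (dump_ksyms/dump_ksyms_size), so the routine refuses compressed symbols, verifies the region lies inside the mapped core, writes it to a mkstemp(3C) temporary file (marking kvm_namelist_core so kvm_close() later unlinks it), and finally points kvm_nlist() at that file. A hedged sketch of the extract-to-tempfile step; the helper and the template path are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Sketch: write an in-memory symbol-table image to a temporary file so
 * nlist(3ELF) can be pointed at it.  On success the file's path is in
 * 'path' and the open descriptor is returned; the caller unlinks the
 * file when it is finished with it.
 */
static int
write_ksyms_tempfile(const void *ksyms, size_t size, char *path, size_t pathlen)
{
        int fd;

        (void) snprintf(path, pathlen, "/tmp/kvm_ksyms.%ld.XXXXXX",
            (long)getpid());
        if ((fd = mkstemp(path)) == -1)
                return (-1);
        if (write(fd, ksyms, size) != (ssize_t)size) {
                (void) close(fd);
                (void) unlink(path);
                return (-1);
        }
        return (fd);
}
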
281 kvm_lookup(kvm_t *kd, struct as *as, uint64_t addr)
283 uintptr_t pageoff = addr & (kd->kvm_dump.dump_pagesize - 1);
287 if (kd->kvm_debug)
292 long last = kd->kvm_dump.dump_npages - 1;
293 pfn_t target = (pfn_t)(page >> kd->kvm_dump.dump_pageshift);
296 pfn_t pfn = kd->kvm_pfn[middle];
297 if (kd->kvm_debug)
300 off = kd->kvm_dump.dump_data + pageoff +
302 kd->kvm_dump.dump_pageshift);
311 long hash = DUMP_HASH(&kd->kvm_dump, as, page);
312 off = kd->kvm_map[hash].dm_first;
314 dump_map_t *dmp = (void *)(kd->kvm_core + off);
315 if (kd->kvm_debug)
317 if (dmp < kd->kvm_map ||
318 dmp > kd->kvm_map + kd->kvm_dump.dump_hashmask ||
320 DUMP_HASH(&kd->kvm_dump, dmp->dm_as, dmp->dm_va) !=
322 if (kd->kvm_debug)
333 if (kd->kvm_debug)
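
kvm_lookup() turns an (address space, address) pair into an offset inside the core file. Two paths are visible above: physical addresses are resolved by binary search of the sorted PFN array mapped at dump_pfn (lines 292-302), while kernel and user virtual addresses hash (as, page) with DUMP_HASH and walk the dump_map chain rooted at that bucket, sanity-checking every dump_map_t they follow (lines 311-322). A hedged sketch of the PFN binary search only; unsigned long stands in for the system pfn_t, and the offset arithmetic mirrors what the listing shows:

#include <sys/types.h>

/*
 * Sketch: find the core-file offset of one physical page by binary
 * search over the dump's sorted PFN array.  dump_data is where saved
 * page contents begin in the core file; index i in pfns[] corresponds
 * to the i-th saved page.  Returns 0 if the page was not dumped.
 */
static off_t
pfn_to_offset(const unsigned long *pfns, long npages, unsigned long target,
    off_t dump_data, int pageshift, off_t pageoff)
{
        long first = 0, last = npages - 1;

        while (first <= last) {
                long middle = (first + last) / 2;

                if (pfns[middle] == target) {
                        return (dump_data + pageoff +
                            ((off_t)middle << pageshift));
                }
                if (pfns[middle] < target)
                        first = middle + 1;
                else
                        last = middle - 1;
        }
        return (0);     /* page not present in the dump */
}
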
339 kvm_rw(kvm_t *kd, uint64_t addr, void *buf, size_t size,
351 if (kd->kvm_core == NULL) {
356 if (as == kd->kvm_kas)
357 return (prw(kd->kvm_kmemfd, buf, size, addr));
359 return (prw(kd->kvm_memfd, buf, size, addr));
361 (void) sprintf(procbuf, "/proc/%ld/as", kd->kvm_pid);
362 if ((procfd = open64(procbuf, kd->kvm_openflag)) == -1)
370 uintptr_t pageoff = addr & (kd->kvm_dump.dump_pagesize - 1);
371 ssize_t len = MIN(resid, kd->kvm_dump.dump_pagesize - pageoff);
373 if ((off = kvm_lookup(kd, as, addr)) == 0)
376 if (prw == PREAD && off < kd->kvm_coremapsize)
377 bcopy(kd->kvm_core + off, buf, len);
378 else if ((len = prw(kd->kvm_corefd, buf, len, off)) <= 0)
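
kvm_rw() is the single engine behind every kvm_*read()/kvm_*write() wrapper. On a live kernel (kvm_core == NULL) it pread/pwrites the appropriate device directly: /dev/kmem for the kernel address space, /dev/mem when no address space is supplied (physical addresses), or /proc/<pid>/as for the currently selected user process. Against a dump it works a page at a time: mask off the page offset, clamp the transfer to the page boundary, translate the page with kvm_lookup(), then copy from the mmap'ed core image (or pread the core file for data beyond the mapped window). A hedged sketch of that dump-side loop shape; translate() stands in for the internal kvm_lookup():

#include <strings.h>
#include <sys/types.h>

/*
 * Sketch: copy 'size' bytes starting at virtual address 'addr' out of
 * an mmap'ed core image, one page at a time.  translate() is a
 * stand-in for kvm_lookup() and returns 0 for pages not in the dump.
 */
static ssize_t
dump_read(const char *core, size_t pagesize, uint64_t addr, void *buf,
    size_t size, off_t (*translate)(uint64_t))
{
        size_t resid = size;
        char *dst = buf;

        while (resid != 0) {
                size_t pageoff = (size_t)(addr & (pagesize - 1));
                size_t len = pagesize - pageoff;
                off_t off;

                if (len > resid)
                        len = resid;
                if ((off = translate(addr)) == 0)
                        break;          /* page was not saved in the dump */
                bcopy(core + off, dst, len);
                addr += len;
                dst += len;
                resid -= len;
        }
        return ((ssize_t)(size - resid));
}
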
388 kvm_read(kvm_t *kd, uintptr_t addr, void *buf, size_t size)
390 return (kvm_rw(kd, addr, buf, size, kd->kvm_kas, PREAD));
394 kvm_kread(kvm_t *kd, uintptr_t addr, void *buf, size_t size)
396 return (kvm_rw(kd, addr, buf, size, kd->kvm_kas, PREAD));
400 kvm_uread(kvm_t *kd, uintptr_t addr, void *buf, size_t size)
402 return (kvm_rw(kd, addr, buf, size, kd->kvm_proc.p_as, PREAD));
406 kvm_aread(kvm_t *kd, uintptr_t addr, void *buf, size_t size, struct as *as)
408 return (kvm_rw(kd, addr, buf, size, as, PREAD));
412 kvm_pread(kvm_t *kd, uint64_t addr, void *buf, size_t size)
414 return (kvm_rw(kd, addr, buf, size, NULL, PREAD));
418 kvm_write(kvm_t *kd, uintptr_t addr, const void *buf, size_t size)
420 return (kvm_rw(kd, addr, (void *)buf, size, kd->kvm_kas, PWRITE));
424 kvm_kwrite(kvm_t *kd, uintptr_t addr, const void *buf, size_t size)
426 return (kvm_rw(kd, addr, (void *)buf, size, kd->kvm_kas, PWRITE));
430 kvm_uwrite(kvm_t *kd, uintptr_t addr, const void *buf, size_t size)
432 return (kvm_rw(kd, addr, (void *)buf, size, kd->kvm_proc.p_as, PWRITE));
436 kvm_awrite(kvm_t *kd, uintptr_t addr, const void *buf, size_t size,
439 return (kvm_rw(kd, addr, (void *)buf, size, as, PWRITE));
443 kvm_pwrite(kvm_t *kd, uint64_t addr, const void *buf, size_t size)
445 return (kvm_rw(kd, addr, (void *)buf, size, NULL, PWRITE));
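
The wrappers above differ only in the address space and direction handed to kvm_rw(): kvm_read()/kvm_kread() and kvm_write()/kvm_kwrite() use the kernel's address space, kvm_uread()/kvm_uwrite() use the address space of the process currently selected by the iterator below, kvm_aread()/kvm_awrite() take an explicit struct as pointer, and kvm_pread()/kvm_pwrite() pass a NULL address space, i.e. operate on physical addresses. A short usage sketch that reads one long-sized kernel variable, reusing the lookup_symbol() helper sketched earlier; the symbol name is only an example:

#include <kvm.h>
#include <stdio.h>

/*
 * Read a long-sized kernel variable by name.  Relies on the
 * lookup_symbol() sketch shown after kvm_nlist() above.
 */
static int
read_kernel_long(kvm_t *kd, const char *name, long *valp)
{
        uintptr_t addr;

        if (lookup_symbol(kd, name, &addr) != 0)
                return (-1);
        if (kvm_kread(kd, addr, valp, sizeof (*valp)) != sizeof (*valp))
                return (-1);
        return (0);
}

/* e.g.: long val; if (read_kernel_long(kd, "physmem", &val) == 0) ... */
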
449 kvm_physaddr(kvm_t *kd, struct as *as, uintptr_t addr)
454 if (kd->kvm_core == NULL) {
457 if (ioctl(kd->kvm_kmemfd, MEM_VTOP, &mem_vtop) == 0)
461 if ((off = kvm_lookup(kd, as, addr)) != 0) {
463 (u_offset_t)(off - kd->kvm_dump.dump_data) >>
464 kd->kvm_dump.dump_pageshift;
465 return (((uint64_t)kd->kvm_pfn[pfn_index] <<
466 kd->kvm_dump.dump_pageshift) +
467 (addr & (kd->kvm_dump.dump_pagesize - 1)));
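
kvm_physaddr() translates a virtual address to a physical one: on a live kernel it defers to the MEM_VTOP ioctl on /dev/kmem, and on a dump it reuses kvm_lookup(), converting the returned core-file offset back into an index into the PFN array and rebuilding the physical address from that PFN plus the offset within the page. A hedged usage sketch, assuming kvm_physaddr() is exported with the signature shown at line 449 and that the kernel's address space can be identified by the address of the symbol "kas", which is presumably what kvm_open() captures at line 183; the failure sentinel is not visible in this listing, so checking it is left to the caller:

#include <kvm.h>
#include <nlist.h>
#include <string.h>
#include <sys/types.h>

struct as;
/* assumed prototype, mirroring line 449; may already be declared in <kvm.h> */
extern uint64_t kvm_physaddr(kvm_t *, struct as *, uintptr_t);

/*
 * Sketch: translate a kernel virtual address to a physical address.
 * "kas" is the presumed name of the kernel's address space symbol.
 * Returns 0 only when the symbol lookup itself fails.
 */
static uint64_t
kernel_vtop(kvm_t *kd, uintptr_t vaddr)
{
        struct nlist nl[2];

        (void) memset(nl, 0, sizeof (nl));
        nl[0].n_name = "kas";
        nl[1].n_name = NULL;
        if (kvm_nlist(kd, nl) == -1 || nl[0].n_value == 0)
                return (0);
        return (kvm_physaddr(kd, (struct as *)(uintptr_t)nl[0].n_value, vaddr));
}
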
474 kvm_getproc(kvm_t *kd, pid_t pid)
476 (void) kvm_setproc(kd);
477 while (kvm_nextproc(kd) != NULL)
478 if (kd->kvm_pid == pid)
479 return (&kd->kvm_proc);
484 kvm_nextproc(kvm_t *kd)
486 if (kd->kvm_proc.p_next == NULL ||
487 kvm_kread(kd, (uintptr_t)kd->kvm_proc.p_next,
488 &kd->kvm_proc, sizeof (proc_t)) != sizeof (proc_t) ||
489 kvm_kread(kd, (uintptr_t)&kd->kvm_proc.p_pidp->pid_id,
490 &kd->kvm_pid, sizeof (pid_t)) != sizeof (pid_t))
493 return (&kd->kvm_proc);
497 kvm_setproc(kvm_t *kd)
499 (void) kvm_kread(kd, (uintptr_t)kd->kvm_practive,
500 &kd->kvm_proc.p_next, sizeof (proc_t *));
501 kd->kvm_pid = -1;
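
The last three routines form a simple iterator over the kernel's active process list: kvm_setproc() rewinds it by reading the head pointer from practive, kvm_nextproc() follows p_next, copying each proc_t and its pid into the handle, and kvm_getproc() is just a linear scan of that iteration for one particular pid. A hedged usage sketch that prints every pid; it assumes the conventional _KMEMUSER define is what exposes the kernel's proc_t layout to user code, and it re-reads the pid through p_pidp exactly as line 489 does, since kvm_pid itself is private to the handle:

#define _KMEMUSER       1       /* assumed: exposes kernel structure layouts */
#include <sys/types.h>
#include <sys/proc.h>
#include <kvm.h>
#include <stdio.h>

/*
 * Walk the process list of a live kernel or crash dump and print the
 * pid of every process.
 */
static void
print_pids(kvm_t *kd)
{
        struct proc *p;
        pid_t pid;

        (void) kvm_setproc(kd);         /* rewind to the head of the list */
        while ((p = kvm_nextproc(kd)) != NULL) {
                /* p is a user-space copy; p->p_pidp holds a kernel address */
                if (kvm_kread(kd, (uintptr_t)&p->p_pidp->pid_id,
                    &pid, sizeof (pid)) == sizeof (pid))
                        (void) printf("%ld\n", (long)pid);
        }
}
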
507 kvm_getu(kvm_t *kd, struct proc *p)