Lines matching refs:info: references to the info symbol in the FreeBSD kcov(4) kernel coverage driver. Each hit shows the source line number, the matching line, and the enclosing function with the kind of reference (local or argument).
140 static int kcov_alloc(struct kcov_info *info, size_t entries);
141 static void kcov_free(struct kcov_info *info);
167 struct kcov_info *info; in get_kinfo() local
183 info = td->td_kcov_info; in get_kinfo()
184 if (info == NULL || in get_kinfo()
185 atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING) in get_kinfo()
188 return (info); in get_kinfo()
195 struct kcov_info *info; in trace_pc() local
199 info = get_kinfo(td); in trace_pc()
200 if (info == NULL) in trace_pc()
206 if (info->mode != KCOV_MODE_TRACE_PC) in trace_pc()
209 KASSERT(info->kvaddr != 0, ("%s: NULL buf while running", __func__)); in trace_pc()
211 buf = (uint64_t *)info->kvaddr; in trace_pc()
215 if (index + 2 > info->entries) in trace_pc()
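
The bounds check in trace_pc() implies the layout of the PC-trace buffer: slot 0 holds a running count and slots 1..count hold recorded program counters, which is why a new store needs index + 2 slots. Below is a minimal, hedged sketch of how a consumer might drain such a buffer after it has been mmapped and a traced call has returned; dump_pc_trace() and its parameters are illustrative names, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Hedged sketch: walk a KCOV_MODE_TRACE_PC buffer.  Slot 0 is the entry
 * count maintained by trace_pc(); slots 1..count are the recorded
 * program counters.
 */
static void
dump_pc_trace(const volatile uint64_t *buf, uint64_t entries)
{
	uint64_t i, count;

	count = buf[0];			/* written by trace_pc() in the kernel */
	if (count > entries - 1)	/* clamp, mirroring the index + 2 check */
		count = entries - 1;
	for (i = 0; i < count; i++)
		printf("pc %#lx\n", (unsigned long)buf[i + 1]);
}

The clamp mirrors the kernel-side check: the last slot trace_pc() will ever write is buf[entries - 1].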
226 struct kcov_info *info; in trace_cmp() local
230 info = get_kinfo(td); in trace_cmp()
231 if (info == NULL) in trace_cmp()
237 if (info->mode != KCOV_MODE_TRACE_CMP) in trace_cmp()
240 KASSERT(info->kvaddr != 0, ("%s: NULL buf while running", __func__)); in trace_cmp()
242 buf = (uint64_t *)info->kvaddr; in trace_cmp()
248 if (index * 4 + 4 + 1 > info->entries) in trace_cmp()
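
The check index * 4 + 4 + 1 > info->entries in trace_cmp() indicates that comparison records are four 64-bit words each, stored after the count in slot 0. The sketch below assumes the conventional field order { type, arg1, arg2, pc }; that ordering is not visible in these excerpts and should be checked against the header, and dump_cmp_trace() is an illustrative name.

#include <stdint.h>
#include <stdio.h>

/*
 * Hedged sketch: walk a KCOV_MODE_TRACE_CMP buffer.  buf[0] is the
 * record count; each record is assumed to be 4 x uint64_t laid out as
 * { type, arg1, arg2, pc }, matching the bounds check in trace_cmp().
 */
static void
dump_cmp_trace(const volatile uint64_t *buf, uint64_t entries)
{
	uint64_t i, count;
	const volatile uint64_t *rec;

	count = buf[0];
	if (count * 4 + 1 > entries)		/* clamp to the buffer */
		count = (entries - 1) / 4;
	for (i = 0; i < count; i++) {
		rec = &buf[i * 4 + 1];
		printf("cmp type %#lx args %#lx %#lx at pc %#lx\n",
		    (unsigned long)rec[0], (unsigned long)rec[1],
		    (unsigned long)rec[2], (unsigned long)rec[3]);
	}
}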
271 struct kcov_info *info = arg; in kcov_mmap_cleanup() local
283 atomic_store_int(&info->state, KCOV_STATE_DYING); in kcov_mmap_cleanup()
285 thread = info->thread; in kcov_mmap_cleanup()
298 kcov_free(info); in kcov_mmap_cleanup()
304 struct kcov_info *info; in kcov_open() local
307 info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK); in kcov_open()
308 info->state = KCOV_STATE_OPEN; in kcov_open()
309 info->thread = NULL; in kcov_open()
310 info->mode = -1; in kcov_open()
312 if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0) in kcov_open()
313 kcov_mmap_cleanup(info); in kcov_open()
321 struct kcov_info *info; in kcov_close() local
324 if ((error = devfs_get_cdevpriv((void **)&info)) != 0) in kcov_close()
327 KASSERT(info != NULL, ("kcov_close with no kcov_info structure")); in kcov_close()
330 if (info->state == KCOV_STATE_RUNNING) in kcov_close()
340 struct kcov_info *info; in kcov_mmap_single() local
347 if ((error = devfs_get_cdevpriv((void **)&info)) != 0) in kcov_mmap_single()
350 if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries) in kcov_mmap_single()
353 vm_object_reference(info->bufobj); in kcov_mmap_single()
355 *object = info->bufobj; in kcov_mmap_single()
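
kcov_mmap_single() rejects any mapping whose length does not equal entries * KCOV_ELEMENT_SIZE, so userspace must derive the mmap length from the same entry count it configured. A hedged sketch follows, assuming KCOV_ELEMENT_SIZE is sizeof(uint64_t) as the uint64_t buffer casts in trace_pc()/trace_cmp() suggest; map_kcov_buffer() is an illustrative helper, not a driver or libc function.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Hedged sketch: map the coverage buffer of an already-configured kcov
 * descriptor.  The length must equal entries * KCOV_ELEMENT_SIZE or
 * kcov_mmap_single() rejects the request (see the size check above).
 */
static volatile uint64_t *
map_kcov_buffer(int fd, size_t entries)
{
	size_t len = entries * sizeof(uint64_t);	/* assumed KCOV_ELEMENT_SIZE */
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (NULL);
	return ((volatile uint64_t *)p);
}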
360 kcov_alloc(struct kcov_info *info, size_t entries) in kcov_alloc() argument
365 KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer")); in kcov_alloc()
366 KASSERT(info->state == KCOV_STATE_OPEN, in kcov_alloc()
367 ("kcov_alloc: Not in open state (%x)", info->state)); in kcov_alloc()
373 info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE); in kcov_alloc()
374 pages = info->bufsize / PAGE_SIZE; in kcov_alloc()
376 if ((info->kvaddr = kva_alloc(info->bufsize)) == 0) in kcov_alloc()
379 info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize, in kcov_alloc()
382 VM_OBJECT_WLOCK(info->bufobj); in kcov_alloc()
384 m = vm_page_grab(info->bufobj, n, in kcov_alloc()
388 pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1); in kcov_alloc()
390 VM_OBJECT_WUNLOCK(info->bufobj); in kcov_alloc()
392 info->entries = entries; in kcov_alloc()
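
kcov_alloc() rounds entries * KCOV_ELEMENT_SIZE up to a whole number of pages, then wires that many pages into the contiguous KVA range it just reserved. The arithmetic can be checked with a small standalone sketch; the macro below mirrors the usual definition of roundup2() for power-of-two alignment, and the 4 KiB page size is only illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define	ELEMENT_SIZE	sizeof(uint64_t)	/* stand-in for KCOV_ELEMENT_SIZE */
#define	PAGE_SZ		4096UL			/* illustrative page size */
/* Same shape as FreeBSD's roundup2(); the alignment must be a power of two. */
#define	ROUNDUP2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	size_t entries = 1000;
	size_t bufsize = ROUNDUP2(entries * ELEMENT_SIZE, PAGE_SZ);

	/* 1000 entries -> 8000 bytes -> 8192 bytes -> 2 pages. */
	printf("bufsize %zu, pages %zu\n", bufsize, bufsize / PAGE_SZ);
	return (0);
}

So a request for 1000 entries ends up with an 8192-byte buffer backed by two wired pages, while info->entries still records the caller's 1000.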
398 kcov_free(struct kcov_info *info) in kcov_free() argument
403 if (info->kvaddr != 0) { in kcov_free()
404 pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE); in kcov_free()
405 kva_free(info->kvaddr, info->bufsize); in kcov_free()
407 if (info->bufobj != NULL) { in kcov_free()
408 vm_page_iter_limit_init(&pages, info->bufobj, in kcov_free()
409 info->bufsize / PAGE_SIZE); in kcov_free()
410 VM_OBJECT_WLOCK(info->bufobj); in kcov_free()
413 VM_OBJECT_WUNLOCK(info->bufobj); in kcov_free()
414 vm_object_deallocate(info->bufobj); in kcov_free()
416 free(info, M_KCOV_INFO); in kcov_free()
423 struct kcov_info *info; in kcov_ioctl() local
426 if ((error = devfs_get_cdevpriv((void **)&info)) != 0) in kcov_ioctl()
434 if (info->state != KCOV_STATE_OPEN) { in kcov_ioctl()
437 error = kcov_alloc(info, *(u_int *)data); in kcov_ioctl()
439 info->state = KCOV_STATE_READY; in kcov_ioctl()
446 if (info->state != KCOV_STATE_READY) { in kcov_ioctl()
469 KASSERT(info->thread == NULL, in kcov_ioctl()
471 info->thread = td; in kcov_ioctl()
472 info->mode = mode; in kcov_ioctl()
477 atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING); in kcov_ioctl()
478 td->td_kcov_info = info; in kcov_ioctl()
482 if (info->state != KCOV_STATE_RUNNING || in kcov_ioctl()
483 info != td->td_kcov_info) { in kcov_ioctl()
495 atomic_store_int(&info->state, KCOV_STATE_READY); in kcov_ioctl()
501 info->mode = -1; in kcov_ioctl()
502 info->thread = NULL; in kcov_ioctl()
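
Read together, these ioctl excerpts describe the sequence userspace drives: the buffer-size ioctl calls kcov_alloc() and moves the descriptor from OPEN to READY, the enable ioctl binds it to the calling thread and publishes RUNNING with a release store (paired with the acquire load in get_kinfo()), and the disable path clears the binding and returns to READY. The end-to-end sketch below uses the ioctl and mode names from kcov(4); whether KIOSETBUFSIZE takes a pointer to a u_int or the value itself depends on its definition in sys/kcov.h, so the pointer form used here is an assumption, and error handling is omitted.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hedged sketch: drive the state machine seen in kcov_ioctl() for one
 * measurement.  Ioctl names are taken from kcov(4); argument forms are
 * assumptions based on the excerpts above.
 */
int
main(void)
{
	u_int entries = 1 << 16;		/* arbitrary buffer size */
	volatile uint64_t *buf;
	int fd;

	fd = open("/dev/kcov", O_RDWR);		/* state: OPEN */
	ioctl(fd, KIOSETBUFSIZE, &entries);	/* kcov_alloc(), OPEN -> READY */
	buf = mmap(NULL, entries * sizeof(uint64_t),
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC); /* bind to this thread, -> RUNNING */

	getpid();				/* any syscall to be traced */

	ioctl(fd, KIODISABLE);			/* RUNNING -> READY */
	printf("%lu program counters recorded\n", (unsigned long)buf[0]);

	munmap((void *)buf, entries * sizeof(uint64_t));
	close(fd);
	return (0);
}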
516 struct kcov_info *info; in kcov_thread_dtor() local
518 info = td->td_kcov_info; in kcov_thread_dtor()
519 if (info == NULL) in kcov_thread_dtor()
530 if (info->state != KCOV_STATE_DYING) { in kcov_thread_dtor()
535 atomic_store_int(&info->state, KCOV_STATE_READY); in kcov_thread_dtor()
538 info->thread = NULL; in kcov_thread_dtor()
552 kcov_free(info); in kcov_thread_dtor()
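
Taken together, the excerpts outline the per-descriptor state machine. The state names appear in the lines above; the ioctl names are taken from kcov(4), and the transition summary below is an inference from these excerpts rather than quoted code.

/*
 * Per-descriptor lifecycle implied by the lines above:
 *
 *   KCOV_STATE_OPEN     kcov_open(): info allocated, no buffer, mode -1
 *   KCOV_STATE_READY    KIOSETBUFSIZE: kcov_alloc() succeeded, not bound
 *   KCOV_STATE_RUNNING  KIOENABLE: info->thread = td, td->td_kcov_info = info
 *   KCOV_STATE_READY    KIODISABLE or kcov_thread_dtor(): binding cleared
 *   KCOV_STATE_DYING    kcov_mmap_cleanup(): device closed; kcov_free()
 *                       runs immediately, or later in kcov_thread_dtor()
 *                       if a traced thread is still attached
 */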