Lines Matching +full:protect +full:- +full:exec
57 int kcpc_cpuctx; /* number of cpu-specific contexts */
96 #define KCPC_CTX_FLAG_SET(ctx, flag) atomic_or_uint(&(ctx)->kc_flags, (flag))
97 #define KCPC_CTX_FLAG_CLR(ctx, flag) atomic_and_uint(&(ctx)->kc_flags, ~(flag))
101 * cross-call or from high-PIL interrupt
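The two macros above (lines 96-97) wrap atomic_or_uint()/atomic_and_uint() so kc_flags can be set and cleared without a lock even when the update arrives from a cross-call or high-PIL interrupt. A minimal user-land analogue of the same pattern, using C11 atomics (the flag names here are illustrative, not from kcpc.c):

    #include <stdatomic.h>
    #include <stdio.h>

    #define CTX_INVALID 0x1u
    #define CTX_FREEZE  0x2u

    /* Analogues of KCPC_CTX_FLAG_SET/CLR: lock-free read-modify-write. */
    static void flag_set(atomic_uint *flags, unsigned int f)
    {
        atomic_fetch_or(flags, f);
    }

    static void flag_clr(atomic_uint *flags, unsigned int f)
    {
        atomic_fetch_and(flags, ~f);
    }

    int main(void)
    {
        atomic_uint kc_flags = 0;

        flag_set(&kc_flags, CTX_INVALID | CTX_FREEZE);
        flag_clr(&kc_flags, CTX_FREEZE);
        printf("flags = 0x%x\n", atomic_load(&kc_flags));  /* prints 0x1 */
        return 0;
    }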
126 * Perform one-time initialization of kcpc framework.
140 return (-1); in kcpc_init()
154 * Load platform-specific pcbe module in kcpc_init()
158 return (kcpc_pcbe_error == 0 ? 0 : -1); in kcpc_init()
165 cpc_ncounters = pcbe_ops->pcbe_ncounters(); in kcpc_register_pcbe()
196 ctx->kc_cpuid = cpuid; in kcpc_bind_cpu()
197 ctx->kc_thread = curthread; in kcpc_bind_cpu()
199 set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP); in kcpc_bind_cpu()
202 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); in kcpc_bind_cpu()
207 set->ks_ctx = ctx; in kcpc_bind_cpu()
208 ctx->kc_set = set; in kcpc_bind_cpu()
224 mutex_enter(&cp->cpu_cpc_ctxlock); in kcpc_bind_cpu()
233 if (cp->cpu_cpc_ctx != NULL && !CU_CPC_ON(cp)) { in kcpc_bind_cpu()
239 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_bind_cpu()
243 if (curthread->t_bind_cpu != cpuid) { in kcpc_bind_cpu()
246 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_bind_cpu()
255 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_bind_cpu()
258 mutex_enter(&set->ks_lock); in kcpc_bind_cpu()
259 set->ks_state |= KCPC_SET_BOUND; in kcpc_bind_cpu()
260 cv_signal(&set->ks_condv); in kcpc_bind_cpu()
261 mutex_exit(&set->ks_lock); in kcpc_bind_cpu()
267 set->ks_ctx = NULL; in kcpc_bind_cpu()
268 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); in kcpc_bind_cpu()
284 if (t->t_cpc_ctx != NULL) in kcpc_bind_thread()
295 ctx->kc_hrtime = gethrtime(); in kcpc_bind_thread()
303 ctx->kc_cpuid = -1; in kcpc_bind_thread()
304 if (set->ks_flags & CPC_BIND_LWP_INHERIT) in kcpc_bind_thread()
306 ctx->kc_thread = t; in kcpc_bind_thread()
307 t->t_cpc_ctx = ctx; in kcpc_bind_thread()
316 set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP); in kcpc_bind_thread()
319 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); in kcpc_bind_thread()
321 t->t_cpc_ctx = NULL; in kcpc_bind_thread()
325 set->ks_ctx = ctx; in kcpc_bind_thread()
326 ctx->kc_set = set; in kcpc_bind_thread()
349 * to ensure the flags are always self-consistent; they can in kcpc_bind_thread()
356 mutex_enter(&set->ks_lock); in kcpc_bind_thread()
357 set->ks_state |= KCPC_SET_BOUND; in kcpc_bind_thread()
358 cv_signal(&set->ks_condv); in kcpc_bind_thread()
359 mutex_exit(&set->ks_lock); in kcpc_bind_thread()
375 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_configure_reqs()
377 rp = &set->ks_req[i]; in kcpc_configure_reqs()
379 n = rp->kr_picnum; in kcpc_configure_reqs()
383 ASSERT(ctx->kc_pics[n].kp_req == NULL); in kcpc_configure_reqs()
385 if (rp->kr_flags & CPC_OVF_NOTIFY_EMT) { in kcpc_configure_reqs()
386 if ((pcbe_ops->pcbe_caps & CPC_CAP_OVERFLOW_INTERRUPT) in kcpc_configure_reqs()
388 *subcode = -1; in kcpc_configure_reqs()
399 rp->kr_config = NULL; in kcpc_configure_reqs()
400 if ((ret = pcbe_ops->pcbe_configure(n, rp->kr_event, in kcpc_configure_reqs()
401 rp->kr_preset, rp->kr_flags, rp->kr_nattrs, rp->kr_attr, in kcpc_configure_reqs()
402 &(rp->kr_config), (void *)ctx)) != 0) { in kcpc_configure_reqs()
414 ctx->kc_pics[n].kp_req = rp; in kcpc_configure_reqs()
415 rp->kr_picp = &ctx->kc_pics[n]; in kcpc_configure_reqs()
416 rp->kr_data = set->ks_data + rp->kr_index; in kcpc_configure_reqs()
417 *rp->kr_data = rp->kr_preset; in kcpc_configure_reqs()
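Lines 414-417 above wire each successfully configured request to its pic and to its slot in the set's shared data buffer: kr_data points at ks_data + kr_index and is seeded with the preset. A reduced, stand-alone sketch of that wiring (the struct shape is a hypothetical stand-in for kcpc_request_t):

    #include <stdio.h>

    struct req { int index; unsigned long long preset, *data; };

    /*
     * Point each request's data word at its slot in the set's shared
     * array and seed it with the preset, as kr_data = ks_data + kr_index
     * and *kr_data = kr_preset do above.
     */
    static void wire_reqs(struct req *reqs, int nreqs,
        unsigned long long *ks_data)
    {
        for (int i = 0; i < nreqs; i++) {
            reqs[i].data = ks_data + reqs[i].index;
            *reqs[i].data = reqs[i].preset;
        }
    }

    int main(void)
    {
        struct req r[2] = { { 0, 7, 0 }, { 1, 9, 0 } };
        unsigned long long data[2];

        wire_reqs(r, 2, data);
        printf("%llu %llu\n", data[0], data[1]);  /* prints 7 9 */
        return 0;
    }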
428 for (i = 0; i < set->ks_nreqs; i++) in kcpc_free_configs()
429 if (set->ks_req[i].kr_config != NULL) in kcpc_free_configs()
430 pcbe_ops->pcbe_free(set->ks_req[i].kr_config); in kcpc_free_configs()
440 kcpc_ctx_t *ctx = set->ks_ctx; in kcpc_sample()
443 mutex_enter(&set->ks_lock); in kcpc_sample()
444 if ((set->ks_state & KCPC_SET_BOUND) == 0) { in kcpc_sample()
445 mutex_exit(&set->ks_lock); in kcpc_sample()
448 mutex_exit(&set->ks_lock); in kcpc_sample()
452 * and if this is a CPU-bound context, while checking the CPU binding of in kcpc_sample()
458 if (ctx->kc_flags & KCPC_CTX_INVALID) { in kcpc_sample()
464 if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) { in kcpc_sample()
465 if (ctx->kc_cpuid != -1) { in kcpc_sample()
466 if (curthread->t_bind_cpu != ctx->kc_cpuid) { in kcpc_sample()
473 if (ctx->kc_thread == curthread) { in kcpc_sample()
476 ctx->kc_hrtime = gethrtime_waitfree(); in kcpc_sample()
477 pcbe_ops->pcbe_sample(ctx); in kcpc_sample()
478 ctx->kc_vtick += curtick - ctx->kc_rawtick; in kcpc_sample()
479 ctx->kc_rawtick = curtick; in kcpc_sample()
486 if (ctx->kc_flags & KCPC_CTX_INVALID) { in kcpc_sample()
497 if (copyout(set->ks_data, buf, in kcpc_sample()
498 set->ks_nreqs * sizeof (uint64_t)) == -1) in kcpc_sample()
500 if (copyout(&ctx->kc_hrtime, hrtime, sizeof (uint64_t)) == -1) in kcpc_sample()
502 if (copyout(&ctx->kc_vtick, tick, sizeof (uint64_t)) == -1) in kcpc_sample()
518 if (ctx->kc_cpuid == CPU->cpu_id) { in kcpc_stop_hw()
521 cp = cpu_get(ctx->kc_cpuid); in kcpc_stop_hw()
524 ASSERT(cp != NULL && cp->cpu_cpc_ctx == ctx); in kcpc_stop_hw()
541 mutex_enter(&set->ks_lock); in kcpc_unbind()
542 while ((set->ks_state & KCPC_SET_BOUND) == 0) in kcpc_unbind()
543 cv_wait(&set->ks_condv, &set->ks_lock); in kcpc_unbind()
544 mutex_exit(&set->ks_lock); in kcpc_unbind()
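The wait loop above (lines 541-544) is the consumer half of a bind handshake: kcpc_bind_cpu() and kcpc_bind_thread() finish by setting KCPC_SET_BOUND under ks_lock and signalling ks_condv (lines 258-261 and 356-359), and kcpc_unbind() must block until that bit appears before tearing the set down. The same handshake as a runnable pthreads sketch (struct and names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define SET_BOUND 0x1u

    struct set {
        pthread_mutex_t lock;
        pthread_cond_t  condv;
        unsigned int    state;
    };

    /* Binder side: publish the BOUND bit, then wake any waiter. */
    static void mark_bound(struct set *s)
    {
        pthread_mutex_lock(&s->lock);
        s->state |= SET_BOUND;
        pthread_cond_signal(&s->condv);
        pthread_mutex_unlock(&s->lock);
    }

    /* Unbinder side: sleep until the bind has completed. */
    static void wait_bound(struct set *s)
    {
        pthread_mutex_lock(&s->lock);
        while ((s->state & SET_BOUND) == 0)
            pthread_cond_wait(&s->condv, &s->lock);
        pthread_mutex_unlock(&s->lock);
    }

    static void *binder(void *arg) { mark_bound(arg); return NULL; }

    int main(void)
    {
        struct set s = { PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, binder, &s);
        wait_bound(&s);             /* returns only once SET_BOUND is set */
        pthread_join(t, NULL);
        puts("bound");
        return 0;
    }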
546 ctx = set->ks_ctx; in kcpc_unbind()
551 mutex_enter(&ctx->kc_lock); in kcpc_unbind()
553 mutex_exit(&ctx->kc_lock); in kcpc_unbind()
555 if (ctx->kc_cpuid == -1) { in kcpc_unbind()
556 t = ctx->kc_thread; in kcpc_unbind()
558 * The context is thread-bound and therefore has a device in kcpc_unbind()
567 if (!(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED)) in kcpc_unbind()
573 t->t_cpc_set = NULL; in kcpc_unbind()
574 t->t_cpc_ctx = NULL; in kcpc_unbind()
577 * If we are unbinding a CPU-bound set from a remote CPU, the in kcpc_unbind()
589 cp = cpu_get(ctx->kc_cpuid); in kcpc_unbind()
594 mutex_enter(&cp->cpu_cpc_ctxlock); in kcpc_unbind()
595 if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) in kcpc_unbind()
597 ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED); in kcpc_unbind()
598 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_unbind()
601 if (ctx->kc_thread == curthread) { in kcpc_unbind()
603 curthread->t_cpc_set = NULL; in kcpc_unbind()
616 ASSERT(set->ks_state & KCPC_SET_BOUND); in kcpc_preset()
617 ASSERT(set->ks_ctx->kc_thread == curthread); in kcpc_preset()
618 ASSERT(set->ks_ctx->kc_cpuid == -1); in kcpc_preset()
620 if (index < 0 || index >= set->ks_nreqs) in kcpc_preset()
623 for (i = 0; i < set->ks_nreqs; i++) in kcpc_preset()
624 if (set->ks_req[i].kr_index == index) in kcpc_preset()
626 ASSERT(i != set->ks_nreqs); in kcpc_preset()
628 set->ks_req[i].kr_preset = preset; in kcpc_preset()
635 kcpc_ctx_t *ctx = set->ks_ctx; in kcpc_restart()
639 ASSERT(set->ks_state & KCPC_SET_BOUND); in kcpc_restart()
640 ASSERT(ctx->kc_thread == curthread); in kcpc_restart()
641 ASSERT(ctx->kc_cpuid == -1); in kcpc_restart()
643 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_restart()
644 *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset; in kcpc_restart()
645 pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset, in kcpc_restart()
646 0, 0, NULL, &set->ks_req[i].kr_config, NULL); in kcpc_restart()
656 if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) in kcpc_restart()
657 pcbe_ops->pcbe_allstop(); in kcpc_restart()
662 ctx->kc_rawtick = KCPC_GET_TICK(); in kcpc_restart()
664 pcbe_ops->pcbe_program(ctx); in kcpc_restart()
677 kcpc_ctx_t *ctx = t->t_cpc_ctx; in kcpc_enable()
678 kcpc_set_t *set = t->t_cpc_set; in kcpc_enable()
689 * CPU-bound set. in kcpc_enable()
691 ASSERT(t->t_cpc_set != NULL); in kcpc_enable()
692 ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1); in kcpc_enable()
694 } else if (ctx->kc_flags & KCPC_CTX_INVALID) in kcpc_enable()
698 if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) in kcpc_enable()
705 if (ctx->kc_flags & KCPC_CTX_FREEZE) in kcpc_enable()
715 * new config, then re-bind. in kcpc_enable()
723 pcbe_ops->pcbe_allstop(); in kcpc_enable()
726 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_enable()
727 set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data); in kcpc_enable()
729 set->ks_req[i].kr_flags |= flag; in kcpc_enable()
731 set->ks_req[i].kr_flags &= ~flag; in kcpc_enable()
736 t->t_cpc_set = newset; in kcpc_enable()
738 t->t_cpc_set = NULL; in kcpc_enable()
771 if (ctx->kc_pics[i].kp_req != NULL) in kcpc_next_config()
783 pic = &ctx->kc_pics[i]; in kcpc_next_config()
785 if (pic->kp_req != NULL && in kcpc_next_config()
786 current == pic->kp_req->kr_config) in kcpc_next_config()
795 pic = &ctx->kc_pics[i]; in kcpc_next_config()
796 if (pic->kp_req != NULL) in kcpc_next_config()
805 *data = ctx->kc_pics[i].kp_req->kr_data; in kcpc_next_config()
808 return (ctx->kc_pics[i].kp_req->kr_config); in kcpc_next_config()
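kcpc_next_config() is a resumable iterator: passed the previously returned kr_config as a token, it locates that entry in kc_pics[] and returns the next non-NULL configuration, optionally handing back its data pointer. The same token-style iteration in a stand-alone toy (the types are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define NPICS 4

    /* Sparse array standing in for ctx->kc_pics[]; NULL = unused pic. */
    static const char *configs[NPICS] = { "cfg0", NULL, "cfg2", "cfg3" };

    /*
     * Return the config after `current`, or the first one when
     * current == NULL; NULL once the iteration is exhausted.
     */
    static const char *next_config(const char *current)
    {
        int i = 0;

        if (current != NULL) {
            /* Find the entry returned last time... */
            while (i < NPICS && configs[i] != current)
                i++;
            i++;        /* ...and resume the scan just past it. */
        }
        for (; i < NPICS; i++)
            if (configs[i] != NULL)
                return (configs[i]);
        return (NULL);
    }

    int main(void)
    {
        for (const char *c = next_config(NULL); c != NULL;
            c = next_config(c))
            puts(c);    /* prints cfg0, cfg2, cfg3 */
        return 0;
    }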
824 ctx->kc_next = kcpc_ctx_list[hash]; in kcpc_ctx_alloc()
828 ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) * in kcpc_ctx_alloc()
831 ctx->kc_cpuid = -1; in kcpc_ctx_alloc()
843 kcpc_set_t *ks = ctx->kc_set, *cks; in kcpc_ctx_clone()
849 if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0) in kcpc_ctx_clone()
853 cks->ks_state &= ~KCPC_SET_BOUND; in kcpc_ctx_clone()
854 cctx->kc_set = cks; in kcpc_ctx_clone()
855 cks->ks_flags = ks->ks_flags; in kcpc_ctx_clone()
856 cks->ks_nreqs = ks->ks_nreqs; in kcpc_ctx_clone()
857 cks->ks_req = kmem_alloc(cks->ks_nreqs * in kcpc_ctx_clone()
859 cks->ks_data = kmem_alloc(cks->ks_nreqs * sizeof (uint64_t), in kcpc_ctx_clone()
861 cks->ks_ctx = cctx; in kcpc_ctx_clone()
863 for (i = 0; i < cks->ks_nreqs; i++) { in kcpc_ctx_clone()
864 cks->ks_req[i].kr_index = ks->ks_req[i].kr_index; in kcpc_ctx_clone()
865 cks->ks_req[i].kr_picnum = ks->ks_req[i].kr_picnum; in kcpc_ctx_clone()
866 (void) strncpy(cks->ks_req[i].kr_event, in kcpc_ctx_clone()
867 ks->ks_req[i].kr_event, CPC_MAX_EVENT_LEN); in kcpc_ctx_clone()
868 cks->ks_req[i].kr_preset = ks->ks_req[i].kr_preset; in kcpc_ctx_clone()
869 cks->ks_req[i].kr_flags = ks->ks_req[i].kr_flags; in kcpc_ctx_clone()
870 cks->ks_req[i].kr_nattrs = ks->ks_req[i].kr_nattrs; in kcpc_ctx_clone()
871 if (ks->ks_req[i].kr_nattrs > 0) { in kcpc_ctx_clone()
872 cks->ks_req[i].kr_attr = in kcpc_ctx_clone()
873 kmem_alloc(ks->ks_req[i].kr_nattrs * in kcpc_ctx_clone()
876 for (j = 0; j < ks->ks_req[i].kr_nattrs; j++) { in kcpc_ctx_clone()
877 (void) strncpy(cks->ks_req[i].kr_attr[j].ka_name, in kcpc_ctx_clone()
878 ks->ks_req[i].kr_attr[j].ka_name, in kcpc_ctx_clone()
880 cks->ks_req[i].kr_attr[j].ka_val = in kcpc_ctx_clone()
881 ks->ks_req[i].kr_attr[j].ka_val; in kcpc_ctx_clone()
887 mutex_enter(&cks->ks_lock); in kcpc_ctx_clone()
888 cks->ks_state |= KCPC_SET_BOUND; in kcpc_ctx_clone()
889 cv_signal(&cks->ks_condv); in kcpc_ctx_clone()
890 mutex_exit(&cks->ks_lock); in kcpc_ctx_clone()
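kcpc_ctx_clone() gives the child its own ks_req array, ks_data buffer, and per-request attribute arrays, copying event names with a bounded strncpy. A minimal user-land sketch of that deep copy; the struct shapes and sizes are invented stand-ins for kcpc_request_t/kcpc_attr_t:

    #include <stdlib.h>
    #include <string.h>

    #define EVLEN 64

    struct attr { char name[32]; long long val; };
    struct req  { char event[EVLEN]; int nattrs; struct attr *attr; };

    /* Deep-copy an array of requests, as the clone loop above does. */
    struct req *dup_reqs(const struct req *src, int n)
    {
        struct req *dst = malloc(n * sizeof (*dst));

        if (dst == NULL)
            return (NULL);
        for (int i = 0; i < n; i++) {
            (void) strncpy(dst[i].event, src[i].event, EVLEN);
            dst[i].nattrs = src[i].nattrs;
            dst[i].attr = NULL;
            if (src[i].nattrs > 0) {
                /* Each request owns its own attribute array. */
                dst[i].attr = malloc(src[i].nattrs *
                    sizeof (struct attr));
                if (dst[i].attr == NULL)
                    abort();    /* demo only: no recovery path */
                memcpy(dst[i].attr, src[i].attr,
                    src[i].nattrs * sizeof (struct attr));
            }
        }
        return (dst);
    }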
904 loc = &(*loc)->kc_next; in kcpc_ctx_free()
905 *loc = ctx->kc_next; in kcpc_ctx_free()
908 kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t)); in kcpc_ctx_free()
909 cv_destroy(&ctx->kc_condv); in kcpc_ctx_free()
910 mutex_destroy(&ctx->kc_lock); in kcpc_ctx_free()
918 * Note: executed at high-level interrupt context!
929 * On both x86 and UltraSPARC, we may deliver the high-level in kcpc_overflow_intr()
937 * Check for this case here -- find the pinned thread in kcpc_overflow_intr()
940 if (t->t_flag & T_INTR_THREAD) { in kcpc_overflow_intr()
951 if ((lwp = t->t_lwp) != NULL) { in kcpc_overflow_intr()
953 ctx = t->t_cpc_ctx; in kcpc_overflow_intr()
956 ctx = t->t_cpc_ctx; in kcpc_overflow_intr()
968 ctx = curthread->t_cpu->cpu_cpc_ctx; in kcpc_overflow_intr()
1014 } else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) { in kcpc_overflow_intr()
1020 ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW; in kcpc_overflow_intr()
1027 if (ctx->kc_pics[i].kp_req != NULL && in kcpc_overflow_intr()
1029 ctx->kc_pics[i].kp_req->kr_flags & in kcpc_overflow_intr()
1037 atomic_or_uint(&ctx->kc_pics[i].kp_flags, in kcpc_overflow_intr()
1042 } else if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) { in kcpc_overflow_intr()
1047 return (curthread->t_cpu->cpu_cpc_ctx); in kcpc_overflow_intr()
1055 * executing here in high-level interrupt context.
1067 (bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0) in kcpc_hw_overflow_intr()
1073 pcbe_ops->pcbe_allstop(); in kcpc_hw_overflow_intr()
1076 state = &cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state; in kcpc_hw_overflow_intr()
1079 * Set the per-CPU state bit to indicate that we are currently in kcpc_hw_overflow_intr()
1093 ctx = curthread->t_cpu->cpu_cpc_ctx; in kcpc_hw_overflow_intr()
1103 for (i = 0; i < ctx->kc_set->ks_nreqs; i++) { in kcpc_hw_overflow_intr()
1104 req = ctx->kc_set->ks_req[i]; in kcpc_hw_overflow_intr()
1107 pcbe_ops->pcbe_configure(req.kr_picnum, in kcpc_hw_overflow_intr()
1114 pcbe_ops->pcbe_program(ctx); in kcpc_hw_overflow_intr()
1120 cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state = in kcpc_hw_overflow_intr()
1142 * The CPU's CPC context may disappear as a result of cross-call which in kcpc_hw_overflow_intr()
1143 * has higher PIL on x86, so protect the context by raising PIL to the in kcpc_hw_overflow_intr()
1144 * cross-call level. in kcpc_hw_overflow_intr()
1150 ctx->kc_hrtime = gethrtime_waitfree(); in kcpc_hw_overflow_intr()
1151 ctx->kc_vtick += curtick - ctx->kc_rawtick; in kcpc_hw_overflow_intr()
1152 ctx->kc_rawtick = curtick; in kcpc_hw_overflow_intr()
1153 pcbe_ops->pcbe_sample(ctx); in kcpc_hw_overflow_intr()
1154 pcbe_ops->pcbe_program(ctx); in kcpc_hw_overflow_intr()
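Lines 1150-1152 virtualize the tick counter: kc_vtick accumulates only the raw-tick deltas observed while the context was live, and kc_rawtick is re-based at each sample so the next delta starts from the current reading. The arithmetic in isolation, as a runnable toy:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Virtualized tick accounting as with kc_vtick/kc_rawtick above:
     * vtick credits only the interval since the last (re)program point.
     */
    int main(void)
    {
        uint64_t rawtick = 1000, vtick = 0, curtick;

        curtick = 1500;               /* counter read at sample time */
        vtick += curtick - rawtick;   /* credit 500 ticks */
        rawtick = curtick;            /* re-base for the next interval */
        printf("vtick = %llu\n", (unsigned long long)vtick);
        return 0;
    }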
1162 * Called from trap() when processing the ast posted by the high-level
1168 kcpc_ctx_t *ctx = curthread->t_cpc_ctx; in kcpc_overflow_ast()
1178 * virtualized 64-bit counter(s). in kcpc_overflow_ast()
1181 ctx->kc_hrtime = gethrtime_waitfree(); in kcpc_overflow_ast()
1182 pcbe_ops->pcbe_sample(ctx); in kcpc_overflow_ast()
1185 ctx->kc_vtick += curtick - ctx->kc_rawtick; in kcpc_overflow_ast()
1196 if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) { in kcpc_overflow_ast()
1197 atomic_and_uint(&ctx->kc_pics[i].kp_flags, in kcpc_overflow_ast()
1206 * Otherwise, re-enable the counters and continue life as before. in kcpc_overflow_ast()
1210 pcbe_ops->pcbe_program(ctx); in kcpc_overflow_ast()
1228 if (ctx->kc_flags & KCPC_CTX_INVALID) { in kcpc_save()
1229 if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) { in kcpc_save()
1244 pcbe_ops->pcbe_allstop(); in kcpc_save()
1245 if (ctx->kc_flags & KCPC_CTX_FREEZE) { in kcpc_save()
1254 ctx->kc_hrtime = gethrtime_waitfree(); in kcpc_save()
1255 ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick; in kcpc_save()
1256 pcbe_ops->pcbe_sample(ctx); in kcpc_save()
1262 ASSERT(ctx->kc_cpuid == -1); in kcpc_save()
1274 mutex_enter(&ctx->kc_lock); in kcpc_restore()
1276 if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) == in kcpc_restore()
1286 if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE)) { in kcpc_restore()
1287 mutex_exit(&ctx->kc_lock); in kcpc_restore()
1301 mutex_exit(&ctx->kc_lock); in kcpc_restore()
1317 mutex_enter(&ctx->kc_lock); in kcpc_restore()
1319 cv_signal(&ctx->kc_condv); in kcpc_restore()
1320 mutex_exit(&ctx->kc_lock); in kcpc_restore()
1344 mutex_enter(&cp->cpu_cpc_ctxlock); in kcpc_idle_save()
1346 if ((cp->cpu_cpc_ctx == NULL) || in kcpc_idle_save()
1347 (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) { in kcpc_idle_save()
1348 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_idle_save()
1352 pcbe_ops->pcbe_program(cp->cpu_cpc_ctx); in kcpc_idle_save()
1353 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_idle_save()
1370 mutex_enter(&cp->cpu_cpc_ctxlock); in kcpc_idle_restore()
1372 if ((cp->cpu_cpc_ctx == NULL) || in kcpc_idle_restore()
1373 (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) { in kcpc_idle_restore()
1374 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_idle_restore()
1378 pcbe_ops->pcbe_allstop(); in kcpc_idle_restore()
1379 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_idle_restore()
1399 kcpc_ctx_t *ctx = t->t_cpc_ctx, *cctx; in kcpc_lwp_create()
1402 if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0) in kcpc_lwp_create()
1406 if (ctx->kc_flags & KCPC_CTX_INVALID) { in kcpc_lwp_create()
1418 KCPC_CTX_FLAG_SET(cctx, ctx->kc_flags); in kcpc_lwp_create()
1419 cctx->kc_thread = ct; in kcpc_lwp_create()
1420 cctx->kc_cpuid = -1; in kcpc_lwp_create()
1421 ct->t_cpc_set = cctx->kc_set; in kcpc_lwp_create()
1422 ct->t_cpc_ctx = cctx; in kcpc_lwp_create()
1424 if (cctx->kc_flags & KCPC_CTX_SIGOVF) { in kcpc_lwp_create()
1425 kcpc_set_t *ks = cctx->kc_set; in kcpc_lwp_create()
1434 for (i = 0; i < ks->ks_nreqs; i++) { in kcpc_lwp_create()
1435 kcpc_request_t *kr = &ks->ks_req[i]; in kcpc_lwp_create()
1437 if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) { in kcpc_lwp_create()
1438 *(kr->kr_data) = UINT64_MAX; in kcpc_lwp_create()
1439 atomic_or_uint(&kr->kr_picp->kp_flags, in kcpc_lwp_create()
1443 ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW; in kcpc_lwp_create()
1457 * 3) An LWP performs an exec().
1469 * Case 3: kcpc_free(), called via freectx() via exec(), recognizes that it has
1470 * been called from exec. It stops the counters _and_ frees the context.
1474 * CPU-bound counters are always stopped via kcpc_unbind().
1479 * structures are freed, and that the hardware is passivated if this is an exec.
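The comment block above distinguishes the teardown paths; the exec() case is the one where kcpc_free() must also passivate the hardware before the data structures disappear. A schematic of that isexec dispatch with stand-in helpers (these are not the kcpc.c functions):

    struct ctx { int cpuid; };          /* cpuid == -1: thread-bound */

    void passivate_hw(struct ctx *);    /* stand-in: stop + unload hw */
    void release(struct ctx *);         /* stand-in: free structures */

    /*
     * Schematic only: one free routine serves both the LWP-exit and
     * exec() paths, passivating the hardware in the exec case before
     * the context goes away.
     */
    void ctx_free(struct ctx *ctx, int isexec)
    {
        if (isexec)
            passivate_hw(ctx);
        release(ctx);
    }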
1488 kcpc_set_t *set = ctx->kc_set; in kcpc_free()
1495 mutex_enter(&ctx->kc_lock); in kcpc_free()
1496 while (ctx->kc_flags & KCPC_CTX_RESTORE) in kcpc_free()
1497 cv_wait(&ctx->kc_condv, &ctx->kc_lock); in kcpc_free()
1499 mutex_exit(&ctx->kc_lock); in kcpc_free()
1503 * This thread is execing, and after the exec it should not have in kcpc_free()
1508 if (ctx->kc_cpuid != -1) { in kcpc_free()
1511 * CPU-bound context; stop the appropriate CPU's ctrs. in kcpc_free()
1516 cp = cpu_get(ctx->kc_cpuid); in kcpc_free()
1523 mutex_enter(&cp->cpu_cpc_ctxlock); in kcpc_free()
1525 mutex_exit(&cp->cpu_cpc_ctxlock); in kcpc_free()
1528 ASSERT(curthread->t_cpc_ctx == NULL); in kcpc_free()
1533 * Thread-bound context; stop _this_ CPU's counters. in kcpc_free()
1538 curthread->t_cpc_ctx = NULL; in kcpc_free()
1544 * Since we are being called from an exec and we know that in kcpc_free()
1545 * exec is not permitted via the agent thread, we should clean in kcpc_free()
1549 ASSERT(ctx->kc_thread == curthread); in kcpc_free()
1550 curthread->t_cpc_set = NULL; in kcpc_free()
1557 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_free()
1558 if (set->ks_req[i].kr_config != NULL) in kcpc_free()
1559 pcbe_ops->pcbe_free(set->ks_req[i].kr_config); in kcpc_free()
1562 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); in kcpc_free()
1582 ASSERT(set->ks_req != NULL); in kcpc_free_set()
1584 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_free_set()
1585 req = &set->ks_req[i]; in kcpc_free_set()
1587 if (req->kr_nattrs != 0) { in kcpc_free_set()
1588 kmem_free(req->kr_attr, in kcpc_free_set()
1589 req->kr_nattrs * sizeof (kcpc_attr_t)); in kcpc_free_set()
1593 kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs); in kcpc_free_set()
1594 cv_destroy(&set->ks_condv); in kcpc_free_set()
1595 mutex_destroy(&set->ks_lock); in kcpc_free_set()
1610 for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next) in kcpc_invalidate_all()
1636 kcpc_ctx_t *ctx = curthread->t_cpc_ctx; in kcpc_passivate()
1637 kcpc_set_t *set = curthread->t_cpc_set; in kcpc_passivate()
1645 * This thread has a set but no context; it must be a CPU-bound in kcpc_passivate()
1656 ASSERT(curthread->t_cpc_set == NULL); in kcpc_passivate()
1662 curthread->t_cpc_set = NULL; in kcpc_passivate()
1671 if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) { in kcpc_passivate()
1682 curthread->t_cpc_ctx = NULL; in kcpc_passivate()
1690 * Returns 0 if successful, -1 on failure.
1699 ASSERT(set->ks_nreqs <= cpc_ncounters); in kcpc_assign_reqs()
1705 picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP); in kcpc_assign_reqs()
1715 for (i = 0; i < set->ks_nreqs; i++) in kcpc_assign_reqs()
1719 kmem_free(picnum_save, set->ks_nreqs * sizeof (int)); in kcpc_assign_reqs()
1720 if (i == set->ks_nreqs) in kcpc_assign_reqs()
1721 return (-1); in kcpc_assign_reqs()
1743 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_tryassign()
1744 scratch[i] = set->ks_req[i].kr_picnum; in kcpc_tryassign()
1745 if (set->ks_req[i].kr_picnum != -1) in kcpc_tryassign()
1746 resmap |= (1 << set->ks_req[i].kr_picnum); in kcpc_tryassign()
1755 if (set->ks_req[i].kr_picnum != -1) { in kcpc_tryassign()
1756 ASSERT((bitmap & (1 << set->ks_req[i].kr_picnum)) == 0); in kcpc_tryassign()
1757 bitmap |= (1 << set->ks_req[i].kr_picnum); in kcpc_tryassign()
1758 if (++i == set->ks_nreqs) in kcpc_tryassign()
1763 ctrmap = pcbe_ops->pcbe_event_coverage(set->ks_req[i].kr_event); in kcpc_tryassign()
1779 for (i = 0; i < set->ks_nreqs; i++) in kcpc_tryassign()
1780 set->ks_req[i].kr_picnum = scratch[i]; in kcpc_tryassign()
1781 return (-1); in kcpc_tryassign()
1783 set->ks_req[i].kr_picnum = j; in kcpc_tryassign()
1785 if (++i == set->ks_nreqs) in kcpc_tryassign()
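The kcpc_tryassign() fragments above show the assignment strategy: save all picnums in a scratch array, reserve any counter the caller pinned, place each remaining event on the lowest free counter its coverage bitmap (pcbe_event_coverage()) allows, and restore the scratch copy if any event cannot be placed. A self-contained miniature of that bitmap assignment; the coverage table is invented, and the starting-point rotation that kcpc_assign_reqs() performs around this routine is omitted:

    #include <stdio.h>
    #include <string.h>

    #define NREQS 3

    /*
     * Per-event counter coverage bitmaps (bit j set: the event can run
     * on counter j), standing in for pcbe_ops->pcbe_event_coverage().
     */
    static unsigned coverage[NREQS] = { 0x1, 0x3, 0x6 };

    static int picnum[NREQS] = { -1, -1, -1 };  /* -1: not yet placed */

    static int tryassign(void)
    {
        int scratch[NREQS];
        unsigned bitmap = 0;        /* counters already taken */
        int i, j;

        memcpy(scratch, picnum, sizeof (scratch));  /* for backtracking */
        for (i = 0; i < NREQS; i++) {
            if (picnum[i] != -1) {              /* caller pinned this one */
                bitmap |= 1u << picnum[i];
                continue;
            }
            for (j = 0; j < 8 * (int)sizeof (unsigned); j++)
                if ((coverage[i] & (1u << j)) && !(bitmap & (1u << j)))
                    break;
            if (j == 8 * (int)sizeof (unsigned)) {
                memcpy(picnum, scratch, sizeof (scratch));  /* undo */
                return (-1);
            }
            picnum[i] = j;
            bitmap |= 1u << j;
        }
        return (0);
    }

    int main(void)
    {
        if (tryassign() == 0)
            for (int i = 0; i < NREQS; i++)
                printf("req %d -> pic %d\n", i, picnum[i]);
        return 0;
    }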
1800 new->ks_state &= ~KCPC_SET_BOUND; in kcpc_dup_set()
1801 new->ks_flags = set->ks_flags; in kcpc_dup_set()
1802 new->ks_nreqs = set->ks_nreqs; in kcpc_dup_set()
1803 new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t), in kcpc_dup_set()
1805 new->ks_data = NULL; in kcpc_dup_set()
1806 new->ks_ctx = NULL; in kcpc_dup_set()
1808 for (i = 0; i < new->ks_nreqs; i++) { in kcpc_dup_set()
1809 new->ks_req[i].kr_config = NULL; in kcpc_dup_set()
1810 new->ks_req[i].kr_index = set->ks_req[i].kr_index; in kcpc_dup_set()
1811 new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum; in kcpc_dup_set()
1812 new->ks_req[i].kr_picp = NULL; in kcpc_dup_set()
1813 new->ks_req[i].kr_data = NULL; in kcpc_dup_set()
1814 (void) strncpy(new->ks_req[i].kr_event, set->ks_req[i].kr_event, in kcpc_dup_set()
1816 new->ks_req[i].kr_preset = set->ks_req[i].kr_preset; in kcpc_dup_set()
1817 new->ks_req[i].kr_flags = set->ks_req[i].kr_flags; in kcpc_dup_set()
1818 new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs; in kcpc_dup_set()
1819 new->ks_req[i].kr_attr = kmem_alloc(new->ks_req[i].kr_nattrs * in kcpc_dup_set()
1821 for (j = 0; j < new->ks_req[i].kr_nattrs; j++) { in kcpc_dup_set()
1822 new->ks_req[i].kr_attr[j].ka_val = in kcpc_dup_set()
1823 set->ks_req[i].kr_attr[j].ka_val; in kcpc_dup_set()
1824 (void) strncpy(new->ks_req[i].kr_attr[j].ka_name, in kcpc_dup_set()
1825 set->ks_req[i].kr_attr[j].ka_name, in kcpc_dup_set()
1836 return (((kcpc_ctx_t *)token)->kc_flags & KCPC_CTX_NONPRIV); in kcpc_allow_nonpriv()
1842 kcpc_ctx_t *ctx = t->t_cpc_ctx; in kcpc_invalidate()
1854 * Returns 0 if a PCBE was successfully loaded and -1 upon error.
1866 "pcbe", prefix, ".", s, 3, NULL) < 0 ? -1 : 0); in kcpc_pcbe_tryload()
1886 * whether memory allocation should be non-blocking or not. The code will try
1905 req_list == NULL || req_list->krl_cnt < 1) in kcpc_cpu_ctx_create()
1906 return (-1); in kcpc_cpu_ctx_create()
1912 nreqs = req_list->krl_cnt; in kcpc_cpu_ctx_create()
1913 nctx_ptrs = (nreqs + cpc_ncounters - 1) / cpc_ncounters; in kcpc_cpu_ctx_create()
1916 return (-2); in kcpc_cpu_ctx_create()
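The expression at line 1913 is the standard integer ceiling: the number of contexts needed to multiplex nreqs counter events over cpc_ncounters hardware counters. For instance:

    #include <stdio.h>

    int main(void)
    {
        int nreqs = 7, ncounters = 4;

        /* ceil(7 / 4) == 2 contexts needed for multiplexing */
        printf("%d\n", (nreqs + ncounters - 1) / ncounters);
        return 0;
    }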
1922 reqs = req_list->krl_list; in kcpc_cpu_ctx_create()
1961 set->ks_req->kr_event); in kcpc_cpu_ctx_create()
1966 nreqs--; in kcpc_cpu_ctx_create()
1974 set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), in kcpc_cpu_ctx_create()
1976 if (set->ks_data == NULL) { in kcpc_cpu_ctx_create()
1991 reqs += set->ks_nreqs; in kcpc_cpu_ctx_create()
1992 nreqs -= set->ks_nreqs; in kcpc_cpu_ctx_create()
1993 kmem_free(set->ks_data, in kcpc_cpu_ctx_create()
1994 set->ks_nreqs * sizeof (uint64_t)); in kcpc_cpu_ctx_create()
2004 set->ks_ctx = ctx; in kcpc_cpu_ctx_create()
2005 ctx->kc_set = set; in kcpc_cpu_ctx_create()
2006 ctx->kc_cpuid = cp->cpu_id; in kcpc_cpu_ctx_create()
2007 ctx->kc_thread = curthread; in kcpc_cpu_ctx_create()
2014 reqs += set->ks_nreqs; in kcpc_cpu_ctx_create()
2015 nreqs -= set->ks_nreqs; in kcpc_cpu_ctx_create()
2032 ((nreqs + cpc_ncounters - 1) / cpc_ncounters); in kcpc_cpu_ctx_create()
2061 return (-2); in kcpc_cpu_ctx_create()
2075 if (pcbe_ops == NULL || pcbe_ops->pcbe_event_coverage(event) == 0) in kcpc_event_supported()
2089 * on the target CPU or from a cross-call from another CPU. To protect
2090 * programming and unprogramming from being interrupted by cross-calls, callers
2092 * cross-calls.
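The comment above spells out the caller contract for kcpc_program()/kcpc_unprogram(): preemption must stay disabled across the programming sequence so a cross-call cannot rip the context out from under it (the t_preempt > 0 ASSERTs below enforce this). A sketch of the calling pattern under that contract; this fragment is illustrative only, and the boolean arguments are placeholders rather than values from a real caller:

    kpreempt_disable();             /* satisfy the t_preempt > 0 checks */
    kcpc_program(ctx, B_TRUE, B_TRUE);
    /* ... counters are now live on this CPU ... */
    kcpc_unprogram(ctx, B_TRUE);
    kpreempt_enable();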
2103 * CPU or be -1 to specify any CPU when the context is bound to a in kcpc_program()
2106 ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id || in kcpc_program()
2107 ctx->kc_cpuid == -1) && curthread->t_preempt > 0); in kcpc_program()
2108 if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id && in kcpc_program()
2109 ctx->kc_cpuid != -1) || curthread->t_preempt < 1) in kcpc_program()
2119 kcpc_set_t *set = ctx->kc_set; in kcpc_program()
2130 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_program()
2134 *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset; in kcpc_program()
2139 pcbe_ops->pcbe_configure(0, NULL, in kcpc_program()
2140 set->ks_req[i].kr_preset, in kcpc_program()
2141 0, 0, NULL, &set->ks_req[i].kr_config, NULL); in kcpc_program()
2148 ctx->kc_rawtick = KCPC_GET_TICK(); in kcpc_program()
2149 pcbe_ops->pcbe_program(ctx); in kcpc_program()
2158 CPU->cpu_cpc_ctx = ctx; in kcpc_program()
2169 * cross-calls.
2180 * CPU or be -1 to specify any CPU when the context is bound to a in kcpc_unprogram()
2183 ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id || in kcpc_unprogram()
2184 ctx->kc_cpuid == -1) && curthread->t_preempt > 0); in kcpc_unprogram()
2186 if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id && in kcpc_unprogram()
2187 ctx->kc_cpuid != -1) || curthread->t_preempt < 1 || in kcpc_unprogram()
2188 (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) != 0) { in kcpc_unprogram()
2196 ASSERT(CPU->cpu_cpc_ctx == ctx || curthread->t_cpc_ctx == ctx); in kcpc_unprogram()
2201 pcbe_ops->pcbe_allstop(); in kcpc_unprogram()
2233 ctx = CPU->cpu_cpc_ctx; in kcpc_read()
2242 pcbe_ops->pcbe_sample(ctx); in kcpc_read()
2244 set = ctx->kc_set; in kcpc_read()
2245 if (set == NULL || set->ks_req == NULL) { in kcpc_read()
2254 req = set->ks_req; in kcpc_read()
2256 for (i = 0; i < set->ks_nreqs; i++) { in kcpc_read()
2294 req_list->krl_list = reqs; in kcpc_reqs_init()
2295 req_list->krl_cnt = 0; in kcpc_reqs_init()
2296 req_list->krl_max = nreqs; in kcpc_reqs_init()
2310 if (req_list == NULL || req_list->krl_list == NULL) in kcpc_reqs_add()
2311 return (-1); in kcpc_reqs_add()
2313 ASSERT(req_list->krl_max != 0); in kcpc_reqs_add()
2318 if (req_list->krl_cnt > req_list->krl_max) { in kcpc_reqs_add()
2322 old = req_list->krl_list; in kcpc_reqs_add()
2323 new = kmem_zalloc((req_list->krl_max + in kcpc_reqs_add()
2326 return (-2); in kcpc_reqs_add()
2328 req_list->krl_list = new; in kcpc_reqs_add()
2329 bcopy(old, req_list->krl_list, in kcpc_reqs_add()
2330 req_list->krl_cnt * sizeof (kcpc_request_t)); in kcpc_reqs_add()
2331 kmem_free(old, req_list->krl_max * sizeof (kcpc_request_t)); in kcpc_reqs_add()
2332 req_list->krl_cnt = 0; in kcpc_reqs_add()
2333 req_list->krl_max += cpc_ncounters; in kcpc_reqs_add()
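kcpc_reqs_add() grows the request array in cpc_ncounters-sized chunks: allocate a larger zeroed array, copy the live entries across, free the old array, and bump the capacity. A stand-alone version of that grow step (names are illustrative; note that it keeps the element count and leaves the list untouched on allocation failure):

    #include <stdlib.h>
    #include <string.h>

    #define CHUNK 4                 /* stand-in for cpc_ncounters */

    struct list {
        int *items;
        int cnt;                    /* live entries (krl_cnt) */
        int max;                    /* capacity (krl_max) */
    };

    /* Grow by one chunk; on failure the original list is untouched. */
    int list_grow(struct list *l)
    {
        int *new = calloc(l->max + CHUNK, sizeof (int));

        if (new == NULL)
            return (-1);
        memcpy(new, l->items, l->cnt * sizeof (int));
        free(l->items);
        l->items = new;
        l->max += CHUNK;            /* cnt deliberately unchanged */
        return (0);
    }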
2340 req = &req_list->krl_list[req_list->krl_cnt]; in kcpc_reqs_add()
2341 req->kr_config = NULL; in kcpc_reqs_add()
2342 req->kr_picnum = -1; /* have CPC pick this */ in kcpc_reqs_add()
2343 req->kr_index = -1; /* set when assigning request to set */ in kcpc_reqs_add()
2344 req->kr_data = NULL; /* set when configuring request */ in kcpc_reqs_add()
2345 (void) strcpy(req->kr_event, event); in kcpc_reqs_add()
2346 req->kr_preset = preset; in kcpc_reqs_add()
2347 req->kr_flags = flags; in kcpc_reqs_add()
2348 req->kr_nattrs = nattrs; in kcpc_reqs_add()
2349 req->kr_attr = attr; in kcpc_reqs_add()
2354 req->kr_ptr = ptr; in kcpc_reqs_add()
2356 req_list->krl_cnt++; in kcpc_reqs_add()
2372 if (req_list == NULL || req_list->krl_list == NULL || in kcpc_reqs_reset()
2373 req_list->krl_max <= 0) in kcpc_reqs_reset()
2374 return (-1); in kcpc_reqs_reset()
2379 bzero(req_list->krl_list, req_list->krl_max * sizeof (kcpc_request_t)); in kcpc_reqs_reset()
2380 req_list->krl_cnt = 0; in kcpc_reqs_reset()
2390 kmem_free(req_list->krl_list, in kcpc_reqs_fini()
2391 req_list->krl_max * sizeof (kcpc_request_t)); in kcpc_reqs_fini()
2413 set->ks_nreqs = nreqs; in kcpc_set_create()
2415 set->ks_nreqs = cpc_ncounters; in kcpc_set_create()
2417 set->ks_flags = set_flags; in kcpc_set_create()
2424 set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) * in kcpc_set_create()
2425 set->ks_nreqs, kmem_flags); in kcpc_set_create()
2426 if (set->ks_req == NULL) { in kcpc_set_create()
2431 bcopy(reqs, set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs); in kcpc_set_create()
2433 for (i = 0; i < set->ks_nreqs; i++) in kcpc_set_create()
2434 set->ks_req[i].kr_index = i; in kcpc_set_create()
2460 if (CPU->cpu_cpc_ctx == NULL) { in kcpc_cpustop_func()
2465 kcpc_unprogram(CPU->cpu_cpc_ctx, B_TRUE); in kcpc_cpustop_func()
2472 if (!preserve_context && CPU->cpu_cpc_ctx != NULL && !CU_CPC_ON(CPU)) in kcpc_cpustop_func()
2473 CPU->cpu_cpc_ctx = NULL; in kcpc_cpustop_func()
2519 return (pcbe_ops->pcbe_list_attrs()); in kcpc_list_attrs()
2527 return (pcbe_ops->pcbe_list_events(pic)); in kcpc_list_events()
2535 return (pcbe_ops->pcbe_caps); in kcpc_pcbe_capabilities()
2541 return (pcbe_ops == NULL ? -1 : 0); in kcpc_pcbe_loaded()