Lines Matching refs:set
60 * These are set when a PCBE module is loaded.
81 static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
82 static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);
170 kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
179 if (kcpc_assign_reqs(set, ctx) != 0) {
188 set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);
190 if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
191 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
196 set->ks_ctx = ctx;
197 ctx->kc_set = set;
209 * The CPU could have been DRd out while we were getting set up.
224 * If this CPU already has a bound set, return an error.
247 mutex_enter(&set->ks_lock);
248 set->ks_state |= KCPC_SET_BOUND;
249 cv_signal(&set->ks_condv);
250 mutex_exit(&set->ks_lock);
256 set->ks_ctx = NULL;
257 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
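The kcpc_bind_cpu() fragments above (lines 170-257) trace one flow: assign the requests to counters, zero-allocate the per-set data store, configure the requests through the PCBE, link set and context, and only then raise KCPC_SET_BOUND. A sketch of that sequence, reconstructed from the lines shown; the context allocation, the CPU-binding step itself, and the exact error values are assumptions:

	/* Sketch only; reconstructed from the kcpc_bind_cpu() lines above. */
	int
	kcpc_bind_cpu_sketch(kcpc_set_t *set, processorid_t cpuid, int *subcode)
	{
		kcpc_ctx_t *ctx = kcpc_ctx_alloc_sketch();	/* hypothetical helper */
		int error;

		if (kcpc_assign_reqs(set, ctx) != 0)
			return (EINVAL);		/* error value assumed */

		/* one 64-bit slot per request */
		set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t),
		    KM_SLEEP);

		if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
			kmem_free(set->ks_data,
			    set->ks_nreqs * sizeof (uint64_t));
			return (error);
		}

		set->ks_ctx = ctx;
		ctx->kc_set = set;

		/* ... bind to cpuid; the CPU could have been DRd out ... */

		/* wake any waiter in kcpc_unbind() blocked on the bind */
		mutex_enter(&set->ks_lock);
		set->ks_state |= KCPC_SET_BOUND;
		cv_signal(&set->ks_condv);
		mutex_exit(&set->ks_lock);

		return (0);
	}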
263 kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
269 * Only one set is allowed per context, so ensure there is no
286 if (kcpc_assign_reqs(set, ctx) != 0) {
293 if (set->ks_flags & CPC_BIND_LWP_INHERIT)
303 * Create the data store for this set.
305 set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);
307 if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
308 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
314 set->ks_ctx = ctx;
315 ctx->kc_set = set;
346 mutex_enter(&set->ks_lock);
347 set->ks_state |= KCPC_SET_BOUND;
348 cv_signal(&set->ks_condv);
349 mutex_exit(&set->ks_lock);
355 * Walk through each request in the set and ask the PCBE to configure a
359 kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode)
365 for (i = 0; i < set->ks_nreqs; i++) {
367 rp = &set->ks_req[i];
393 kcpc_free_configs(set);
406 rp->kr_data = set->ks_data + rp->kr_index;
414 kcpc_free_configs(kcpc_set_t *set)
418 for (i = 0; i < set->ks_nreqs; i++)
419 if (set->ks_req[i].kr_config != NULL)
420 pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
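The kr_data assignment at line 406 is the key to the data-store layout: ks_data is one flat array of uint64_t, one slot per request, and each request's kr_data simply points kr_index slots into it. A runnable user-space illustration of that layout (all names invented for the demo):

	#include <stdio.h>
	#include <stdlib.h>
	#include <inttypes.h>

	/* Demo analogue of ks_data/kr_data: requests point into one array. */
	int
	main(void)
	{
		int nreqs = 3;
		uint64_t *ks_data = calloc(nreqs, sizeof (uint64_t));
		uint64_t *kr_data[3];

		/* like rp->kr_data = set->ks_data + rp->kr_index */
		for (int i = 0; i < nreqs; i++)
			kr_data[i] = ks_data + i;

		*kr_data[1] = 42;	/* a "sample" lands in the shared store */
		printf("ks_data[1] = %" PRIu64 "\n", ks_data[1]);
		free(ks_data);
		return (0);
	}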
428 kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)
430 kcpc_ctx_t *ctx = set->ks_ctx;
433 mutex_enter(&set->ks_lock);
434 if ((set->ks_state & KCPC_SET_BOUND) == 0) {
435 mutex_exit(&set->ks_lock);
438 mutex_exit(&set->ks_lock);
487 if (copyout(set->ks_data, buf,
488 set->ks_nreqs * sizeof (uint64_t)) == -1)
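kcpc_sample() (lines 428-488) gates on the bound state before touching anything: it checks KCPC_SET_BOUND under ks_lock, then copies the ks_nreqs 64-bit counts out to the user's buffer. A condensed sketch; the hardware-read step in the middle and the error values are assumptions:

	/* Sketch of the sampling path reconstructed from the lines above. */
	mutex_enter(&set->ks_lock);
	if ((set->ks_state & KCPC_SET_BOUND) == 0) {
		mutex_exit(&set->ks_lock);
		return (EINVAL);	/* error value assumed */
	}
	mutex_exit(&set->ks_lock);

	/* ... sample the hardware counters into set->ks_data ... */

	if (copyout(set->ks_data, buf,
	    set->ks_nreqs * sizeof (uint64_t)) == -1)
		return (EFAULT);	/* error value assumed */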
521 kcpc_unbind(kcpc_set_t *set)
528 * binds the set; we must wait for the set to finish binding
531 mutex_enter(&set->ks_lock);
532 while ((set->ks_state & KCPC_SET_BOUND) == 0)
533 cv_wait(&set->ks_condv, &set->ks_lock);
534 mutex_exit(&set->ks_lock);
536 ctx = set->ks_ctx;
575 * If we are unbinding a CPU-bound set from a remote CPU, the
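Lines 247-250 (and 346-349) pair with the wait at lines 531-534: a classic flag-plus-condvar handshake. The binder raises KCPC_SET_BOUND and signals; kcpc_unbind() refuses to tear the set down until the flag appears, since an exiting LWP can race a thread that is still binding the set. A runnable user-space analogue of the pattern (all names invented for the demo):

	#include <pthread.h>
	#include <stdio.h>

	/* User-space analogue of the KCPC_SET_BOUND handshake. */
	static pthread_mutex_t ks_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t ks_condv = PTHREAD_COND_INITIALIZER;
	static int ks_state;
	#define	SET_BOUND	1

	static void *
	binder(void *arg)
	{
		/* ... bind work happens here ... */
		pthread_mutex_lock(&ks_lock);
		ks_state |= SET_BOUND;	/* like set->ks_state |= KCPC_SET_BOUND */
		pthread_cond_signal(&ks_condv);	/* like cv_signal(&set->ks_condv) */
		pthread_mutex_unlock(&ks_lock);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, binder, NULL);

		/* like kcpc_unbind(): wait out the bind before teardown */
		pthread_mutex_lock(&ks_lock);
		while ((ks_state & SET_BOUND) == 0)
			pthread_cond_wait(&ks_condv, &ks_lock);
		pthread_mutex_unlock(&ks_lock);

		printf("set is bound; safe to unbind\n");
		pthread_join(t, NULL);
		return (0);
	}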
609 kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)
613 ASSERT(set != NULL);
614 ASSERT(set->ks_state & KCPC_SET_BOUND);
615 ASSERT(set->ks_ctx->kc_thread == curthread);
616 ASSERT(set->ks_ctx->kc_cpuid == -1);
618 if (index < 0 || index >= set->ks_nreqs)
621 for (i = 0; i < set->ks_nreqs; i++)
622 if (set->ks_req[i].kr_index == index)
624 ASSERT(i != set->ks_nreqs);
626 set->ks_req[i].kr_preset = preset;
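Note that kcpc_preset() (lines 609-626) does not index ks_req directly: a request's array slot need not equal its kr_index, so the function scans for the matching kr_index and ASSERTs that one was found. The same lookup in a runnable miniature (data invented for the demo):

	#include <stdio.h>

	/* Demo: find the array slot whose kr_index matches the caller's. */
	struct req { int kr_index; unsigned long long kr_preset; };

	int
	main(void)
	{
		struct req ks_req[3] = { { 2, 0 }, { 0, 0 }, { 1, 0 } };
		int index = 1, i;

		for (i = 0; i < 3; i++)
			if (ks_req[i].kr_index == index)
				break;
		/* the real code ASSERTs i != ks_nreqs here */
		ks_req[i].kr_preset = 99;
		printf("request %d lives at slot %d\n", index, i);
		return (0);
	}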
631 kcpc_restart(kcpc_set_t *set)
633 kcpc_ctx_t *ctx = set->ks_ctx;
637 ASSERT(set->ks_state & KCPC_SET_BOUND);
641 for (i = 0; i < set->ks_nreqs; i++) {
642 *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
643 pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
644 0, 0, NULL, &set->ks_req[i].kr_config, NULL);
651 * If the user is doing this on a running set, make sure the counters
676 kcpc_set_t *set = t->t_cpc_set;
686 * This thread has a set but no context; it must be a
687 * CPU-bound set.
711 * Strategy for usr/sys: stop counters and update set's presets
724 for (i = 0; i < set->ks_nreqs; i++) {
725 set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
727 set->ks_req[i].kr_flags |= flag;
729 set->ks_req[i].kr_flags &= ~flag;
731 newset = kcpc_dup_set(set);
732 if (kcpc_unbind(set) != 0)
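The usr/sys fragments above (lines 711-732) outline the strategy for toggling a count-user/count-system flag on a live set: freeze the counters, fold each request's current count back into its preset so no events are lost, flip the flag, then duplicate the set and rebind the copy, since a configured set cannot be reprogrammed in place. A sketch of the sequence; the `enable` condition and the error value are assumptions:

	/* Sketch reconstructed from the usr/sys lines above. */
	kcpc_set_t *newset;
	int i;

	/* ... counters are stopped before this point ... */

	for (i = 0; i < set->ks_nreqs; i++) {
		/* carry the current count forward as the new preset */
		set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
		if (enable)			/* condition assumed */
			set->ks_req[i].kr_flags |= flag;
		else
			set->ks_req[i].kr_flags &= ~flag;
	}

	newset = kcpc_dup_set(set);	/* deep copy; the old set goes away */
	if (kcpc_unbind(set) != 0)
		return (EINVAL);	/* error value assumed */

	/* ... bind newset in place of the old set ... */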
835 * Copy set from ctx to the child context, cctx, if it has CPC_BIND_LWP_INHERIT
944 * Note that t_lwp is always set to point at the underlying
988 * set when we enter the handler. This variable is unset after
1115 * We've finished processing the interrupt so set
1289 * ctx & set related memory objects being freed without us knowing.
1320 * If kcpc_counts_include_idle is set to 0 by the sys admin, we add the
1408 * and SIGOVF flags set. In addition, all counters should be
1409 * set to UINT64_MAX, and their pic's overflow flag turned on
1438 * 4) A bound set is unbound.
1467 kcpc_set_t *set = ctx->kc_set;
1469 ASSERT(set != NULL);
1533 * Walk through each request in this context's set and free the PCBE's
1536 for (i = 0; i < set->ks_nreqs; i++) {
1537 if (set->ks_req[i].kr_config != NULL)
1538 pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
1541 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
1543 kcpc_free_set(set);
1547 * Free the memory associated with a request set.
1550 kcpc_free_set(kcpc_set_t *set)
1555 ASSERT(set->ks_req != NULL);
1557 for (i = 0; i < set->ks_nreqs; i++) {
1558 req = &set->ks_req[i];
1566 kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
1567 cv_destroy(&set->ks_condv);
1568 mutex_destroy(&set->ks_lock);
1569 kmem_free(set, sizeof (kcpc_set_t));
1610 kcpc_set_t *set = curthread->t_cpc_set;
1613 if (set == NULL)
1618 * This thread has a set but no context; it must be a CPU-bound
1619 * set. The hardware will be stopped via kcpc_unbind() when the
1622 * state; the set will be freed with the unbind().
1624 (void) kcpc_unbind(set);
1626 * Unbinding a set belonging to the current thread should clear
1627 * its set pointer.
1652 * CPC pointers left behind. The context and set will be freed by
1662 * Assign the requests in the given set to the PICs in the context.
1667 kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx)
1672 ASSERT(set->ks_nreqs <= cpc_ncounters);
1678 picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP);
1680 * kcpc_tryassign() blindly walks through each request in the set,
1688 for (i = 0; i < set->ks_nreqs; i++)
1689 if (kcpc_tryassign(set, i, picnum_save) == 0)
1692 kmem_free(picnum_save, set->ks_nreqs * sizeof (int));
1693 if (i == set->ks_nreqs)
1699 kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch)
1716 for (i = 0; i < set->ks_nreqs; i++) {
1717 scratch[i] = set->ks_req[i].kr_picnum;
1718 if (set->ks_req[i].kr_picnum != -1)
1719 resmap |= (1 << set->ks_req[i].kr_picnum);
1728 if (set->ks_req[i].kr_picnum != -1) {
1729 ASSERT((bitmap & (1 << set->ks_req[i].kr_picnum)) == 0);
1730 bitmap |= (1 << set->ks_req[i].kr_picnum);
1731 if (++i == set->ks_nreqs)
1736 ctrmap = pcbe_ops->pcbe_event_coverage(set->ks_req[i].kr_event);
1752 for (i = 0; i < set->ks_nreqs; i++)
1753 set->ks_req[i].kr_picnum = scratch[i];
1756 set->ks_req[i].kr_picnum = j;
1758 if (++i == set->ks_nreqs)
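kcpc_assign_reqs()/kcpc_tryassign() (lines 1662-1758) implement a small backtracking search: picnums already fixed by the user are reserved up front in resmap, each remaining request is given the lowest free counter that its event can run on (per pcbe_event_coverage()), and if the walk dead-ends, the saved picnums are restored from the scratch array and the whole walk is retried from the next starting request. A runnable user-space analogue with invented coverage masks; the resmap reservation and scratch-restore bookkeeping are elided:

	#include <stdio.h>

	/*
	 * Toy analogue of kcpc_tryassign(): greedily give each request the
	 * lowest free counter its event covers, starting the walk at
	 * starting_req and wrapping, so that a different walk order can
	 * succeed where another fails.
	 */
	#define	NREQS	3

	/* req0 fits pic0/pic1; req1 fits pic1/pic2; req2 fits pic1 only */
	static const unsigned coverage[NREQS] = { 0x3, 0x6, 0x2 };
	static int picnum[NREQS];

	static int
	tryassign(int starting_req)
	{
		unsigned bitmap = 0;
		int n, i, j;

		for (n = 0, i = starting_req; n < NREQS;
		    n++, i = (i + 1) % NREQS) {
			unsigned avail = coverage[i] & ~bitmap;

			if (avail == 0)
				return (-1);	/* dead end; caller rotates */
			for (j = 0; (avail & (1u << j)) == 0; j++)
				;
			picnum[i] = j;
			bitmap |= 1u << j;
		}
		return (0);
	}

	int
	main(void)
	{
		int i;

		/* like kcpc_assign_reqs(): rotate the start on failure */
		for (i = 0; i < NREQS; i++)
			if (tryassign(i) == 0)
				break;
		if (i == NREQS) {
			printf("no legal assignment\n");
			return (1);
		}
		for (i = 0; i < NREQS; i++)
			printf("req%d -> pic%d\n", i, picnum[i]);
		return (0);
	}

With these masks, only the walk starting at req2 succeeds (req0 -> pic0, req1 -> pic2, req2 -> pic1), which is exactly why the rotation exists.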
1766 kcpc_dup_set(kcpc_set_t *set)
1774 new->ks_flags = set->ks_flags;
1775 new->ks_nreqs = set->ks_nreqs;
1776 new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t),
1783 new->ks_req[i].kr_index = set->ks_req[i].kr_index;
1784 new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum;
1787 (void) strncpy(new->ks_req[i].kr_event, set->ks_req[i].kr_event,
1789 new->ks_req[i].kr_preset = set->ks_req[i].kr_preset;
1790 new->ks_req[i].kr_flags = set->ks_req[i].kr_flags;
1791 new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs;
1796 set->ks_req[i].kr_attr[j].ka_val;
1798 set->ks_req[i].kr_attr[j].ka_name,
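kcpc_dup_set() has to be a deep copy: kr_event is copied with strncpy() and each request's attribute array is duplicated element by element, because the old set and everything it points to is freed by kcpc_free_set() after the unbind. A condensed sketch of the per-request copy; the attribute-array allocation and the length constants are assumptions:

	/* Sketch of the deep per-request copy in kcpc_dup_set(). */
	for (i = 0; i < set->ks_nreqs; i++) {
		new->ks_req[i].kr_index = set->ks_req[i].kr_index;
		new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum;
		(void) strncpy(new->ks_req[i].kr_event,
		    set->ks_req[i].kr_event, EVENT_LEN);	/* length assumed */
		new->ks_req[i].kr_preset = set->ks_req[i].kr_preset;
		new->ks_req[i].kr_flags = set->ks_req[i].kr_flags;
		new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs;
		/* kr_attr for new->ks_req[i] allocated separately (not shown) */
		for (j = 0; j < set->ks_req[i].kr_nattrs; j++) {
			new->ks_req[i].kr_attr[j].ka_val =
			    set->ks_req[i].kr_attr[j].ka_val;
			(void) strncpy(new->ks_req[i].kr_attr[j].ka_name,
			    set->ks_req[i].kr_attr[j].ka_name,
			    ATTR_LEN);				/* length assumed */
		}
	}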
1882 * Allocate number of sets assuming that each set contains one and only
1898 kcpc_set_t *set;
1902 * Allocate CPC context and set for requested counter events
1905 set = kcpc_set_create(reqs, nreqs, 0, kmem_flags);
1906 if (set == NULL) {
1915 if (kcpc_assign_reqs(set, ctx) != 0) {
1920 * set of counter requests when this happens since at
1924 kcpc_free_set(set);
1925 set = kcpc_set_create(reqs, 1, 0, kmem_flags);
1926 if (set == NULL) {
1930 if (kcpc_assign_reqs(set, ctx) != 0) {
1934 set->ks_req->kr_event);
1936 kcpc_free_set(set);
1947 set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t),
1949 if (set->ks_data == NULL) {
1950 kcpc_free_set(set);
1958 if (kcpc_configure_reqs(ctx, set, &subcode) != 0) {
1962 "set of counter event requests!\n");
1964 reqs += set->ks_nreqs;
1965 nreqs -= set->ks_nreqs;
1966 kmem_free(set->ks_data,
1967 set->ks_nreqs * sizeof (uint64_t));
1968 kcpc_free_set(set);
1974 * Point set of counter event requests at this context and fill
1977 set->ks_ctx = ctx;
1978 ctx->kc_set = set;
1987 reqs += set->ks_nreqs;
1988 nreqs -= set->ks_nreqs;
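Lines 1882-1988 sketch the startup path that programs counters from a flat request list: first try to co-schedule the whole list as one set; if kcpc_assign_reqs() fails, fall back to a set holding a single request, then advance reqs/nreqs past whatever the set consumed and go around again. A reconstruction of that loop; the enclosing loop shape, the NULL-check placement, and the continue/break choices are assumptions:

	/* Sketch of the one-request-per-set fall-back, from the lines above. */
	while (nreqs > 0) {
		/* ctx allocated per set; allocation not shown in the listing */
		set = kcpc_set_create(reqs, nreqs, 0, kmem_flags);
		if (set == NULL)
			break;				/* assumed */

		if (kcpc_assign_reqs(set, ctx) != 0) {
			/* list won't co-schedule; retry with one request */
			kcpc_free_set(set);
			set = kcpc_set_create(reqs, 1, 0, kmem_flags);
			if (set == NULL)
				break;			/* assumed */
			if (kcpc_assign_reqs(set, ctx) != 0) {
				/* even a lone event fits no counter */
				kcpc_free_set(set);
				break;			/* assumed */
			}
		}

		set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t),
		    kmem_flags);
		if (set->ks_data == NULL) {
			kcpc_free_set(set);
			break;				/* assumed */
		}

		if (kcpc_configure_reqs(ctx, set, &subcode) != 0) {
			/* skip past this set's requests and press on */
			reqs += set->ks_nreqs;
			nreqs -= set->ks_nreqs;
			kmem_free(set->ks_data,
			    set->ks_nreqs * sizeof (uint64_t));
			kcpc_free_set(set);
			continue;
		}

		/* point the set at this context and advance */
		set->ks_ctx = ctx;
		ctx->kc_set = set;
		reqs += set->ks_nreqs;
		nreqs -= set->ks_nreqs;
	}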
2092 kcpc_set_t *set = ctx->kc_set;
2095 ASSERT(set != NULL);
2100 * set, but it is not very reliable, so we start again from the
2103 for (i = 0; i < set->ks_nreqs; i++) {
2107 *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
2113 set->ks_req[i].kr_preset,
2114 0, 0, NULL, &set->ks_req[i].kr_config, NULL);
2197 kcpc_set_t *set;
2217 set = ctx->kc_set;
2218 if (set == NULL || set->ks_req == NULL) {
2227 req = set->ks_req;
2229 for (i = 0; i < set->ks_nreqs; i++) {
2311 * to be set when request is assigned to a set.
2316 req->kr_index = -1; /* set when assigning request to set */
2317 req->kr_data = NULL; /* set when configuring request */
2335 * Reset list of CPC event requests so its space can be used for another set
2370 * Create set of given counter event requests
2376 kcpc_set_t *set;
2379 * Allocate set and assign number of requests in set and flags
2381 set = kmem_zalloc(sizeof (kcpc_set_t), kmem_flags);
2382 if (set == NULL)
2386 set->ks_nreqs = nreqs;
2388 set->ks_nreqs = cpc_ncounters;
2390 set->ks_flags = set_flags;
2393 * Allocate requests needed, copy requests into set, and set index into
2397 set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) *
2398 set->ks_nreqs, kmem_flags);
2399 if (set->ks_req == NULL) {
2400 kmem_free(set, sizeof (kcpc_set_t));
2404 bcopy(reqs, set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
2406 for (i = 0; i < set->ks_nreqs; i++)
2407 set->ks_req[i].kr_index = i;
2409 return (set);
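kcpc_set_create() itself is nearly complete in the lines above: zero-allocate the set, record nreqs (clamped to cpc_ncounters) and the flags, allocate and bcopy() the request array, then stamp each request's kr_index with its position so later code, such as kcpc_preset() and the data-store layout, can find it again. A minimal caller's view; the request-population step and the error value are assumptions:

	/* Sketch: building and creating a set (request setup assumed). */
	kcpc_request_t reqs[2];
	kcpc_set_t *set;

	/* ... fill in reqs[0..1]: kr_event, kr_preset, kr_flags, ... */

	set = kcpc_set_create(reqs, 2, 0, KM_NOSLEEP);
	if (set == NULL)
		return (ENOMEM);	/* error value assumed */

	/* now set->ks_req[i].kr_index == i for every request */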
2420 * to be preserved, so it is set to NULL.
2450 * Stop counters on given CPU and set its CPC context to NULL unless