Lines Matching refs:set

48  * instance, destroying the same set at the same time from different threads).
56 static int cpc_set_valid(cpc_t *cpc, cpc_set_t *set);
191 cpc_set_t *set;
194 if ((set = malloc(sizeof (*set))) == NULL) {
199 set->cs_request = NULL;
200 set->cs_nreqs = 0;
201 set->cs_state = CS_UNBOUND;
202 set->cs_fd = -1;
203 set->cs_pctx = NULL;
204 set->cs_id = -1;
205 set->cs_thr = NULL;
208 set->cs_next = cpc->cpc_sets;
209 cpc->cpc_sets = set;
212 return (set);
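
Lines 191-212 are the allocation path of cpc_set_create(): a new set starts out empty and unbound (cs_state = CS_UNBOUND, cs_fd = -1) and is pushed onto the head of the handle's cpc_sets list, which is what cpc_set_valid() later walks. A minimal caller-side sketch, using only the documented libcpc(3CPC) entry points (error handling abbreviated):

    #include <libcpc.h>
    #include <stdio.h>
    #include <stdlib.h>

    cpc_t *cpc;
    cpc_set_t *set;

    /* Open a handle to the CPU performance counter facility. */
    if ((cpc = cpc_open(CPC_VER_CURRENT)) == NULL) {
            perror("cpc_open");
            exit(1);
    }

    /* Create an empty, unbound set; it is linked into the handle. */
    if ((set = cpc_set_create(cpc)) == NULL) {
            perror("cpc_set_create");
            exit(1);
    }
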
216 cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
223 * Remove this set from the cpc handle's list of sets.
227 if (csp == set)
262 free(set);
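
The destroy path (lines 216-262) walks the handle's list to unlink the set (the csp == set comparison at line 227) before freeing it. A hedged teardown sketch; pairing cpc_set_destroy() with cpc_close() is the conventional shutdown order, not something these matched lines show:

    /* Release the set and all of its requests. */
    if (cpc_set_destroy(cpc, set) != 0)
            perror("cpc_set_destroy");
    (void) cpc_close(cpc);
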
269 cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
277 if (cpc_set_valid(cpc, set) != 0 || set->cs_state != CS_UNBOUND) {
300 req->cr_index = set->cs_nreqs;
346 req->cr_next = set->cs_request;
347 set->cs_request = req;
348 set->cs_nreqs++;
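
cpc_set_add_request() (lines 269-348) refuses to modify a bound set (the CS_UNBOUND check at line 277), then pushes the new request onto the head of cs_request, with cr_index recording its position. That index is the caller's handle for the counter when a buffer is sampled later. A sketch; event names are platform-specific, so "PAPI_tot_cyc" here is purely illustrative:

    int index;

    /* Count cycles in user mode; no preset, no attributes. */
    if ((index = cpc_set_add_request(cpc, set, "PAPI_tot_cyc", 0,
        CPC_COUNT_USER, 0, NULL)) == -1) {
            perror("cpc_set_add_request");
            exit(1);
    }
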
359 cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
364 if (cpc_set_valid(cpc, set) != 0) {
372 buf->cb_size = set->cs_nreqs * sizeof (uint64_t);
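
Line 372 shows the sizing rule for sample buffers: one uint64_t per request in the set. cpc_set_sample() re-checks exactly this size at line 639, so buffers should be created only after all requests have been added. For instance:

    cpc_buf_t *buf;

    /* Allocates cs_nreqs * sizeof (uint64_t) worth of counters. */
    if ((buf = cpc_buf_create(cpc, set)) == NULL) {
            perror("cpc_buf_create");
            exit(1);
    }
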
424 cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
436 if (CPC_SET_VALID_FLAGS(flags) == 0 || set->cs_nreqs <= 0) {
441 if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
455 set->cs_thr = thr_self();
456 set->cs_state = CS_BOUND_CURLWP;
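
cpc_bind_curlwp() (lines 424-456) validates the flags and the request count, packs the set with __cpc_pack_set() for the kernel, and on success records the binding thread in cs_thr and moves the set to CS_BOUND_CURLWP. A caller sketch, assuming no inheritance or overflow flags are wanted:

    /* Start the counters on the calling LWP. */
    if (cpc_bind_curlwp(cpc, set, 0) != 0) {
            perror("cpc_bind_curlwp");
            exit(1);
    }
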
462 cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
472 if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
477 if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
488 set->cs_pctx = pctx;
489 set->cs_id = id;
490 set->cs_state = CS_BOUND_PCTX;
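
The pctx variant (lines 462-490) binds a set to an LWP in a victim process controlled through libpctx; note that flags must be zero here (line 472), and the handle and LWP id are remembered in cs_pctx/cs_id for later sampling. A hypothetical fragment, assuming pctx and lwpid were obtained elsewhere via libpctx(3LIB):

    /* pctx: victim-process handle; lwpid: LWP to count in. */
    if (cpc_bind_pctx(cpc, pctx, lwpid, set, 0) != 0)
            perror("cpc_bind_pctx");
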
499 cpc_bind_cpu(cpc_t *cpc, processorid_t id, cpc_set_t *set, uint_t flags)
512 if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
517 if (processor_bind(P_LWPID, P_MYID, id, &set->cs_obind) == -1) {
524 (void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
532 * same set to different CPUs without first unbinding it.
534 if (set->cs_fd != -1)
535 (void) close(set->cs_fd);
536 set->cs_fd = fd;
538 if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
540 (void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
553 (void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
562 set->cs_thr = thr_self();
563 set->cs_state = CS_BOUND_CPU;
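
cpc_bind_cpu() (lines 499-563) is the most involved binding: the calling LWP is first tied to the target CPU with processor_bind(), saving the previous binding in cs_obind so that every failure path (lines 524, 540, 553) can restore it, and the comment at line 532 explains why a stale cs_fd is closed before the new descriptor is stored. Caller-side it is still one call:

    processorid_t cpuid = 0;        /* illustrative CPU id */

    /* Count everything that runs on the chosen CPU. */
    if (cpc_bind_cpu(cpc, cpuid, set, 0) != 0) {
            perror("cpc_bind_cpu");
            exit(1);
    }
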
578 cpc_set_restart(cpc_t *cpc, cpc_set_t *set)
585 cpc_unbind(cpc_t *cpc, cpc_set_t *set)
590 if (cpc_set_valid(cpc, set) != 0) {
595 switch (set->cs_state) {
604 ret = ioctl(set->cs_fd, CPCIO_RELE, NULL);
606 (void) close(set->cs_fd);
607 set->cs_fd = -1;
608 (void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
611 if (set->cs_pctx != NULL) {
612 ret = __pctx_cpc(set->cs_pctx, cpc, CPC_RELE,
613 set->cs_id, 0, 0, 0, 0);
619 set->cs_thr = NULL;
620 set->cs_id = -1;
621 set->cs_state = CS_UNBOUND;
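
cpc_unbind() (lines 585-621) dispatches on cs_state: a CPU-bound set releases the device with the CPCIO_RELE ioctl, closes cs_fd, and restores the saved processor binding (line 608), while a pctx-bound set releases through __pctx_cpc(); either way the set ends up back in CS_UNBOUND and can be rebound. For example:

    /* Stop counting and return the set to the unbound state. */
    if (cpc_unbind(cpc, set) != 0)
            perror("cpc_unbind");
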
629 cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
634 * The following check ensures that only the most recently bound set
635 * can be sampled, as binding a set invalidates all other sets in the
638 if (set->cs_state == CS_UNBOUND ||
639 buf->cb_size != set->cs_nreqs * sizeof (uint64_t)) {
644 switch (set->cs_state) {
652 return (ioctl(set->cs_fd, CPCIO_SAMPLE, &args));
654 return (__pctx_cpc(set->cs_pctx, cpc, CPC_SAMPLE, set->cs_id,
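
Sampling (lines 629-654) enforces two invariants before dispatching on the binding type: the set must be bound, and the buffer must match the set's request count (lines 638-639). Reading a value back by request index, assuming the documented cpc_buf_get(3CPC):

    uint64_t val;

    if (cpc_set_sample(cpc, set, buf) != 0) {
            perror("cpc_set_sample");
            exit(1);
    }
    /* index came from cpc_set_add_request() above. */
    (void) cpc_buf_get(cpc, buf, index, &val);
    (void) printf("count: %llu\n", (unsigned long long)val);
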
781 cpc_walk_requests(cpc_t *cpc, cpc_set_t *set, void *arg,
789 for (rp = set->cs_request; rp != NULL; rp = rp->cr_next) {
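
cpc_walk_requests() (lines 781-789) simply iterates the cs_request list and invokes the caller's action for each request. A callback sketch matching the documented cpc_walk_requests(3CPC) signature:

    static void
    print_request(void *arg, int index, const char *event,
        uint64_t preset, uint_t flags, int nattrs,
        const cpc_attr_t *attrs)
    {
            (void) printf("request %d: %s\n", index, event);
    }

    /* ... */
    cpc_walk_requests(cpc, set, NULL, print_request);
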
1094 cpc_set_t *set;
1098 for (set = cpc->cpc_sets; set != NULL; set = set->cs_next)
1099 if (set->cs_pctx == pctx)
1100 set->cs_pctx = NULL;
1105 * Check that the set is valid; if so it will be in the cpc handle's
1106 * list of sets. The lock protects the list of sets, but not the set
1110 cpc_set_valid(cpc_t *cpc, cpc_set_t *set)
1117 if (csp == set)