Lines Matching +full:open +full:-pic

The matching lines below appear to come from the DTrace cpc (CPU performance counter) provider implementation, dcpc.c, in the illumos/OpenSolaris kernel. The leading number on each line is its line number within that file, and the trailing "in func()" annotation names the enclosing function.

45  * ---------------------------------------
72 * first-come, first-served basis. Only a finite amount of hardware resource
80 * counters are made available on a first-come, first-served basis. As with
81 * cpustat, the cpc provider has priority over per-LWP libcpc usage (e.g.
82 * cputrack(1)). Invoking the cpc provider will cause all existing per-LWP
91 int dcpc_picno; /* pic this event is programmed in */
108 * DCPC_MIN_OVF_DEFAULT or the value that dcpc-min-overflow is set to in
125 * which contains the pic(s) that have overflowed.
149 dtrace_probe(dcpc_actv_reqs[i]->dcpc_id, in dcpc_fire()
150 CPU->cpu_cpcprofile_pc, in dcpc_fire()
151 CPU->cpu_cpcprofile_upc, 0, 0, 0); in dcpc_fire()
166 (bitmap & (1ULL << dcpc_actv_reqs[i]->dcpc_picno))) { in dcpc_fire()
167 dtrace_probe(dcpc_actv_reqs[i]->dcpc_id, in dcpc_fire()
168 CPU->cpu_cpcprofile_pc, in dcpc_fire()
169 CPU->cpu_cpcprofile_upc, 0, 0, 0); in dcpc_fire()
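
Only fragments of the overflow dispatch loop survive in the listing above. As a minimal user-level sketch of the same idea (the dcpc_request_t type, the dcpc_actv_reqs array and fire_probe() below are stand-ins for illustration, not the kernel implementation), mapping an overflow bitmap to probe firings looks roughly like this:

    /* For every active request whose pic is set in the overflow bitmap,
     * "fire" the corresponding probe, mirroring the loop in dcpc_fire(). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int      dcpc_picno;    /* pic this request is programmed on */
        uint32_t dcpc_id;       /* probe id to fire */
    } dcpc_request_t;

    #define MAX_REQS 4
    static dcpc_request_t *dcpc_actv_reqs[MAX_REQS];

    static void
    fire_probe(uint32_t id)
    {
        printf("probe %" PRIu32 " fired\n", id);
    }

    static void
    dispatch(uint64_t bitmap)
    {
        for (int i = 0; i < MAX_REQS; i++) {
            if (dcpc_actv_reqs[i] != NULL &&
                (bitmap & (1ULL << dcpc_actv_reqs[i]->dcpc_picno)))
                fire_probe(dcpc_actv_reqs[i]->dcpc_id);
        }
    }

    int
    main(void)
    {
        dcpc_request_t req = { 1, 42 };     /* pic 1, probe id 42 */

        dcpc_actv_reqs[0] = &req;
        dispatch(1ULL << 1);                /* pic 1 overflowed */
        return (0);
    }
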
188 (void) strncpy(pp->dcpc_event_name, eventname, in dcpc_create_probe()
189 sizeof (pp->dcpc_event_name) - 1); in dcpc_create_probe()
190 pp->dcpc_event_name[sizeof (pp->dcpc_event_name) - 1] = '\0'; in dcpc_create_probe()
191 pp->dcpc_flag = flag | CPC_OVF_NOTIFY_EMT; in dcpc_create_probe()
192 pp->dcpc_ovfval = ovfval; in dcpc_create_probe()
193 pp->dcpc_umask = umask; in dcpc_create_probe()
194 pp->dcpc_actv_req_idx = pp->dcpc_picno = pp->dcpc_disabling = -1; in dcpc_create_probe()
196 pp->dcpc_id = dtrace_probe_create(id, NULL, NULL, probename, in dcpc_create_probe()
207 * event_name-mode-{optional_umask}-overflow_rate in dcpc_provide()
209 * DC_refill_from_system-user-0x1e-50000, or, in dcpc_provide()
210 * DC_refill_from_system-all-10000 in dcpc_provide()
216 long umask = -1, val = 0; in dcpc_provide()
225 len = strlen(desc->dtpd_name); in dcpc_provide()
227 (void) strcpy(str, desc->dtpd_name); in dcpc_provide()
235 if (str[i] == '-') in dcpc_provide()
244 (void) strncpy(event, p, CPC_MAX_EVENT_LEN - 1); in dcpc_provide()
245 event[CPC_MAX_EVENT_LEN - 1] = '\0'; in dcpc_provide()
314 dcpc_create_probe(dcpc_pid, desc->dtpd_name, in dcpc_provide()
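
The names being parsed here follow the event_name-mode-{optional_umask}-overflow_rate convention shown in the dcpc_provide() comment above, and are what a user names when enabling a probe through DTrace's cpc provider (e.g. the DC_refill_from_system-all-10000 form quoted in the comment). A rough user-level sketch of that tokenization, assuming the event name itself contains no '-' (all helper code below is made up for illustration, not the in-kernel parser):

    /* Sketch: split a cpc probe name of the form
     *     event_name-mode[-umask]-overflow_rate
     * into its parts. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
        char name[] = "DC_refill_from_system-user-0x1e-50000";
        char *fields[4];
        int nfields = 0;

        for (char *p = strtok(name, "-"); p != NULL && nfields < 4;
            p = strtok(NULL, "-"))
            fields[nfields++] = p;

        if (nfields < 3) {
            fprintf(stderr, "malformed probe name\n");
            return (1);
        }

        long umask = (nfields == 4) ? strtol(fields[2], NULL, 16) : -1;
        long rate = strtol(fields[nfields - 1], NULL, 10);

        printf("event=%s mode=%s umask=%#lx rate=%ld\n",
            fields[0], fields[1], umask, rate);
        return (0);
    }
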
336 ASSERT(pp->dcpc_enabled == 0); in dcpc_destroy()
344 if (CPU->cpu_cpcprofile_pc == 0) { in dcpc_mode()
357 (void) strncpy(set->ks_req[reqno].kr_event, pp->dcpc_event_name, in dcpc_populate_set()
359 set->ks_req[reqno].kr_config = NULL; in dcpc_populate_set()
360 set->ks_req[reqno].kr_index = reqno; in dcpc_populate_set()
361 set->ks_req[reqno].kr_picnum = -1; in dcpc_populate_set()
362 set->ks_req[reqno].kr_flags = pp->dcpc_flag; in dcpc_populate_set()
368 if (pp->dcpc_umask >= 0) { in dcpc_populate_set()
369 set->ks_req[reqno].kr_attr = in dcpc_populate_set()
371 set->ks_req[reqno].kr_nattrs = 1; in dcpc_populate_set()
373 (void) strncpy(set->ks_req[reqno].kr_attr->ka_name, in dcpc_populate_set()
376 (void) strncpy(set->ks_req[reqno].kr_attr->ka_name, in dcpc_populate_set()
378 set->ks_req[reqno].kr_attr->ka_val = pp->dcpc_umask; in dcpc_populate_set()
380 set->ks_req[reqno].kr_attr = NULL; in dcpc_populate_set()
381 set->ks_req[reqno].kr_nattrs = 0; in dcpc_populate_set()
389 if (pp->dcpc_enabled && (c->cpu_cpc_ctx != NULL)) { in dcpc_populate_set()
390 oset = c->cpu_cpc_ctx->kc_set; in dcpc_populate_set()
392 for (i = 0; i < oset->ks_nreqs; i++) { in dcpc_populate_set()
393 if (strcmp(oset->ks_req[i].kr_event, in dcpc_populate_set()
394 set->ks_req[reqno].kr_event) == 0) { in dcpc_populate_set()
395 set->ks_req[reqno].kr_preset = in dcpc_populate_set()
396 *(oset->ks_req[i].kr_data); in dcpc_populate_set()
400 set->ks_req[reqno].kr_preset = UINT64_MAX - pp->dcpc_ovfval; in dcpc_populate_set()
403 set->ks_nreqs++; in dcpc_populate_set()
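
The kr_preset arithmetic above is the heart of the sampling scheme: the virtualized 64-bit counter is preset to UINT64_MAX - ovfval, so roughly ovfval further events carry it past its maximum and raise the overflow interrupt that dcpc_fire() turns into a probe firing. A trivial stand-alone illustration of the arithmetic:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t ovfval = 10000;                /* requested overflow rate */
        uint64_t preset = UINT64_MAX - ovfval;  /* initial counter value */

        /* Events needed to bring the counter back up to its maximum: */
        printf("events to reach UINT64_MAX: %" PRIu64 "\n",
            UINT64_MAX - preset);
        return (0);
    }
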
414 * 2) We are on a multi-overflow platform and we already have one or
417 * 3) We are on a multi-overflow platform and we have just removed an
442 set->ks_req = in dcpc_create_set()
445 set->ks_data = in dcpc_create_set()
471 set->ks_ctx = ctx = kcpc_ctx_alloc(KM_SLEEP); in dcpc_program_cpu_event()
472 ctx->kc_set = set; in dcpc_program_cpu_event()
473 ctx->kc_cpuid = c->cpu_id; in dcpc_program_cpu_event()
481 for (i = 0; i < set->ks_nreqs; i++) { in dcpc_program_cpu_event()
484 strcmp(set->ks_req[i].kr_event, in dcpc_program_cpu_event()
485 dcpc_actv_reqs[j]->dcpc_event_name) == 0) { in dcpc_program_cpu_event()
486 dcpc_actv_reqs[j]->dcpc_picno = in dcpc_program_cpu_event()
487 set->ks_req[i].kr_picnum; in dcpc_program_cpu_event()
496 octx = c->cpu_cpc_ctx; in dcpc_program_cpu_event()
501 kcpc_set_t *oset = octx->kc_set; in dcpc_program_cpu_event()
502 kmem_free(oset->ks_data, oset->ks_nreqs * sizeof (uint64_t)); in dcpc_program_cpu_event()
516 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); in dcpc_program_cpu_event()
520 return (-1); in dcpc_program_cpu_event()
532 if (c->cpu_flags & CPU_OFFLINE) in dcpc_disable_cpu()
539 ctx = c->cpu_cpc_ctx; in dcpc_disable_cpu()
543 set = ctx->kc_set; in dcpc_disable_cpu()
546 kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); in dcpc_disable_cpu()
553 * per-CPU dcpc interrupt state byte. The purpose of the state byte is to
576 ASSERT(cpu_core[c->cpu_id].cpuc_dcpc_intr_state != DCPC_INTR_INACTIVE); in dcpc_block_interrupts()
579 state = &cpu_core[c->cpu_id].cpuc_dcpc_intr_state; in dcpc_block_interrupts()
585 } while ((c = c->cpu_next) != cpu_list); in dcpc_block_interrupts()
597 ASSERT(cpu_core[c->cpu_id].cpuc_dcpc_intr_state != DCPC_INTR_INACTIVE); in dcpc_release_interrupts()
600 cpu_core[c->cpu_id].cpuc_dcpc_intr_state = DCPC_INTR_FREE; in dcpc_release_interrupts()
602 } while ((c = c->cpu_next) != cpu_list); in dcpc_release_interrupts()
616 ASSERT(cpu_core[c->cpu_id].cpuc_dcpc_intr_state == DCPC_INTR_INACTIVE); in dcpc_claim_interrupts()
619 cpu_core[c->cpu_id].cpuc_dcpc_intr_state = DCPC_INTR_FREE; in dcpc_claim_interrupts()
621 } while ((c = c->cpu_next) != cpu_list); in dcpc_claim_interrupts()
634 ASSERT(cpu_core[c->cpu_id].cpuc_dcpc_intr_state != DCPC_INTR_INACTIVE); in dcpc_surrender_interrupts()
637 cpu_core[c->cpu_id].cpuc_dcpc_intr_state = DCPC_INTR_INACTIVE; in dcpc_surrender_interrupts()
639 } while ((c = c->cpu_next) != cpu_list); in dcpc_surrender_interrupts()
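
dcpc_block_interrupts(), dcpc_release_interrupts(), dcpc_claim_interrupts() and dcpc_surrender_interrupts() above all drive the per-CPU cpuc_dcpc_intr_state byte. A compressed user-level sketch of the protocol those fragments suggest (the state names, transitions and C11 atomics below are assumptions for illustration, not the kernel's primitives):

    #include <stdatomic.h>

    enum {
        DCPC_INTR_INACTIVE,     /* dcpc does not own overflow interrupts */
        DCPC_INTR_FREE,         /* owned; no overflow handler running */
        DCPC_INTR_CONFIG        /* owned; (re)configuration in progress */
    };

    static _Atomic unsigned char intr_state = DCPC_INTR_INACTIVE;

    static void
    claim_interrupt(void)       /* INACTIVE -> FREE: take ownership */
    {
        atomic_store(&intr_state, DCPC_INTR_FREE);
    }

    static void
    block_interrupt(void)       /* drain any handler, then lock it out */
    {
        unsigned char expect = DCPC_INTR_FREE;

        while (!atomic_compare_exchange_weak(&intr_state, &expect,
            DCPC_INTR_CONFIG))
            expect = DCPC_INTR_FREE;    /* handler still active; retry */
    }

    static void
    release_interrupt(void)     /* CONFIG -> FREE: handler may run again */
    {
        atomic_store(&intr_state, DCPC_INTR_FREE);
    }

    static void
    surrender_interrupt(void)   /* FREE -> INACTIVE: give the interrupt back */
    {
        atomic_store(&intr_state, DCPC_INTR_INACTIVE);
    }

    int
    main(void)
    {
        claim_interrupt();      /* first probe enabled */
        block_interrupt();      /* quiesce before programming counters */
        release_interrupt();    /* programming complete */
        surrender_interrupt();  /* last probe disabled */
        return (0);
    }
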
649 * which are still valid (possible with multi-overflow platforms).
669 if (c->cpu_flags & CPU_OFFLINE) in dcpc_program_event()
685 if (c->cpu_cpc_ctx != NULL) in dcpc_program_event()
687 } while ((c = c->cpu_next) != cpu_list); in dcpc_program_event()
697 if (pp->dcpc_disabling == 1) in dcpc_program_event()
698 dcpc_actv_reqs[pp->dcpc_actv_req_idx] = NULL; in dcpc_program_event()
704 if (c->cpu_flags & CPU_OFFLINE) in dcpc_program_event()
708 } while ((c = c->cpu_next) != cpu_list && ret == 0); in dcpc_program_event()
718 pp->dcpc_enabled = 0; in dcpc_program_event()
720 return (-1); in dcpc_program_event()
723 if (pp->dcpc_disabling != 1) in dcpc_program_event()
724 pp->dcpc_enabled = 1; in dcpc_program_event()
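
The do { ... } while ((c = c->cpu_next) != cpu_list) pattern above recurs throughout these fragments: the kernel's CPU list is circular, so the walk starts at cpu_list, visits each CPU once and stops when it arrives back at the head, usually skipping CPUs flagged CPU_OFFLINE. A stand-alone sketch of the idiom (cpu_t here is a toy stand-in, not the kernel structure):

    #include <stdio.h>

    #define CPU_OFFLINE 0x001

    typedef struct cpu {
        int         cpu_id;
        int         cpu_flags;
        struct cpu *cpu_next;   /* circular list */
    } cpu_t;

    static void
    program(cpu_t *c)
    {
        printf("programming counters on cpu %d\n", c->cpu_id);
    }

    int
    main(void)
    {
        cpu_t cpus[3] = {
            { 0, 0,           &cpus[1] },
            { 1, CPU_OFFLINE, &cpus[2] },   /* skipped */
            { 2, 0,           &cpus[0] },
        };
        cpu_t *cpu_list = &cpus[0], *c = cpu_list;

        do {
            if (c->cpu_flags & CPU_OFFLINE)
                continue;       /* jumps to the while() condition */
            program(c);
        } while ((c = c->cpu_next) != cpu_list);
        return (0);
    }
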
747 return (-1); in dcpc_enable()
760 pp->dcpc_actv_req_idx = i; in dcpc_enable()
771 dtrace_cpc_in_use--; in dcpc_enable()
772 return (-1); in dcpc_enable()
775 ASSERT(pp->dcpc_actv_req_idx >= 0); in dcpc_enable()
806 * invalidate any lwp-based contexts and lay claim to the in dcpc_enable()
835 if (c->cpu_flags & CPU_OFFLINE) in dcpc_enable()
838 kcpc_cpu_program(c, c->cpu_cpc_ctx); in dcpc_enable()
839 } while ((c = c->cpu_next) != cpu_list); in dcpc_enable()
849 dtrace_cpc_in_use--; in dcpc_enable()
850 dcpc_actv_reqs[pp->dcpc_actv_req_idx] = NULL; in dcpc_enable()
851 pp->dcpc_actv_req_idx = pp->dcpc_picno = -1; in dcpc_enable()
860 return (-1); in dcpc_enable()
866 * one, its associated meta-data and re-program the hardware.
883 if (pp->dcpc_enabled == 0) { in dcpc_disable()
889 if (pp->dcpc_actv_req_idx >= 0) { in dcpc_disable()
890 dcpc_actv_reqs[pp->dcpc_actv_req_idx] = NULL; in dcpc_disable()
891 pp->dcpc_actv_req_idx = pp->dcpc_picno = in dcpc_disable()
892 pp->dcpc_disabling = -1; in dcpc_disable()
901 * free up the meta-data. in dcpc_disable()
912 } while ((c = c->cpu_next) != cpu_list); in dcpc_disable()
914 dcpc_actv_reqs[pp->dcpc_actv_req_idx] = NULL; in dcpc_disable()
920 * enabling and re-program the hardware with the new config. in dcpc_disable()
925 pp->dcpc_disabling = 1; in dcpc_disable()
931 dcpc_enablings--; in dcpc_disable()
932 dtrace_cpc_in_use--; in dcpc_disable()
933 pp->dcpc_enabled = 0; in dcpc_disable()
934 pp->dcpc_actv_req_idx = pp->dcpc_picno = pp->dcpc_disabling = -1; in dcpc_disable()
961 state = &cpu_core[c->cpu_id].cpuc_dcpc_intr_state; in dcpc_cpu_setup()
978 cpu_core[c->cpu_id].cpuc_dcpc_intr_state = in dcpc_cpu_setup()
1095 if (kcpc_pcbe_loaded() == -1) in dcpc_attach()
1118 dcpc_ovf_mask = (1 << cpc_ncounters) - 1; in dcpc_attach()
1125 * Determine which, if any, mask attribute the back-end can use. in dcpc_attach()
1144 DDI_PROP_DONTPASS, "dcpc-min-overflow", DCPC_MIN_OVF_DEFAULT); in dcpc_attach()
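
dcpc_attach() is also where the provider's tunables are established: dcpc_ovf_mask is simply a bitmask with one bit per hardware counter (with cpc_ncounters == 4 it would be 0xf), and the smallest overflow rate a probe may request comes from the dcpc-min-overflow driver property, falling back to DCPC_MIN_OVF_DEFAULT. A hedged example of overriding that property in the driver's .conf file (the node specification and value below are illustrative, not copied from a shipped dcpc.conf):

    # dcpc.conf -- raise the minimum permitted overflow rate
    name="dcpc" parent="pseudo" instance=0 dcpc-min-overflow=5000;
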
1155 dcpc_open, /* open */