Lines matching "coexist", "-", "support" in arch/x86/events/perf_event.h:
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * For licencing details see kernel-base/COPYING
 * register -------------------------------
 *-----------------------------------------
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
EXTRA_REG_NONE = -1, /* not used */
return ((ecode & c->cmask) - c->code) <= (u64)c->size;  /* in constraint_match() */
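The single comparison in constraint_match() is the unsigned range-check idiom: after masking the event code and subtracting the constraint's base code, any code below the base wraps around to a huge unsigned value, so one <= test covers both bounds. A minimal standalone sketch of the same idiom (names are illustrative, not from the kernel):

#include <stdbool.h>
#include <stdint.h>

/*
 * True iff base <= code <= base + size, using a single comparison.
 * If code < base, the subtraction wraps to a very large uint64_t and
 * the <= test fails, so no separate lower-bound check is needed.
 */
static bool code_in_range(uint64_t code, uint64_t base, uint64_t size)
{
        return (code - base) <= size;
}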
return event->hw.flags & PERF_X86_EVENT_TOPDOWN;  /* in is_topdown_count() */
u64 config = event->attr.config;  /* in is_metric_event() */
return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;  /* in is_slots_event() */
return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;  /* in check_leader_group() */
return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);  /* in is_branch_counters_group() */
return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);  /* in is_pebs_counter_event_group() */
return check_leader_group(event->group_leader, PERF_X86_EVENT_ACR);  /* in is_acr_event_group() */
#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)
raw_spinlock_t lock; /* per-core: protect structure */
int refcnt; /* per-core: #HT threads */
unsigned core_id; /* per-core: core id */
int refcnt; /* per-core: #HT threads */
unsigned core_id; /* per-core: core id */
 * manage shared (per-core, per-cpu) registers
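The duplicated refcnt/core_id pairs above, together with the "manage shared (per-core, per-cpu) registers" comment, implement per-core sharing: the first CPU of a core allocates the structure, and its HT sibling finds it by core_id and takes a reference. A rough sketch of that lookup pattern, with an invented helper name and a plain array standing in for the real per-CPU data:

/*
 * Sketch only: siblings on the same physical core reuse one shared
 * structure; a new core allocates its own. The refcnt/core_id fields
 * match the fragments above, everything else is illustrative.
 */
static struct intel_shared_regs *
example_find_core_regs(struct intel_shared_regs **per_cpu_regs, int ncpus,
                       unsigned int core_id)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                struct intel_shared_regs *regs = per_cpu_regs[i];

                if (regs && regs->core_id == core_id) {
                        regs->refcnt++; /* another HT thread joins this core */
                        return regs;
                }
        }
        return NULL; /* caller allocates a fresh structure */
}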
.size = (e) - (c), \
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *
 * The any-thread option is supported starting with v3.
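A sketch of what such a filter mask can look like, built from the architectural PERFEVTSEL bit positions (edge = bit 18, inv = bit 23, cmask = bits 24-31) and the Haswell TSX bits (in_tx = bit 32, in_tx_checkpointed = bit 33); the EX_* names are made up for illustration and are not the kernel's macros:

#define EX_EVENTSEL_EDGE                (1ULL << 18)
#define EX_EVENTSEL_INV                 (1ULL << 23)
#define EX_EVENTSEL_CMASK               (0xFFULL << 24)
#define EX_HSW_IN_TX                    (1ULL << 32)
#define EX_HSW_IN_TX_CHECKPOINTED       (1ULL << 33)

/*
 * Filter bits in the spirit of the comment above: if any of these are
 * set in a raw event config, the event cannot be scheduled on a fixed
 * counter, so a fixed-counter constraint has to compare them in
 * addition to the event code and umask.
 */
#define EX_FIXED_EVENT_FILTER                                           \
        (EX_EVENTSEL_EDGE | EX_EVENTSEL_INV | EX_EVENTSEL_CMASK |       \
         EX_HSW_IN_TX | EX_HSW_IN_TX_CHECKPOINTED)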
 * We define the end marker as having a weight of -1
#define EVENT_CONSTRAINT_END { .weight = -1 }
 * Check for end marker with weight == -1
for ((e) = (c); (e)->weight != -1; (e)++)
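A minimal usage sketch of the end-marker convention: a table terminated with EVENT_CONSTRAINT_END and walked with for_each_event_constraint(). The table entry and the function are invented for illustration; only the terminator and the iteration macro are the real convention.

static struct event_constraint example_constraints[] = {
        INTEL_EVENT_CONSTRAINT(0x3c, 0x3),      /* hypothetical entry */
        EVENT_CONSTRAINT_END                    /* the { .weight = -1 } sentinel */
};

static struct event_constraint *example_find_constraint(u64 config)
{
        struct event_constraint *c;

        /* Stops when it reaches the weight == -1 end marker. */
        for_each_event_constraint(c, example_constraints) {
                if (constraint_match(c, config))
                        return c;
        }
        return NULL;
}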
int idx; /* per_xxx->regs[] reg index */
#define PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1)
#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
__Fp = &hybrid_pmu(_pmu)->_field; \
__Fp = &hybrid_pmu(_pmu)->_var; \
__Fp = hybrid_pmu(_pmu)->_field; \
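These three fragments are the hybrid-PMU arms of the hybrid()/hybrid_var()-style accessors used elsewhere in the file (e.g. hybrid(event->pmu, config_mask) further down). A simplified, open-coded sketch of the selection pattern, without the statement-expression plumbing of the real macros; the helper name is invented:

/*
 * Sketch: on hybrid parts each struct x86_hybrid_pmu carries its own
 * copy of certain x86_pmu fields; callers reach it via hybrid_pmu(pmu)
 * and fall back to the global x86_pmu copy on non-hybrid systems.
 */
static u64 example_config_mask(struct pmu *pmu)
{
        if (is_hybrid() && pmu)
                return hybrid_pmu(pmu)->config_mask;

        return x86_pmu.config_mask;
}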
 * struct x86_pmu - generic x86 pmu
bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
 * Intel host/guest support (KVM)
 * Hybrid support
 * Add padding to guarantee the 64-byte alignment of the state buffer.
#define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */
#define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */
return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;  /* in task_context_opt() */
return &((struct x86_perf_task_context *)ctx)->opt;  /* in task_context_opt() */
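The two casts above are the two arms of task_context_opt(). A sketch of the full helper as reconstructed from these fragments, assuming the selector is the architectural-LBR feature check:

static inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
        /*
         * Assumption for this sketch: arch-LBR parts use the
         * x86_perf_task_context_arch_lbr layout, everything else the
         * legacy layout; both embed the same ->opt bookkeeping struct.
         */
        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
                return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

        return &((struct x86_perf_task_context *)ctx)->opt;
}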
 * A value of 0 means 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event ID.
return hwc->flags & PERF_X86_EVENT_AMD_BRS;  /* in has_amd_brs() */
return hwc->flags & PERF_X86_EVENT_PAIR;  /* in is_counter_pair() */
if (hwc->extra_reg.reg)  /* in __x86_pmu_enable_event() */
        wrmsrq(hwc->extra_reg.reg, hwc->extra_reg.config);  /* in __x86_pmu_enable_event() */
wrmsrq(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);  /* in __x86_pmu_enable_event() */
wrmsrq(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);  /* in __x86_pmu_enable_event() */
struct hw_perf_event *hwc = &event->hw;  /* in x86_pmu_disable_event() */
wrmsrq(hwc->config_base, hwc->config & ~disable_mask);  /* in x86_pmu_disable_event() */
wrmsrq(x86_pmu_config_addr(hwc->idx + 1), 0);  /* in x86_pmu_disable_event() */
return event->attr.config & hybrid(event->pmu, config_mask);  /* in x86_pmu_get_event_config() */
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;  /* in set_linear_ip() */
if (regs->flags & X86_VM_MASK)  /* in set_linear_ip() */
        regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);  /* in set_linear_ip() */
regs->ip = ip;  /* in set_linear_ip() */
perf_sched_cb_inc(event->pmu);  /* in amd_pmu_brs_add() */
cpuc->lbr_users++;  /* in amd_pmu_brs_add() */
cpuc->lbr_users--;  /* in amd_pmu_brs_del() */
WARN_ON_ONCE(cpuc->lbr_users < 0);  /* in amd_pmu_brs_del() */
perf_sched_cb_dec(event->pmu);  /* in amd_pmu_brs_del() */
return -EOPNOTSUPP;  /* in amd_brs_init() */
return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);  /* in is_pebs_pt() */
struct hw_perf_event *hwc = &event->hw;  /* in intel_pmu_has_bts_period() */
if (event->attr.freq)  /* in intel_pmu_has_bts_period() */
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;  /* in intel_pmu_has_bts_period() */
struct hw_perf_event *hwc = &event->hw;  /* in intel_pmu_has_bts() */
return intel_pmu_has_bts_period(event, hwc->sample_period);  /* in intel_pmu_has_bts() */
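A sketch of the predicate these last fragments belong to, filled in from context; the BTS-event lookup via x86_pmu.event_map() and the period == 1 condition are reconstructions, not quoted from the matched lines:

static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

        /* Frequency-mode sampling is never steered to BTS. */
        if (event->attr.freq)
                return false;

        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        /* BTS traces every retired branch, i.e. branch events with period 1. */
        return hw_event == bts_event && period == 1;
}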