
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  For licencing details see kernel-base/COPYING
 * [table elided: per-core vs per-cpu sharing of the extra registers,
 *  with and without HT]
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
	EXTRA_REG_NONE = -1,	/* not used */
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;	/* in constraint_match() */
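/*
 * Illustrative sketch (not part of this file): the line above uses the
 * classic unsigned range-check idiom.  Subtracting the constraint's base
 * code wraps around for values below it, so a single unsigned comparison
 * tests base <= code <= base + size.  The demo_* name is invented.
 */
static inline bool demo_code_in_range(u64 code, u64 base, u64 size)
{
	return (code - base) <= size;
}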
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;	/* in is_topdown_count() */
	u64 config = event->attr.config;	/* in is_metric_event() */
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;	/* in is_slots_event() */
	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;	/* in is_branch_counters_group() */
#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
	raw_spinlock_t		lock;		/* per-core: protect structure */
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
 * manage shared (per-core, per-cpu) registers
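/*
 * Hedged sketch, not the in-tree scheduling code: one way a per-core shared
 * extra register slot could be claimed under its lock.  It assumes an
 * er_account-style entry with 'lock', 'config', 'reg' and an atomic 'ref'
 * field; the demo_* name is invented for illustration.
 */
static bool demo_claim_extra_reg(struct er_account *era, u64 reg, u64 config)
{
	unsigned long flags;
	bool ok = false;

	raw_spin_lock_irqsave(&era->lock, flags);
	if (!atomic_read(&era->ref) || era->config == config) {
		/* first user, or an identical config: share the MSR */
		atomic_inc(&era->ref);
		era->config = config;
		era->reg = reg;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return ok;
}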
	.size = (e) - (c),		\
 * will increase scheduling cycles for an over-committed system
 * Constraint on the Event code + UMask + fixed-mask
 *
 * The following filter bits disqualify an event from the fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *
 * The any-thread option is supported starting with v3.
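/*
 * Hedged example: a fixed-counter constraint is typically declared with the
 * FIXED_EVENT_CONSTRAINT() helper, e.g. binding INST_RETIRED.ANY (event
 * code 0xc0, umask 0x00) to fixed counter 0.  The event encoding shown here
 * follows the Intel SDM and is illustrative, not quoted from this file.
 */
static struct event_constraint demo_fixed0 __maybe_unused =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0);	/* INST_RETIRED.ANY on fixed counter 0 */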
 * We define the end marker as having a weight of -1
#define EVENT_CONSTRAINT_END	{ .weight = -1 }

 * Check for end marker with weight == -1
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
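/*
 * Hedged sketch of how such a table is normally consumed: a constraint
 * array is terminated by EVENT_CONSTRAINT_END and walked with the iterator
 * shown above.  INTEL_EVENT_CONSTRAINT() is the usual helper for plain
 * event-code constraints; the demo_* names and the 0xc0/counter-0 pairing
 * are illustrative only.
 */
static struct event_constraint demo_constraints[] __maybe_unused = {
	INTEL_EVENT_CONSTRAINT(0xc0, 0x1),	/* e.g. restrict an event to counter 0 */
	EVENT_CONSTRAINT_END
};

static __maybe_unused void demo_dump_constraints(void)
{
	struct event_constraint *c;

	for_each_event_constraint(c, demo_constraints)
		pr_debug("code=0x%llx idxmsk=0x%llx\n", c->code, c->idxmsk64);
}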
	int		idx;		/* per_xxx->regs[] reg index */
#define PERF_PEBS_DATA_SOURCE_MASK	(PERF_PEBS_DATA_SOURCE_MAX - 1)
#define PERF_PEBS_DATA_SOURCE_GRT_MASK	(PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
 * CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture
 * of the core. Bits 31-24 indicate its core type (Core or Atom).
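/*
 * Hedged sketch of consuming that CPUID leaf: shift EAX of leaf 0x1a down
 * by 24 bits to obtain the core-type field.  The 0x20/0x40 Atom/Core values
 * follow the SDM description of CPUID.1AH; the demo_* names and the local
 * shift constant are illustrative, not definitions from this file.
 */
#define DEMO_CORE_TYPE_SHIFT	24

static __maybe_unused u8 demo_read_core_type(void)
{
	return cpuid_eax(0x1a) >> DEMO_CORE_TYPE_SHIFT;	/* 0x20 == Atom, 0x40 == Core */
}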
		__Fp = &hybrid_pmu(_pmu)->_field;	\
		__Fp = &hybrid_pmu(_pmu)->_var;		\
		__Fp = hybrid_pmu(_pmu)->_field;	\
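/*
 * Simplified sketch of the selector pattern the three assignments above
 * belong to: start from the global x86_pmu field and redirect to the
 * per-PMU copy only on hybrid systems.  This demo_* macro is a paraphrase
 * for illustration, not the in-tree hybrid()/hybrid_var() definitions.
 */
#define demo_hybrid_field(_pmu, _field)				\
(*({								\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;		\
								\
	if (is_hybrid() && (_pmu))				\
		__Fp = &hybrid_pmu(_pmu)->_field;		\
								\
	__Fp;							\
}))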
 * struct x86_pmu - generic x86 pmu
	bool		lbr_pt_coexist;	/* (LBR|BTS) may coexist with PT */
 * Intel host/guest support (KVM)
 * Hybrid support
 * Add padding to guarantee the 64-byte alignment of the state buffer.
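/*
 * Generic illustration of the comment above (not the in-file structure):
 * an alignment attribute on the buffer member forces the compiler to pad
 * the preceding fields so the buffer starts on a 64-byte boundary.
 */
struct demo_aligned_state {
	u64	opt;				/* small bookkeeping members first */
	u8	state[256] __aligned(64);	/* padding inserted before this member */
};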
#define PMU_FL_NO_HT_SHARING	0x1	/* no hyper-threading resource sharing */
#define PMU_FL_INSTR_LATENCY	0x80	/* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_RETIRE_LATENCY	0x200	/* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR		0x400	/* Support branch counter logging */
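/*
 * Hedged usage sketch: these PMU_FL_* bits live in x86_pmu.flags and are
 * typically tested like this when deciding whether to expose a feature.
 * The demo_* helper is invented for illustration.
 */
static __maybe_unused bool demo_supports_retire_latency(void)
{
	return !!(x86_pmu.flags & PMU_FL_RETIRE_LATENCY);
}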
	return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;	/* in task_context_opt() */
	return &((struct x86_perf_task_context *)ctx)->opt;		/* in task_context_opt() */
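/*
 * Hedged reconstruction of the helper the two returns above come from:
 * the saved LBR context uses one of two layouts, and the accessor picks
 * the arch-LBR variant when that CPU feature is present.  The feature
 * check shown here is an assumption about the elided branch condition.
 */
static inline struct x86_perf_task_context_opt *demo_task_context_opt(void *ctx)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

	return &((struct x86_perf_task_context *)ctx)->opt;
}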
 * A value of 0 means 'not supported', -1 means 'hw_event makes no
 * sense on this CPU'.
	return hwc->flags & PERF_X86_EVENT_AMD_BRS;	/* in has_amd_brs() */
	return hwc->flags & PERF_X86_EVENT_PAIR;	/* in is_counter_pair() */
	/* in __x86_pmu_enable_event(): */
	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);

	wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
	/* in x86_pmu_disable_event(): */
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
	wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
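/*
 * Hedged usage sketch: the generic enable path passes
 * ARCH_PERFMON_EVENTSEL_ENABLE as the enable mask before letting the
 * counter run, and x86_pmu_disable_event() clears it again.  The
 * cpuc->enabled check of the real caller is omitted here.
 */
static __maybe_unused void demo_toggle_counter(struct perf_event *event)
{
	__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);
	x86_pmu_disable_event(event);
}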
	return event->attr.config & hybrid(event->pmu, config_mask);	/* in x86_pmu_get_event_config() */
 * Assuming the address is a linear address, we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the
 * registers to reflect this.
	/* in set_linear_ip(): */
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
	/* in amd_pmu_brs_add(): */
	perf_sched_cb_inc(event->pmu);
	cpuc->lbr_users++;

	/* in amd_pmu_brs_del(): */
	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->pmu);
	return -EOPNOTSUPP;	/* in amd_brs_init() */
	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);	/* in is_pebs_pt() */
	/* in intel_pmu_has_bts_period(): */
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;

	/* in intel_pmu_has_bts(): */
	struct hw_perf_event *hwc = &event->hw;

	return intel_pmu_has_bts_period(event, hwc->sample_period);
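/*
 * Hedged sketch of the check the fragments above build up to: BTS only
 * stands in for a branch-instructions event programmed with a fixed period
 * of 1, never in frequency mode.  The helper name and the exact comparison
 * are reconstructed for illustration, not quoted from the file.
 */
static __maybe_unused bool demo_bts_candidate(struct perf_event *event, u64 period)
{
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && period == 1;
}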