Lines Matching +full:use +full:- +full:rtm
1 // SPDX-License-Identifier: GPL-2.0
214 OP_LH | LEVEL(MSC) | P(SNOOP, NONE), /* 0x10: Memory-side Cache Hit */
274 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) in precise_datala_hsw()
276 else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) in precise_datala_hsw()
287 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { in precise_datala_hsw()
313 /* Retrieve the latency data for e-core of ADL */
319 WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); in __grt_latency_data()
322 val = hybrid_var(event->pmu, pebs_data_source)[dse]; in __grt_latency_data()
345 /* Retrieve the latency data for e-core of MTL */
366 val = hybrid_var(event->pmu, pebs_data_source)[status & PERF_PEBS_DATA_SOURCE_MASK]; in lnc_latency_data()
386 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) in lnc_latency_data()
394 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); in lnl_latency_data()
396 if (pmu->pmu_type == hybrid_small) in lnl_latency_data()
404 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); in arl_h_latency_data()
406 if (pmu->pmu_type == hybrid_tiny) in arl_h_latency_data()
420 * use the mapping table for bit 0-3 in load_latency_data()
422 val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse]; in load_latency_data()
470 * use the mapping table for bit 0-3 in store_latency_data()
472 val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse]; in store_latency_data()
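Both comments above describe the same decode step: the low four bits of the hardware data-source word index the per-PMU pebs_data_source mapping table. A minimal standalone model of that lookup (the table contents here are placeholders; the real entries are perf_mem_data_src encodings built from the P()/LEVEL()/OP_* macros seen near the top of the file):

#include <stdint.h>

/* Placeholder table: the kernel fills 16 entries per PMU with encoded
 * perf_mem_data_src values. */
static uint64_t pebs_data_source[16];

static uint64_t decode_dse(uint64_t dse)
{
	return pebs_data_source[dse & 0xf];	/* bits 0-3 select the entry */
}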
583 * This is a cross-CPU update of the cpu_entry_area, we must shoot down in ds_update_cea()
622 struct debug_store *ds = hwev->ds; in alloc_pebs_buffer()
632 return -ENOMEM; in alloc_pebs_buffer()
642 return -ENOMEM; in alloc_pebs_buffer()
646 hwev->ds_pebs_vaddr = buffer; in alloc_pebs_buffer()
648 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; in alloc_pebs_buffer()
649 ds->pebs_buffer_base = (unsigned long) cea; in alloc_pebs_buffer()
651 ds->pebs_index = ds->pebs_buffer_base; in alloc_pebs_buffer()
653 ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; in alloc_pebs_buffer()
669 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; in release_pebs_buffer()
671 dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); in release_pebs_buffer()
672 hwev->ds_pebs_vaddr = NULL; in release_pebs_buffer()
678 struct debug_store *ds = hwev->ds; in alloc_bts_buffer()
688 return -ENOMEM; in alloc_bts_buffer()
690 hwev->ds_bts_vaddr = buffer; in alloc_bts_buffer()
692 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; in alloc_bts_buffer()
693 ds->bts_buffer_base = (unsigned long) cea; in alloc_bts_buffer()
695 ds->bts_index = ds->bts_buffer_base; in alloc_bts_buffer()
697 ds->bts_absolute_maximum = ds->bts_buffer_base + in alloc_bts_buffer()
699 ds->bts_interrupt_threshold = ds->bts_absolute_maximum - in alloc_bts_buffer()
713 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; in release_bts_buffer()
715 dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); in release_bts_buffer()
716 hwev->ds_bts_vaddr = NULL; in release_bts_buffer()
721 struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; in alloc_ds_buffer()
853 if (!cpuc->ds) in intel_pmu_disable_bts()
868 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_bts_buffer()
874 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; in intel_pmu_drain_bts_buffer()
888 base = (struct bts_record *)(unsigned long)ds->bts_buffer_base; in intel_pmu_drain_bts_buffer()
889 top = (struct bts_record *)(unsigned long)ds->bts_index; in intel_pmu_drain_bts_buffer()
896 ds->bts_index = ds->bts_buffer_base; in intel_pmu_drain_bts_buffer()
898 perf_sample_data_init(&data, 0, event->hw.last_period); in intel_pmu_drain_bts_buffer()
916 if (event->attr.exclude_kernel && in intel_pmu_drain_bts_buffer()
917 (kernel_ip(at->from) || kernel_ip(at->to))) in intel_pmu_drain_bts_buffer()
931 header.size * (top - base - skip))) in intel_pmu_drain_bts_buffer()
936 if (event->attr.exclude_kernel && in intel_pmu_drain_bts_buffer()
937 (kernel_ip(at->from) || kernel_ip(at->to))) in intel_pmu_drain_bts_buffer()
940 data.ip = at->from; in intel_pmu_drain_bts_buffer()
941 data.addr = at->to; in intel_pmu_drain_bts_buffer()
949 event->hw.interrupts++; in intel_pmu_drain_bts_buffer()
950 event->pending_kill = POLL_IN; in intel_pmu_drain_bts_buffer()
1226 struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints); in intel_pebs_constraints()
1229 if (!event->attr.precise_ip) in intel_pebs_constraints()
1234 if (constraint_match(c, event->hw.config)) { in intel_pebs_constraints()
1235 event->hw.flags |= c->flags; in intel_pebs_constraints()
1252 * We need the sched_task callback even for per-cpu events when we use
1258 if (cpuc->n_pebs == cpuc->n_pebs_via_pt) in pebs_needs_sched_cb()
1261 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); in pebs_needs_sched_cb()
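Restated as a standalone predicate (a sketch with illustrative argument names, mirroring the two returns above): a context-switch flush is needed only when every PEBS event uses the large-PEBS buffer, and events routed to PT do not count because PT drains its own output.

static int needs_sched_cb(int n_pebs, int n_large_pebs, int n_pebs_via_pt)
{
	if (n_pebs == n_pebs_via_pt)	/* everything goes to PT */
		return 0;
	return n_pebs && (n_pebs == n_large_pebs);
}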
1274 struct debug_store *ds = cpuc->ds; in pebs_update_threshold()
1275 int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu); in pebs_update_threshold()
1279 if (cpuc->n_pebs_via_pt) in pebs_update_threshold()
1283 reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu); in pebs_update_threshold()
1287 if (cpuc->n_pebs == cpuc->n_large_pebs) { in pebs_update_threshold()
1288 threshold = ds->pebs_absolute_maximum - in pebs_update_threshold()
1289 reserved * cpuc->pebs_record_size; in pebs_update_threshold()
1291 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; in pebs_update_threshold()
1294 ds->pebs_interrupt_threshold = threshold; in pebs_update_threshold()
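The two threshold formulas above can be modeled standalone; this sketch uses a hypothetical struct in place of debug_store. With large PEBS the PMI is deferred until only 'reserved' record slots of headroom remain; otherwise the threshold sits one record past the base, so every record raises a PMI.

#include <stdint.h>

struct pebs_buf {			/* hypothetical stand-in for debug_store */
	uint64_t base;			/* pebs_buffer_base */
	uint64_t abs_max;		/* pebs_absolute_maximum */
	uint64_t record_size;		/* pebs_record_size */
};

static uint64_t pebs_threshold(const struct pebs_buf *b, unsigned int reserved,
			       int all_large_pebs)
{
	if (all_large_pebs)
		return b->abs_max - reserved * b->record_size;
	return b->base + b->record_size;	/* PMI after each record */
}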
1300 u64 pebs_data_cfg = cpuc->pebs_data_cfg; in adaptive_pebs_record_size_update()
1312 cpuc->pebs_record_size = sz; in adaptive_pebs_record_size_update()
1323 struct perf_event_attr *attr = &event->attr; in pebs_update_adaptive_cfg()
1324 u64 sample_type = attr->sample_type; in pebs_update_adaptive_cfg()
1329 attr->precise_ip > 1) in pebs_update_adaptive_cfg()
1339 * + For RTM TSX weight we need GPRs for the abort code. in pebs_update_adaptive_cfg()
1342 (attr->sample_regs_intr & PEBS_GP_REGS)) || in pebs_update_adaptive_cfg()
1344 (attr->sample_regs_user & PEBS_GP_REGS)); in pebs_update_adaptive_cfg()
1347 ((attr->config & INTEL_ARCH_EVENT_MASK) == in pebs_update_adaptive_cfg()
1350 if (gprs || (attr->precise_ip < 2) || tsx_weight) in pebs_update_adaptive_cfg()
1354 (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) in pebs_update_adaptive_cfg()
1363 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); in pebs_update_adaptive_cfg()
1373 struct pmu *pmu = event->pmu; in pebs_update_state()
1377 * During removal, ->pebs_data_cfg is still valid for in pebs_update_state()
1380 if ((cpuc->n_pebs == 1) && add) in pebs_update_state()
1381 cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; in pebs_update_state()
1389 cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; in pebs_update_state()
1403 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) in pebs_update_state()
1404 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; in pebs_update_state()
1411 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_add()
1414 cpuc->n_pebs++; in intel_pmu_pebs_add()
1415 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) in intel_pmu_pebs_add()
1416 cpuc->n_large_pebs++; in intel_pmu_pebs_add()
1417 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) in intel_pmu_pebs_add()
1418 cpuc->n_pebs_via_pt++; in intel_pmu_pebs_add()
1430 if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK)) in intel_pmu_pebs_via_pt_disable()
1431 cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK; in intel_pmu_pebs_via_pt_disable()
1437 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_via_pt_enable()
1438 struct debug_store *ds = cpuc->ds; in intel_pmu_pebs_via_pt_enable()
1439 u64 value = ds->pebs_event_reset[hwc->idx]; in intel_pmu_pebs_via_pt_enable()
1441 unsigned int idx = hwc->idx; in intel_pmu_pebs_via_pt_enable()
1446 if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) in intel_pmu_pebs_via_pt_enable()
1447 cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD; in intel_pmu_pebs_via_pt_enable()
1449 cpuc->pebs_enabled |= PEBS_OUTPUT_PT; in intel_pmu_pebs_via_pt_enable()
1451 if (hwc->idx >= INTEL_PMC_IDX_FIXED) { in intel_pmu_pebs_via_pt_enable()
1453 idx = hwc->idx - INTEL_PMC_IDX_FIXED; in intel_pmu_pebs_via_pt_enable()
1455 value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx]; in intel_pmu_pebs_via_pt_enable()
1457 value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; in intel_pmu_pebs_via_pt_enable()
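The two reset-slot reads above, and the matching writes in intel_pmu_pebs_enable() further down, follow one indexing rule. A condensed sketch, with fixed_base standing in for INTEL_PMC_IDX_FIXED and max_gp for MAX_PEBS_EVENTS or MAX_PEBS_EVENTS_FMT4 depending on the PEBS format:

/* GP counters occupy reset slots [0, max_gp); fixed-counter slots are
 * appended after them. */
static unsigned int pebs_reset_slot(unsigned int idx, unsigned int fixed_base,
				    unsigned int max_gp)
{
	if (idx >= fixed_base)
		return max_gp + (idx - fixed_base);
	return idx;
}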
1464 if (cpuc->n_pebs == cpuc->n_large_pebs && in intel_pmu_drain_large_pebs()
1465 cpuc->n_pebs != cpuc->n_pebs_via_pt) in intel_pmu_drain_large_pebs()
1472 u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; in intel_pmu_pebs_enable()
1473 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_enable()
1474 struct debug_store *ds = cpuc->ds; in intel_pmu_pebs_enable()
1475 unsigned int idx = hwc->idx; in intel_pmu_pebs_enable()
1477 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; in intel_pmu_pebs_enable()
1479 cpuc->pebs_enabled |= 1ULL << hwc->idx; in intel_pmu_pebs_enable()
1481 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) in intel_pmu_pebs_enable()
1482 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); in intel_pmu_pebs_enable()
1483 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) in intel_pmu_pebs_enable()
1484 cpuc->pebs_enabled |= 1ULL << 63; in intel_pmu_pebs_enable()
1487 hwc->config |= ICL_EVENTSEL_ADAPTIVE; in intel_pmu_pebs_enable()
1488 if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { in intel_pmu_pebs_enable()
1497 cpuc->active_pebs_data_cfg = pebs_data_cfg; in intel_pmu_pebs_enable()
1500 if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { in intel_pmu_pebs_enable()
1501 cpuc->pebs_data_cfg = pebs_data_cfg; in intel_pmu_pebs_enable()
1507 idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED); in intel_pmu_pebs_enable()
1509 idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); in intel_pmu_pebs_enable()
1513 * Use auto-reload if possible to save a MSR write in the PMI. in intel_pmu_pebs_enable()
1516 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { in intel_pmu_pebs_enable()
1517 ds->pebs_event_reset[idx] = in intel_pmu_pebs_enable()
1518 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; in intel_pmu_pebs_enable()
1520 ds->pebs_event_reset[idx] = 0; in intel_pmu_pebs_enable()
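The reset value written above is the two's-complement of the sample period truncated to the counter width, so the counter overflows after exactly sample_period increments. A worked standalone check, assuming 48-bit counters (the cntval_mask value is illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t cntval_mask = (1ULL << 48) - 1;	/* assume 48-bit counters */
	uint64_t sample_period = 100000;
	uint64_t reset = (0 - sample_period) & cntval_mask;

	/* counting sample_period events from the reset value wraps to 0 */
	assert(((reset + sample_period) & cntval_mask) == 0);
	return 0;
}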
1529 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_del()
1532 cpuc->n_pebs--; in intel_pmu_pebs_del()
1533 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) in intel_pmu_pebs_del()
1534 cpuc->n_large_pebs--; in intel_pmu_pebs_del()
1535 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) in intel_pmu_pebs_del()
1536 cpuc->n_pebs_via_pt--; in intel_pmu_pebs_del()
1544 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_disable()
1548 cpuc->pebs_enabled &= ~(1ULL << hwc->idx); in intel_pmu_pebs_disable()
1550 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && in intel_pmu_pebs_disable()
1552 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); in intel_pmu_pebs_disable()
1553 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) in intel_pmu_pebs_disable()
1554 cpuc->pebs_enabled &= ~(1ULL << 63); in intel_pmu_pebs_disable()
1558 if (cpuc->enabled) in intel_pmu_pebs_disable()
1559 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in intel_pmu_pebs_disable()
1561 hwc->config |= ARCH_PERFMON_EVENTSEL_INT; in intel_pmu_pebs_disable()
1568 if (cpuc->pebs_enabled) in intel_pmu_pebs_enable_all()
1569 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in intel_pmu_pebs_enable_all()
1576 if (cpuc->pebs_enabled) in intel_pmu_pebs_disable_all()
1583 unsigned long from = cpuc->lbr_entries[0].from; in intel_pmu_pebs_fixup_ip()
1584 unsigned long old_to, to = cpuc->lbr_entries[0].to; in intel_pmu_pebs_fixup_ip()
1585 unsigned long ip = regs->ip; in intel_pmu_pebs_fixup_ip()
1599 if (!cpuc->lbr_stack.nr || !from || !to) in intel_pmu_pebs_fixup_ip()
1612 if ((ip - to) > PEBS_FIXUP_SIZE) in intel_pmu_pebs_fixup_ip()
1623 size = ip - to; in intel_pmu_pebs_fixup_ip()
1658 size -= insn.length; in intel_pmu_pebs_fixup_ip()
1686 /* For RTM XABORTs also log the abort code from AX */ in intel_get_tsx_transaction()
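The comment refers to the RTM abort-status convention: on an explicit XABORT, bit 0 of EAX is set and bits 31:24 carry the XABORT imm8 operand. A sketch of folding that code into the transaction word (abort_shift stands in for PERF_TXN_ABORT_SHIFT; treat this as a model, not the exact kernel helper):

#include <stdint.h>

static uint64_t tsx_fold_abort_code(uint64_t txn, uint64_t ax, int abort_shift)
{
	if (ax & 1)				/* abort came from XABORT */
		txn |= ((ax >> 24) & 0xff) << abort_shift;
	return txn;
}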
1695 return ((struct pebs_record_nhm *)n)->status; in get_pebs_status()
1696 return ((struct pebs_basic *)n)->applicable_counters; in get_pebs_status()
1707 int fl = event->hw.flags; in get_data_src()
1727 /* Converting to a user-defined clock is not supported yet. */ in setup_pebs_time()
1728 if (event->attr.use_clockid != 0) in setup_pebs_time()
1740 data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset; in setup_pebs_time()
1741 data->sample_flags |= PERF_SAMPLE_TIME; in setup_pebs_time()
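native_sched_clock_from_tsc() above turns the raw TSC from the PEBS record into sched_clock nanoseconds. Under the hood that is a fixed-point multiply with a boot-time calibrated mult/shift pair; a minimal model (calibration values are illustrative, and the real kernel adds per-CPU offsets):

#include <stdint.h>

static uint64_t tsc_to_ns(uint64_t tsc, uint32_t mult, unsigned int shift)
{
	/* 128-bit intermediate avoids overflow, as mul_u64_u32_shr() does */
	return (uint64_t)(((__uint128_t)tsc * mult) >> shift);
}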
1765 sample_type = event->attr.sample_type; in setup_pebs_fixed_sample_data()
1766 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; in setup_pebs_fixed_sample_data()
1768 perf_sample_data_init(data, 0, event->hw.last_period); in setup_pebs_fixed_sample_data()
1770 data->period = event->hw.last_period; in setup_pebs_fixed_sample_data()
1773 * Use latency for weight (only avail with PEBS-LL) in setup_pebs_fixed_sample_data()
1776 data->weight.full = pebs->lat; in setup_pebs_fixed_sample_data()
1777 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in setup_pebs_fixed_sample_data()
1784 data->data_src.val = get_data_src(event, pebs->dse); in setup_pebs_fixed_sample_data()
1785 data->sample_flags |= PERF_SAMPLE_DATA_SRC; in setup_pebs_fixed_sample_data()
1789 * We must however always use iregs for the unwinder to stay sane; the in setup_pebs_fixed_sample_data()
1797 * We use the interrupt regs as a base because the PEBS record does not in setup_pebs_fixed_sample_data()
1810 regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT; in setup_pebs_fixed_sample_data()
1813 regs->ax = pebs->ax; in setup_pebs_fixed_sample_data()
1814 regs->bx = pebs->bx; in setup_pebs_fixed_sample_data()
1815 regs->cx = pebs->cx; in setup_pebs_fixed_sample_data()
1816 regs->dx = pebs->dx; in setup_pebs_fixed_sample_data()
1817 regs->si = pebs->si; in setup_pebs_fixed_sample_data()
1818 regs->di = pebs->di; in setup_pebs_fixed_sample_data()
1820 regs->bp = pebs->bp; in setup_pebs_fixed_sample_data()
1821 regs->sp = pebs->sp; in setup_pebs_fixed_sample_data()
1824 regs->r8 = pebs->r8; in setup_pebs_fixed_sample_data()
1825 regs->r9 = pebs->r9; in setup_pebs_fixed_sample_data()
1826 regs->r10 = pebs->r10; in setup_pebs_fixed_sample_data()
1827 regs->r11 = pebs->r11; in setup_pebs_fixed_sample_data()
1828 regs->r12 = pebs->r12; in setup_pebs_fixed_sample_data()
1829 regs->r13 = pebs->r13; in setup_pebs_fixed_sample_data()
1830 regs->r14 = pebs->r14; in setup_pebs_fixed_sample_data()
1831 regs->r15 = pebs->r15; in setup_pebs_fixed_sample_data()
1835 if (event->attr.precise_ip > 1) { in setup_pebs_fixed_sample_data()
1838 * (real IP) which fixes the off-by-1 skid in hardware. in setup_pebs_fixed_sample_data()
1839 * Use it when precise_ip >= 2 : in setup_pebs_fixed_sample_data()
1842 set_linear_ip(regs, pebs->real_ip); in setup_pebs_fixed_sample_data()
1843 regs->flags |= PERF_EFLAGS_EXACT; in setup_pebs_fixed_sample_data()
1845 /* Otherwise, use PEBS off-by-1 IP: */ in setup_pebs_fixed_sample_data()
1846 set_linear_ip(regs, pebs->ip); in setup_pebs_fixed_sample_data()
1849 * With precise_ip >= 2, try to fix up the off-by-1 IP in setup_pebs_fixed_sample_data()
1851 * corrects regs->ip and calls set_linear_ip() on regs: in setup_pebs_fixed_sample_data()
1854 regs->flags |= PERF_EFLAGS_EXACT; in setup_pebs_fixed_sample_data()
1858 * When precise_ip == 1, return the PEBS off-by-1 IP, in setup_pebs_fixed_sample_data()
1861 set_linear_ip(regs, pebs->ip); in setup_pebs_fixed_sample_data()
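The comment blocks above describe one decision tree for when the sample may be flagged exact. A condensed sketch (hypothetical helper; fmt_ge2 models pebs_format >= 2, i.e. the record carries the HSW+ eventing IP, and fixup_ok models a successful intel_pmu_pebs_fixup_ip() LBR walk):

#include <stdbool.h>

/* Whether the sample may carry PERF_EFLAGS_EXACT; when the LBR walk
 * succeeds it has already rewritten regs->ip to the precise address. */
static bool pebs_ip_exact(unsigned int precise_ip, bool fmt_ge2, bool fixup_ok)
{
	if (precise_ip <= 1)
		return false;		/* report the off-by-1 IP as-is */
	return fmt_ge2 || fixup_ok;	/* real_ip, or corrected by fixup */
}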
1867 data->addr = pebs->dla; in setup_pebs_fixed_sample_data()
1868 data->sample_flags |= PERF_SAMPLE_ADDR; in setup_pebs_fixed_sample_data()
1874 data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning); in setup_pebs_fixed_sample_data()
1875 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in setup_pebs_fixed_sample_data()
1878 data->txn = intel_get_tsx_transaction(pebs->tsx_tuning, in setup_pebs_fixed_sample_data()
1879 pebs->ax); in setup_pebs_fixed_sample_data()
1880 data->sample_flags |= PERF_SAMPLE_TRANSACTION; in setup_pebs_fixed_sample_data()
1885 * v3 supplies an accurate time stamp, so we use that in setup_pebs_fixed_sample_data()
1891 setup_pebs_time(event, data, pebs->tsc); in setup_pebs_fixed_sample_data()
1893 perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); in setup_pebs_fixed_sample_data()
1899 regs->ax = gprs->ax; in adaptive_pebs_save_regs()
1900 regs->bx = gprs->bx; in adaptive_pebs_save_regs()
1901 regs->cx = gprs->cx; in adaptive_pebs_save_regs()
1902 regs->dx = gprs->dx; in adaptive_pebs_save_regs()
1903 regs->si = gprs->si; in adaptive_pebs_save_regs()
1904 regs->di = gprs->di; in adaptive_pebs_save_regs()
1905 regs->bp = gprs->bp; in adaptive_pebs_save_regs()
1906 regs->sp = gprs->sp; in adaptive_pebs_save_regs()
1908 regs->r8 = gprs->r8; in adaptive_pebs_save_regs()
1909 regs->r9 = gprs->r9; in adaptive_pebs_save_regs()
1910 regs->r10 = gprs->r10; in adaptive_pebs_save_regs()
1911 regs->r11 = gprs->r11; in adaptive_pebs_save_regs()
1912 regs->r12 = gprs->r12; in adaptive_pebs_save_regs()
1913 regs->r13 = gprs->r13; in adaptive_pebs_save_regs()
1914 regs->r14 = gprs->r14; in adaptive_pebs_save_regs()
1915 regs->r15 = gprs->r15; in adaptive_pebs_save_regs()
1942 perf_regs->xmm_regs = NULL; in setup_pebs_adaptive_sample_data()
1944 sample_type = event->attr.sample_type; in setup_pebs_adaptive_sample_data()
1945 format_group = basic->format_group; in setup_pebs_adaptive_sample_data()
1946 perf_sample_data_init(data, 0, event->hw.last_period); in setup_pebs_adaptive_sample_data()
1947 data->period = event->hw.last_period; in setup_pebs_adaptive_sample_data()
1949 setup_pebs_time(event, data, basic->tsc); in setup_pebs_adaptive_sample_data()
1952 * We must however always use iregs for the unwinder to stay sane; the in setup_pebs_adaptive_sample_data()
1961 set_linear_ip(regs, basic->ip); in setup_pebs_adaptive_sample_data()
1962 regs->flags = PERF_EFLAGS_EXACT; in setup_pebs_adaptive_sample_data()
1966 data->weight.var3_w = basic->retire_latency; in setup_pebs_adaptive_sample_data()
1968 data->weight.var3_w = 0; in setup_pebs_adaptive_sample_data()
1973 * But PERF_SAMPLE_TRANSACTION needs gprs->ax. in setup_pebs_adaptive_sample_data()
1985 if (event->attr.precise_ip < 2) { in setup_pebs_adaptive_sample_data()
1986 set_linear_ip(regs, gprs->ip); in setup_pebs_adaptive_sample_data()
1987 regs->flags &= ~PERF_EFLAGS_EXACT; in setup_pebs_adaptive_sample_data()
1997 meminfo->cache_latency : meminfo->mem_latency; in setup_pebs_adaptive_sample_data()
2000 data->weight.var2_w = meminfo->instr_latency; in setup_pebs_adaptive_sample_data()
2008 data->weight.full = latency ?: in setup_pebs_adaptive_sample_data()
2009 intel_get_tsx_weight(meminfo->tsx_tuning); in setup_pebs_adaptive_sample_data()
2011 data->weight.var1_dw = (u32)latency ?: in setup_pebs_adaptive_sample_data()
2012 intel_get_tsx_weight(meminfo->tsx_tuning); in setup_pebs_adaptive_sample_data()
2015 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in setup_pebs_adaptive_sample_data()
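The var1_dw/var2_w/var3_w stores above fill the sample-weight union from the perf UAPI; on little-endian it overlays a 64-bit value as one 32-bit and two 16-bit fields. A sketch of the layout, with field roles as this file uses them:

#include <stdint.h>

union sample_weight {			/* mirrors union perf_sample_weight,
					 * little-endian layout */
	uint64_t full;			/* PERF_SAMPLE_WEIGHT */
	struct {			/* PERF_SAMPLE_WEIGHT_STRUCT */
		uint32_t var1_dw;	/* cache/memory latency */
		uint16_t var2_w;	/* instruction latency */
		uint16_t var3_w;	/* retire latency */
	};
};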
2019 data->data_src.val = get_data_src(event, meminfo->aux); in setup_pebs_adaptive_sample_data()
2020 data->sample_flags |= PERF_SAMPLE_DATA_SRC; in setup_pebs_adaptive_sample_data()
2024 data->addr = meminfo->address; in setup_pebs_adaptive_sample_data()
2025 data->sample_flags |= PERF_SAMPLE_ADDR; in setup_pebs_adaptive_sample_data()
2029 data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, in setup_pebs_adaptive_sample_data()
2030 gprs ? gprs->ax : 0); in setup_pebs_adaptive_sample_data()
2031 data->sample_flags |= PERF_SAMPLE_TRANSACTION; in setup_pebs_adaptive_sample_data()
2039 perf_regs->xmm_regs = xmm->xmm; in setup_pebs_adaptive_sample_data()
2054 WARN_ONCE(next_record != __pebs + basic->format_size, in setup_pebs_adaptive_sample_data()
2056 basic->format_size, in setup_pebs_adaptive_sample_data()
2057 (u64)(next_record - __pebs), in setup_pebs_adaptive_sample_data()
2069 * fmt0 does not have a status bitfield (does not use in get_next_pebs_record_by_bit()
2078 for (at = base; at < top; at += cpuc->pebs_record_size) { in get_next_pebs_record_by_bit()
2089 /* clear non-PEBS bit and re-check */ in get_next_pebs_record_by_bit()
2090 pebs_status = status & cpuc->pebs_enabled; in get_next_pebs_record_by_bit()
2100 * Special variant of intel_pmu_save_and_restart() for auto-reload.
2105 struct hw_perf_event *hwc = &event->hw; in intel_pmu_save_and_restart_reload()
2106 int shift = 64 - x86_pmu.cntval_bits; in intel_pmu_save_and_restart_reload()
2107 u64 period = hwc->sample_period; in intel_pmu_save_and_restart_reload()
2118 prev_raw_count = local64_read(&hwc->prev_count); in intel_pmu_save_and_restart_reload()
2119 rdpmcl(hwc->event_base_rdpmc, new_raw_count); in intel_pmu_save_and_restart_reload()
2120 local64_set(&hwc->prev_count, new_raw_count); in intel_pmu_save_and_restart_reload()
2126 * [-period, 0] in intel_pmu_save_and_restart_reload()
2130 * A) value2 - value1; in intel_pmu_save_and_restart_reload()
2133 * B) (0 - value1) + (value2 - (-period)); in intel_pmu_save_and_restart_reload()
2136 * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); in intel_pmu_save_and_restart_reload()
2147 * value2 - value1 + n * period in intel_pmu_save_and_restart_reload()
2151 local64_add(new - old + count * period, &event->count); in intel_pmu_save_and_restart_reload()
2153 local64_set(&hwc->period_left, -new); in intel_pmu_save_and_restart_reload()
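The case analysis in the comments above (A: no wrap, B: one reload, C: n reloads) collapses to the closing formula, value2 - value1 + n * period, once both raw counter reads are sign-extended from the counter width. A standalone worked sketch, assuming 48-bit counters:

#include <stdint.h>
#include <stdio.h>

static int64_t reload_delta(uint64_t prev_raw, uint64_t new_raw,
			    uint64_t n_reloads, uint64_t period,
			    unsigned int cntval_bits)
{
	unsigned int shift = 64 - cntval_bits;
	int64_t old = (int64_t)(prev_raw << shift) >> shift;	/* sign-extend */
	int64_t new = (int64_t)(new_raw << shift) >> shift;

	return new - old + (int64_t)(n_reloads * period);
}

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;
	/* one reload: start at -100, overflow once, then count 40 more */
	int64_t d = reload_delta((0 - 100ULL) & mask,
				 (0 - 60ULL) & mask, 1, 100, 48);
	printf("%lld\n", (long long)d);	/* prints 140 */
	return 0;
}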
2186 struct hw_perf_event *hwc = &event->hw; in __intel_pmu_pebs_last_event()
2191 * The PEBS records may be drained in the non-overflow context, in __intel_pmu_pebs_last_event()
2206 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { in __intel_pmu_pebs_last_event()
2208 * Now, auto-reload is only enabled in fixed period mode. in __intel_pmu_pebs_last_event()
2209 * The reload value is always hwc->sample_period. in __intel_pmu_pebs_last_event()
2210 * May need to change it, if auto-reload is enabled in in __intel_pmu_pebs_last_event()
2237 at += cpuc->pebs_record_size; in __intel_pmu_pebs_events()
2239 cnt--; in __intel_pmu_pebs_events()
2248 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_core()
2249 struct perf_event *event = cpuc->events[0]; /* PMC0 only */ in intel_pmu_drain_pebs_core()
2256 at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; in intel_pmu_drain_pebs_core()
2257 top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; in intel_pmu_drain_pebs_core()
2262 ds->pebs_index = ds->pebs_buffer_base; in intel_pmu_drain_pebs_core()
2264 if (!test_bit(0, cpuc->active_mask)) in intel_pmu_drain_pebs_core()
2269 if (!event->attr.precise_ip) in intel_pmu_drain_pebs_core()
2272 n = top - at; in intel_pmu_drain_pebs_core()
2274 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) in intel_pmu_drain_pebs_core()
2290 * for auto-reload event in pmu::read(). There are no in intel_pmu_pebs_event_update_no_drain()
2293 * update the event->count for this case. in intel_pmu_pebs_event_update_no_drain()
2295 for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { in intel_pmu_pebs_event_update_no_drain()
2296 event = cpuc->events[bit]; in intel_pmu_pebs_event_update_no_drain()
2297 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) in intel_pmu_pebs_event_update_no_drain()
2305 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_nhm()
2317 base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; in intel_pmu_drain_pebs_nhm()
2318 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; in intel_pmu_drain_pebs_nhm()
2320 ds->pebs_index = ds->pebs_buffer_base; in intel_pmu_drain_pebs_nhm()
2338 pebs_status = p->status & cpuc->pebs_enabled; in intel_pmu_drain_pebs_nhm()
2357 if (!pebs_status && cpuc->pebs_enabled && in intel_pmu_drain_pebs_nhm()
2358 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) in intel_pmu_drain_pebs_nhm()
2359 pebs_status = p->status = cpuc->pebs_enabled; in intel_pmu_drain_pebs_nhm()
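The guard above can only trust a zero status field when a single PEBS event is enabled, which it detects with the classic power-of-two test. Spelled out as a restatement (not a kernel helper):

#include <stdint.h>

/* true iff exactly one bit of x is set: x is nonzero and clearing its
 * lowest set bit (x & (x - 1)) leaves nothing */
static inline int exactly_one_bit_set(uint64_t x)
{
	return x && !(x & (x - 1));
}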
2372 * If these events include one PEBS and multiple non-PEBS in intel_pmu_drain_pebs_nhm()
2395 event = cpuc->events[bit]; in intel_pmu_drain_pebs_nhm()
2399 if (WARN_ON_ONCE(!event->attr.precise_ip)) in intel_pmu_drain_pebs_nhm()
2423 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_icl()
2435 base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; in intel_pmu_drain_pebs_icl()
2436 top = (struct pebs_basic *)(unsigned long)ds->pebs_index; in intel_pmu_drain_pebs_icl()
2438 ds->pebs_index = ds->pebs_buffer_base; in intel_pmu_drain_pebs_icl()
2440 mask = hybrid(cpuc->pmu, pebs_events_mask) | in intel_pmu_drain_pebs_icl()
2441 (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); in intel_pmu_drain_pebs_icl()
2452 for (at = base; at < top; at += basic->format_size) { in intel_pmu_drain_pebs_icl()
2456 if (basic->format_size != cpuc->pebs_record_size) in intel_pmu_drain_pebs_icl()
2459 pebs_status = basic->applicable_counters & cpuc->pebs_enabled & mask; in intel_pmu_drain_pebs_icl()
2461 event = cpuc->events[bit]; in intel_pmu_drain_pebs_icl()
2464 WARN_ON_ONCE(!event->attr.precise_ip)) in intel_pmu_drain_pebs_icl()
2479 event = cpuc->events[bit]; in intel_pmu_drain_pebs_icl()
2505 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; in intel_ds_init()
2560 pebs_qual = "-baseline"; in intel_ds_init()
2561 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; in intel_ds_init()
2575 * The PEBS-via-PT is not supported on hybrid platforms, in intel_ds_init()
2579 * of the feature. The per-PMU pebs_output_pt_available in intel_ds_init()
2583 pr_cont("PEBS-via-PT, "); in intel_ds_init()
2584 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; in intel_ds_init()