Lines Matching +full:0 +full:x187

19 /* AMD Event 0xFFF: Merge.  Used with Large Increment per Cycle events */
20 #define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
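A minimal user-space sketch (the helper name is ours, mirroring the file's amd_get_event_code() arithmetic) of how this define yields the 12-bit event code 0xFFF, with bits [7:0] in the low byte of the event-select config and bits [11:8] at config bits 35:32:

#include <stdint.h>
#include <stdio.h>

/* Same split as amd_get_event_code(): event code bits [11:8] sit at config[35:32]. */
static unsigned int event_code(uint64_t config)
{
	return ((config >> 24) & 0x0f00) | (config & 0x00ff);
}

int main(void)
{
	uint64_t merge = (0xFULL << 32) | 0xFFULL;	/* AMD_MERGE_EVENT */

	printf("merge event code = 0x%x\n", event_code(merge));	/* prints 0xfff */
	return 0;
}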
33 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
34 [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
37 [ C(RESULT_ACCESS) ] = 0,
38 [ C(RESULT_MISS) ] = 0,
41 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
42 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
47 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
48 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
55 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
56 [ C(RESULT_MISS) ] = 0,
61 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
62 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
65 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
66 [ C(RESULT_MISS) ] = 0,
69 [ C(RESULT_ACCESS) ] = 0,
70 [ C(RESULT_MISS) ] = 0,
75 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
76 [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
79 [ C(RESULT_ACCESS) ] = 0,
80 [ C(RESULT_MISS) ] = 0,
83 [ C(RESULT_ACCESS) ] = 0,
84 [ C(RESULT_MISS) ] = 0,
89 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
90 [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
103 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
104 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
117 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, local + remote */
118 [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, remote */
137 [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
138 [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
141 [C(RESULT_ACCESS)] = 0,
142 [C(RESULT_MISS)] = 0,
145 [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
146 [C(RESULT_MISS)] = 0,
151 [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
152 [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
159 [C(RESULT_ACCESS)] = 0,
160 [C(RESULT_MISS)] = 0,
165 [C(RESULT_ACCESS)] = 0,
166 [C(RESULT_MISS)] = 0,
169 [C(RESULT_ACCESS)] = 0,
170 [C(RESULT_MISS)] = 0,
173 [C(RESULT_ACCESS)] = 0,
174 [C(RESULT_MISS)] = 0,
179 [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
180 [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
183 [C(RESULT_ACCESS)] = 0,
184 [C(RESULT_MISS)] = 0,
187 [C(RESULT_ACCESS)] = 0,
188 [C(RESULT_MISS)] = 0,
193 [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
194 [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
207 [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
208 [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
221 [C(RESULT_ACCESS)] = 0,
222 [C(RESULT_MISS)] = 0,
240 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
241 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
242 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
243 [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
244 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
245 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
246 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
247 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
255 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
256 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
257 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
258 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
259 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
260 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
261 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
262 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
267 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
268 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
269 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
270 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
271 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
272 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
273 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
278 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
279 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
280 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
281 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
282 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
283 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
284 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
285 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x100000120,
290 if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a) in amd_pmu_event_map()
293 if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19) in amd_pmu_event_map()
310 * 4 counters starting at 0xc0010000 each offset by 1
313 * 6 counters starting at 0xc0010200 each offset by 2
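A rough sketch of the arithmetic behind the two layouts described above (an illustrative helper only, not the kernel's addr_offset hook; the base addresses are the architectural MSR numbers):

#include <stdbool.h>

/*
 * Legacy core PMU: 4 event-select MSRs starting at 0xc0010000, stride 1.
 * PerfCtrExtCore:  6 event-select MSRs starting at 0xc0010200, stride 2,
 *                  since each PERF_CTL is immediately followed by its PERF_CTR.
 */
static unsigned int perf_ctl_msr(unsigned int idx, bool core_ext)
{
	if (core_ext)
		return 0xc0010200 + 2 * idx;	/* MSR_F15H_PERF_CTL + 2 * idx */

	return 0xc0010000 + idx;		/* MSR_K7_EVNTSEL0 + idx */
}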
348 return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); in amd_get_event_code()
357 case 0x003: return true; /* Retired SSE/AVX FLOPs */ in amd_is_pair_event_code()
368 * When HO == GO == 1 the hardware treats that as GO == HO == 0 in amd_core_hw_config()
370 * case so we emulate no-counting by setting US = OS = 0. in amd_core_hw_config()
385 return 0; in amd_core_hw_config()
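A self-contained sketch of the no-counting emulation the comment above describes (the US/OS bit positions are the architectural PerfEvtSel bits 16 and 17; the helper itself is illustrative, not the kernel's amd_core_hw_config()):

#include <stdint.h>

#define EVNTSEL_USR	(1ULL << 16)	/* US: count in user mode   */
#define EVNTSEL_OS	(1ULL << 17)	/* OS: count in kernel mode */

static void emulate_no_counting(uint64_t *config,
				int exclude_host, int exclude_guest)
{
	/*
	 * HO == GO == 1 is treated by hardware as HO == GO == 0, i.e.
	 * counting everywhere, so disable counting via US = OS = 0 instead.
	 */
	if (exclude_host && exclude_guest)
		*config &= ~(EVNTSEL_USR | EVNTSEL_OS);
}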
390 return (hwc->config & 0xe0) == 0xe0; in amd_is_nb_event()
449 * traffic. They are identified by an event code >= 0xe00.
566 wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0); in amd_pmu_cpu_reset()
588 return 0; in amd_pmu_cpu_prepare()
592 return 0; in amd_pmu_cpu_prepare()
645 if (nb->nb_id == -1 || --nb->refcnt == 0) in amd_pmu_cpu_dead()
712 for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) { in amd_pmu_wait_on_overflow()
821 amd_pmu_set_global_ctl(0); in amd_pmu_core_disable_all()
894 cpuc->enabled = 0; in amd_pmu_handle_irq()
937 amd_pmu_v2_enable_all(0); in amd_pmu_v2_snapshot_branch_stack()
946 static atomic64_t status_warned = ATOMIC64_INIT(0); in amd_pmu_v2_handle_irq()
951 int handled = 0, idx; in amd_pmu_v2_handle_irq()
959 cpuc->enabled = 0; in amd_pmu_v2_handle_irq()
978 pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode… in amd_pmu_v2_handle_irq()
999 perf_sample_data_init(&data, 0, hwc->last_period); in amd_pmu_v2_handle_irq()
1007 x86_pmu_stop(event, 0); in amd_pmu_v2_handle_irq()
1015 if (status > 0) { in amd_pmu_v2_handle_irq()
1061 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
1078 #define AMD_EVENT_TYPE_MASK 0x000000F0ULL
1080 #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
1081 #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
1082 #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
1083 #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
1084 #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
1085 #define AMD_EVENT_EX_LS 0x000000C0ULL
1086 #define AMD_EVENT_DE 0x000000D0ULL
1087 #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
1092 * type = event_code & 0x0F0:
1094 * 0x000 FP PERF_CTL[5:3]
1095 * 0x010 FP PERF_CTL[5:3]
1096 * 0x020 LS PERF_CTL[5:0]
1097 * 0x030 LS PERF_CTL[5:0]
1098 * 0x040 DC PERF_CTL[5:0]
1099 * 0x050 DC PERF_CTL[5:0]
1100 * 0x060 CU PERF_CTL[2:0]
1101 * 0x070 CU PERF_CTL[2:0]
1102 * 0x080 IC/DE PERF_CTL[2:0]
1103 * 0x090 IC/DE PERF_CTL[2:0]
1104 * 0x0A0 ---
1105 * 0x0B0 ---
1106 * 0x0C0 EX/LS PERF_CTL[5:0]
1107 * 0x0D0 DE PERF_CTL[2:0]
1108 * 0x0E0 NB NB_PERF_CTL[3:0]
1109 * 0x0F0 NB NB_PERF_CTL[3:0]
1113 * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
1114 * 0x003 FP PERF_CTL[3]
1115 * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
1116 * 0x00B FP PERF_CTL[3]
1117 * 0x00D FP PERF_CTL[3]
1118 * 0x023 DE PERF_CTL[2:0]
1119 * 0x02D LS PERF_CTL[3]
1120 * 0x02E LS PERF_CTL[3,0]
1121 * 0x031 LS PERF_CTL[2:0] (**)
1122 * 0x043 CU PERF_CTL[2:0]
1123 * 0x045 CU PERF_CTL[2:0]
1124 * 0x046 CU PERF_CTL[2:0]
1125 * 0x054 CU PERF_CTL[2:0]
1126 * 0x055 CU PERF_CTL[2:0]
1127 * 0x08F IC PERF_CTL[0]
1128 * 0x187 DE PERF_CTL[0]
1129 * 0x188 DE PERF_CTL[0]
1130 * 0x0DB EX PERF_CTL[5:0]
1131 * 0x0DC LS PERF_CTL[5:0]
1132 * 0x0DD LS PERF_CTL[5:0]
1133 * 0x0DE LS PERF_CTL[5:0]
1134 * 0x0DF LS PERF_CTL[5:0]
1135 * 0x1C0 EX PERF_CTL[5:3]
1136 * 0x1D6 EX PERF_CTL[5:0]
1137 * 0x1D8 EX PERF_CTL[5:0]
1143 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
1144 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
1145 static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
1146 static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
1147 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
1148 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
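The counter masks in these constraints are bitmaps over the six core counters (bit i set means PERF_CTL[i] may be used) and encode exactly the ranges from the table above; a small hypothetical helper macro makes the correspondence explicit:

/* Bitmap covering counters hi..lo, e.g. CTR_RANGE(5, 3) == 0x38. */
#define CTR_RANGE(hi, lo)	(((1U << ((hi) - (lo) + 1)) - 1) << (lo))

/*
 * CTR_RANGE(0, 0)                   == 0x01 -> PERF_CTL[0]   (amd_f15_PMC0)
 * CTR_RANGE(2, 0)                   == 0x07 -> PERF_CTL[2:0] (amd_f15_PMC20)
 * CTR_RANGE(3, 3)                   == 0x08 -> PERF_CTL[3]   (amd_f15_PMC3)
 * CTR_RANGE(3, 3) | CTR_RANGE(0, 0) == 0x09 -> PERF_CTL[3,0] (amd_f15_PMC30)
 * CTR_RANGE(5, 3)                   == 0x38 -> PERF_CTL[5:3] (amd_f15_PMC53)
 * CTR_RANGE(5, 0)                   == 0x3f -> PERF_CTL[5:0] (amd_f15_PMC50)
 */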
1160 case 0x000: in amd_get_event_constraints_f15h()
1161 if (!(hwc->config & 0x0000F000ULL)) in amd_get_event_constraints_f15h()
1163 if (!(hwc->config & 0x00000F00ULL)) in amd_get_event_constraints_f15h()
1166 case 0x004: in amd_get_event_constraints_f15h()
1170 case 0x003: in amd_get_event_constraints_f15h()
1171 case 0x00B: in amd_get_event_constraints_f15h()
1172 case 0x00D: in amd_get_event_constraints_f15h()
1180 case 0x023: in amd_get_event_constraints_f15h()
1181 case 0x043: in amd_get_event_constraints_f15h()
1182 case 0x045: in amd_get_event_constraints_f15h()
1183 case 0x046: in amd_get_event_constraints_f15h()
1184 case 0x054: in amd_get_event_constraints_f15h()
1185 case 0x055: in amd_get_event_constraints_f15h()
1187 case 0x02D: in amd_get_event_constraints_f15h()
1189 case 0x02E: in amd_get_event_constraints_f15h()
1191 case 0x031: in amd_get_event_constraints_f15h()
1195 case 0x1C0: in amd_get_event_constraints_f15h()
1204 case 0x08F: in amd_get_event_constraints_f15h()
1205 case 0x187: in amd_get_event_constraints_f15h()
1206 case 0x188: in amd_get_event_constraints_f15h()
1208 case 0x0DB ... 0x0DF: in amd_get_event_constraints_f15h()
1209 case 0x1D6: in amd_get_event_constraints_f15h()
1210 case 0x1D8: in amd_get_event_constraints_f15h()
1261 EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK);
1264 __EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK, 1, 0, PERF_X86_EVENT_PAIR);
1275 * the kernel allows it but only on counter 0 & 1 to enforce in amd_get_event_constraints_f19h()
1323 .cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
1361 return x86_pmu.lbr_nr ? attr->mode : 0; in amd_branches_is_visible()
1384 attr->mode : 0; in amd_brs_is_visible()
1406 u64 even_ctr_mask = 0ULL; in amd_core_pmu_init()
1410 return 0; in amd_core_pmu_init()
1422 x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0); in amd_core_pmu_init()
1432 x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0); in amd_core_pmu_init()
1448 x86_pmu.amd_nb_constraints = 0; in amd_core_pmu_init()
1450 if (boot_cpu_data.x86 == 0x15) { in amd_core_pmu_init()
1454 if (boot_cpu_data.x86 >= 0x17) { in amd_core_pmu_init()
1462 for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2) in amd_core_pmu_init()
1466 __EVENT_CONSTRAINT(0, even_ctr_mask, 0, in amd_core_pmu_init()
1467 x86_pmu_max_num_counters(NULL) / 2, 0, in amd_core_pmu_init()
1512 return 0; in amd_core_pmu_init()
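A worked, stand-alone version of the even-counter mask built by the loop in amd_core_pmu_init() above, assuming the six-counter PerfCtrExtCore case (Large Increment per Cycle events occupy an even/odd counter pair, with the Merge event taking the odd slot):

#include <stdint.h>

static uint64_t even_counter_mask(int num_counters)
{
	uint64_t mask = 0;
	int i;

	/* Same loop shape as above: every even counter below the last one. */
	for (i = 0; i < num_counters - 1; i += 2)
		mask |= 1ULL << i;

	return mask;	/* 6 counters -> 0x15, i.e. counters 0, 2 and 4 */
}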
1534 x86_pmu.amd_nb_constraints = 0; in amd_pmu_init()
1537 if (boot_cpu_data.x86 >= 0x17) in amd_pmu_init()
1542 return 0; in amd_pmu_init()
1554 amd_pmu_enable_all(0); in amd_pmu_reload_virt()
1555 amd_pmu_v2_enable_all(0); in amd_pmu_reload_virt()
1560 amd_pmu_enable_all(0); in amd_pmu_reload_virt()
1567 cpuc->perf_ctr_virt_mask = 0; in amd_pmu_enable_virt()