// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "cpumap.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
#include "debug.h"
#include "evsel.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

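/* Default load-latency (ldlat) threshold used when formatting ldlat-capable load events. */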
unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"%s/mem-loads,ldlat=%u/P",	"mem-loads",	true,	0),
	E("ldlat-stores",	"%s/mem-stores/P",		"mem-stores",	false,	0),
	E(NULL,			NULL,				NULL,		false,	0),
};
#undef E

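/* Which mem events the user selected for recording; set by perf_pmu__mem_events_parse(). */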
bool perf_mem_record[PERF_MEM_EVENTS__MAX] = { 0 };

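/*
 * Scratch buffers for the formatted event strings. They are static, so
 * each perf_pmu__mem_events_name() call overwrites the previous result.
 */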
static char mem_loads_name[100];
static char mem_stores_name[100];

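/* Return the i-th mem event slot of @pmu, or NULL if out of range. */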
struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
{
	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	return &pmu->mem_events[i];
}

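/* Iterate the PMU list, skipping PMUs that don't populate mem_events. */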
static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
{
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (pmu->mem_events)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_mem_events_find_pmu(void)
{
	/*
	 * perf mem doesn't currently support per-PMU configuration.
	 * The same configuration is applied to all PMUs that support
	 * mem_events, so return the first such PMU.
	 *
	 * Note: the only case with multiple mem_events-capable PMUs is
	 * Intel hybrid, where the same mem_events are shared among the
	 * PMUs, so configuring only the first PMU is sufficient there
	 * as well.
	 */
	return perf_pmus__scan_mem(NULL);
}

/**
 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs, starting from the given pmu
 * @pmu: Start pmu. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int num = 0;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
		num++;

	return num;
}

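/*
 * Format the name of mem event @i for @pmu. The event templates are
 * printf-style: they take the PMU name and, for ldlat-capable loads,
 * the load-latency threshold. The result lives in a static buffer.
 */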
static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
{
	struct perf_mem_event *e;

	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	e = &pmu->mem_events[i];
	if (!e->name)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
		if (e->ldlat) {
			if (!e->aux_event) {
				/* ARM and most of Intel */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			} else {
				/* Intel with mem-loads-aux event */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			}
		} else {
			if (!e->aux_event) {
				/* AMD and POWER */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name);
			} else {
				return NULL;
			}
		}

		return mem_loads_name;
	}

	if (i == PERF_MEM_EVENTS__STORE) {
		scnprintf(mem_stores_name, sizeof(mem_stores_name),
			  e->name, pmu->name);
		return mem_stores_name;
	}

	return NULL;
}

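/*
 * Return true when @leader is the auxiliary event that must lead the
 * sampling load event (the mem-loads-aux scheme used by some Intel PMUs).
 */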
bool is_mem_loads_aux_event(struct evsel *leader)
{
	struct perf_pmu *pmu = leader->pmu;
	struct perf_mem_event *e;

	if (!pmu || !pmu->mem_events)
		return false;

	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
	if (!e->aux_event)
		return false;

	return leader->core.attr.config == e->aux_event;
}

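/*
 * Parse a comma-separated list of event tags (e.g. "ldlat-loads") and
 * mark every matching entry in perf_mem_record[] for recording.
 */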
int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* strtok_r() needs a writable copy of the string. */
	buf = strdup(str);
	if (!buf)
		return -ENOMEM;

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				perf_mem_record[j] = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

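/* An event is supported if its alias file exists under the PMU's sysfs events directory. */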
static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
					   struct perf_mem_event *e)
{
	char path[PATH_MAX];
	struct stat st;

	if (!e->event_name)
		return true;

	scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/%s", mnt, pmu->name, e->event_name);

	return !stat(path, &st);
}

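/* Probe sysfs to find out which mem events this PMU actually supports. */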
static int __perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * If the event entry isn't valid, skip initialization
		 * and leave "e->supported" false.
		 */
		if (!e->tag)
			continue;

		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

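/* Initialize mem events for every mem-capable PMU; fail if any of them supports none. */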
int perf_pmu__mem_events_init(void)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		if (__perf_pmu__mem_events_init(pmu))
			return -ENOENT;
	}

	return 0;
}

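/* List the PMU's mem events on stderr; event names are only shown in verbose mode. */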
void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}

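/*
 * Append "-e <event>" pairs for all selected and supported mem events to
 * the record argv. Also warn when the resulting events only cover a
 * subset of the online CPUs.
 */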
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;
	struct perf_mem_event *e;
	int i = *argv_nr;
	const char *s;
	char *copy;
	struct perf_cpu_map *cpu_map = NULL;
	int ret;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			e = perf_pmu__mem_events_ptr(pmu, j);

			if (!perf_mem_record[j])
				continue;

			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_pmu__mem_events_name(j, pmu));
				return -1;
			}

			s = perf_pmu__mem_events_name(j, pmu);
			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
				continue;

			copy = strdup(s);
			if (!copy)
				return -1;

			rec_argv[i++] = "-e";
			rec_argv[i++] = copy;

			ret = perf_cpu_map__merge(&cpu_map, pmu->cpus);
			if (ret < 0)
				return ret;
		}
	}

	if (cpu_map) {
		if (!perf_cpu_map__equal(cpu_map, cpu_map__online())) {
			char buf[200];

			cpu_map__snprint(cpu_map, buf, sizeof(buf));
			pr_warning("Memory events are enabled on a subset of CPUs: %s\n", buf);
		}
		perf_cpu_map__put(cpu_map);
	}

	*argv_nr = i;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

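/* Decode the PERF_MEM_TLB_* bits of data_src into a human-readable string. */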
int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_L1] = "L1",
	[PERF_MEM_LVLNUM_L2] = "L2",
	[PERF_MEM_LVLNUM_L3] = "L3",
	[PERF_MEM_LVLNUM_L4] = "L4",
	[PERF_MEM_LVLNUM_L2_MHB] = "L2 MHB",
	[PERF_MEM_LVLNUM_MSC] = "Memory-side Cache",
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, "Remote" is prepended to represent
	 * "Remote core, same node" accesses, since the remote field
	 * must be set together with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

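/* Decode the memory operation type (load/store/prefetch/exec) of data_src. */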
static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info__const_data_src(mem_info)->mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

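/*
 * Decode the memory hierarchy level of data_src. The newer mem_lvl_num
 * encoding (with remote/hops qualifiers) is preferred; the legacy
 * mem_lvl bitmask is used as a fallback.
 */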
int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = *mem_info__const_data_src(mem_info);

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "Unknown level %d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

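/* Decode the snoop result bits, including the extended PERF_MEM_SNOOPX_* bits. */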
int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

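/* Decode the lock bit: was the access part of a locked transaction? */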
int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

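/* Decode the PERF_MEM_BLK_* access-blocking bits of data_src. */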
int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

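/* Combine all data_src decoders into a single "|OP ...|LVL ...|..." string for perf script. */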
int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

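/*
 * Classify one sample's data_src into c2c_stats counters. Returns -1
 * when the sample is unusable (missing address, missing map, or an
 * unparsable data_src).
 */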
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = mem_info__data_src(mi);
	u64 daddr  = mem_info__daddr(mi)->addr;
	u64 op     = data_src->mem_op;
	u64 lvl    = data_src->mem_lvl;
	u64 snoop  = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock   = data_src->mem_lock;
	u64 blk    = data_src->mem_blk;
	/*
	 * Skylake might report an unknown remote level via this bit;
	 * consider it when evaluating remote HITMs.
	 *
	 * On POWER, the remote field can also denote cache accesses
	 * from another core on the same node. Hence, set mrem only
	 * when the remote field is set and HOPS is zero.
	 */
	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1))  stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3)) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if (lvl & P(LVL, MISS))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1))  stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}

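/* Accumulate the counters from @add into @stats. */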
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries	+= add->nr_entries;

	stats->locks		+= add->locks;
	stats->store		+= add->store;
	stats->st_uncache	+= add->st_uncache;
	stats->st_noadrs	+= add->st_noadrs;
	stats->st_l1hit		+= add->st_l1hit;
	stats->st_l1miss	+= add->st_l1miss;
	stats->st_na		+= add->st_na;
	stats->load		+= add->load;
	stats->ld_excl		+= add->ld_excl;
	stats->ld_shared	+= add->ld_shared;
	stats->ld_uncache	+= add->ld_uncache;
	stats->ld_io		+= add->ld_io;
	stats->ld_miss		+= add->ld_miss;
	stats->ld_noadrs	+= add->ld_noadrs;
	stats->ld_fbhit		+= add->ld_fbhit;
	stats->ld_l1hit		+= add->ld_l1hit;
	stats->ld_l2hit		+= add->ld_l2hit;
	stats->ld_llchit	+= add->ld_llchit;
	stats->lcl_hitm		+= add->lcl_hitm;
	stats->rmt_hitm		+= add->rmt_hitm;
	stats->tot_hitm		+= add->tot_hitm;
	stats->lcl_peer		+= add->lcl_peer;
	stats->rmt_peer		+= add->rmt_peer;
	stats->tot_peer		+= add->tot_peer;
	stats->rmt_hit		+= add->rmt_hit;
	stats->lcl_dram		+= add->lcl_dram;
	stats->rmt_dram		+= add->rmt_dram;
	stats->blk_data		+= add->blk_data;
	stats->blk_addr		+= add->blk_addr;
	stats->nomap		+= add->nomap;
	stats->noparse		+= add->noparse;
}