#!/usr/bin/env python
# @lint-avoid-python-3-compatibility-imports
#
# readahead     Show performance of read-ahead cache
#               For Linux, uses BCC, eBPF
#
# Copyright (c) 2020 Suchakra Sharma <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License")
# This was originally created for the BPF Performance Tools book
# published by Addison Wesley. ISBN-13: 9780136554820
# When copying or porting, include this comment.
#
# 20-Aug-2020   Suchakra Sharma     Ported from bpftrace to BCC
# 17-Sep-2021   Hengqi Chen         Migrated to kfunc
# 30-Jan-2023   Rong Tao            Support more kfunc/kprobe, introduce folio

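# How it works: while a task is inside the kernel readahead path, a
# per-thread flag is set; every page-cache page allocated while that flag
# is set is timestamped in the 'birth' map and counted in 'pages'. When a
# page is first accessed (mark_page_accessed), its age goes into the
# 'dist' log2 histogram, it is decremented from 'pages', and its entry is
# dropped, so 'pages' converges on the number of read-ahead pages that
# were never used.
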
from __future__ import print_function
from bcc import BPF
from time import sleep
import ctypes as ct
import argparse

# arguments
examples = """examples:
    ./readahead -d 20       # monitor for 20 seconds and generate stats
"""

parser = argparse.ArgumentParser(
    description="Monitor performance of the read-ahead cache",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-d", "--duration", type=int,
    help="total duration to monitor for, in seconds")
args = parser.parse_args()
if not args.duration:
    args.duration = 99999999  # effectively run until interrupted

# BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

BPF_HASH(flag, u32, u8);            // used to track if we are in do_page_cache_readahead()
BPF_HASH(birth, struct page*, u64); // used to track timestamps of cache-allocated pages
BPF_ARRAY(pages);                   // increment/decrement readahead pages
BPF_HISTOGRAM(dist);                // age distribution of used read-ahead pages (log2, ms)
"""

bpf_text_kprobe = """
int entry__do_page_cache_readahead(struct pt_regs *ctx) {
    u32 pid;
    u8 one = 1;
    pid = bpf_get_current_pid_tgid();
    flag.update(&pid, &one);
    return 0;
}

int exit__do_page_cache_readahead(struct pt_regs *ctx) {
    u32 pid;
    u8 zero = 0;
    pid = bpf_get_current_pid_tgid();
    flag.update(&pid, &zero);
    return 0;
}

int exit__page_cache_alloc(struct pt_regs *ctx) {
    u32 pid;
    u64 ts;
    // GET_RETVAL_PAGE is substituted at load time with the kernel-appropriate
    // expression for extracting the allocated page from the return value
    struct page *retval = (struct page*) GET_RETVAL_PAGE;
    u32 zero = 0; // static key for accessing pages[0]
    pid = bpf_get_current_pid_tgid();
    u8 *f = flag.lookup(&pid);
    if (f != NULL && *f == 1) {
        ts = bpf_ktime_get_ns();
        birth.update(&retval, &ts);
        pages.atomic_increment(zero);
    }
    return 0;
}

int entry_mark_page_accessed(struct pt_regs *ctx) {
    u64 delta;
    struct page *arg0 = (struct page *) PT_REGS_PARM1(ctx);
    u32 zero = 0; // static key for accessing pages[0]
    u64 *bts = birth.lookup(&arg0);
    if (bts != NULL) {
        delta = bpf_ktime_get_ns() - *bts;
        dist.atomic_increment(bpf_log2l(delta/1000000));
        pages.atomic_increment(zero, -1);
        birth.delete(&arg0); // remove the entry from hashmap
    }
    return 0;
}
"""

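# kfunc (fentry/fexit) versions of the same probes: when the kernel
# supports BPF trampolines, KFUNC_PROBE/KRETFUNC_PROBE attachments are
# lower-overhead than kprobes and give typed access to function arguments,
# so they are preferred whenever BPF.support_kfunc() reports support.
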
bpf_text_kfunc = """
KFUNC_PROBE(RA_FUNC)
{
    u32 pid = bpf_get_current_pid_tgid();
    u8 one = 1;

    flag.update(&pid, &one);
    return 0;
}

KRETFUNC_PROBE(RA_FUNC)
{
    u32 pid = bpf_get_current_pid_tgid();
    u8 zero = 0;

    flag.update(&pid, &zero);
    return 0;
}

KFUNC_PROBE(mark_page_accessed, struct page *arg0)
{
    u64 delta;
    u32 zero = 0; // static key for accessing pages[0]
    u64 *bts = birth.lookup(&arg0);

    if (bts != NULL) {
        delta = bpf_ktime_get_ns() - *bts;
        dist.atomic_increment(bpf_log2l(delta/1000000));
        pages.atomic_increment(zero, -1);
        birth.delete(&arg0); // remove the entry from hashmap
    }
    return 0;
}
"""

bpf_text_kfunc_cache_alloc_ret_page = """
KRETFUNC_PROBE(__page_cache_alloc, gfp_t gfp, struct page *retval)
{
    u64 ts;
    u32 zero = 0; // static key for accessing pages[0]
    u32 pid = bpf_get_current_pid_tgid();
    u8 *f = flag.lookup(&pid);

    if (f != NULL && *f == 1) {
        ts = bpf_ktime_get_ns();
        birth.update(&retval, &ts);
        pages.atomic_increment(zero);
    }
    return 0;
}
"""

bpf_text_kfunc_cache_alloc_ret_folio = """
KRETFUNC_PROBE(filemap_alloc_folio, gfp_t gfp, unsigned int order,
    struct folio *retval)
{
    u64 ts;
    u32 zero = 0; // static key for accessing pages[0]
    u32 pid = bpf_get_current_pid_tgid();
    u8 *f = flag.lookup(&pid);
    struct page *page = folio_page(retval, 0);

    if (f != NULL && *f == 1) {
        ts = bpf_ktime_get_ns();
        birth.update(&page, &ts);
        pages.atomic_increment(zero);
    }
    return 0;
}
"""

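# Symbol detection: the readahead entry point has gone by different names
# across kernel releases (__do_page_cache_readahead, later do_page_cache_ra,
# later page_cache_ra_order), and folio-based kernels dropped
# __page_cache_alloc in favor of filemap_alloc_folio. Probe for each symbol
# in turn and attach to whichever one this kernel provides.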
if BPF.support_kfunc():
    if BPF.get_kprobe_functions(b"__do_page_cache_readahead"):
        ra_func = "__do_page_cache_readahead"
    elif BPF.get_kprobe_functions(b"do_page_cache_ra"):
        ra_func = "do_page_cache_ra"
    elif BPF.get_kprobe_functions(b"page_cache_ra_order"):
        ra_func = "page_cache_ra_order"
    else:
        print("No supported kfunc found for this kernel.")
        exit()
    bpf_text += bpf_text_kfunc.replace("RA_FUNC", ra_func)
    if BPF.get_kprobe_functions(b"__page_cache_alloc"):
        bpf_text += bpf_text_kfunc_cache_alloc_ret_page
    else:
        bpf_text += bpf_text_kfunc_cache_alloc_ret_folio
    b = BPF(text=bpf_text)
else:
    bpf_text += bpf_text_kprobe
    if BPF.get_kprobe_functions(b"__do_page_cache_readahead"):
        ra_event = "__do_page_cache_readahead"
    elif BPF.get_kprobe_functions(b"do_page_cache_ra"):
        ra_event = "do_page_cache_ra"
    elif BPF.get_kprobe_functions(b"page_cache_ra_order"):
        ra_event = "page_cache_ra_order"
    else:
        print("No supported kprobe found for this kernel.")
        exit()
    if BPF.get_kprobe_functions(b"__page_cache_alloc"):
        cache_func = "__page_cache_alloc"
        bpf_text = bpf_text.replace('GET_RETVAL_PAGE', 'PT_REGS_RC(ctx)')
    else:
        cache_func = "filemap_alloc_folio"
        bpf_text = bpf_text.replace('GET_RETVAL_PAGE', 'folio_page((struct folio *)PT_REGS_RC(ctx), 0)')
    b = BPF(text=bpf_text)
    b.attach_kprobe(event=ra_event, fn_name="entry__do_page_cache_readahead")
    b.attach_kretprobe(event=ra_event, fn_name="exit__do_page_cache_readahead")
    b.attach_kretprobe(event=cache_func, fn_name="exit__page_cache_alloc")
    b.attach_kprobe(event="mark_page_accessed", fn_name="entry_mark_page_accessed")

# header
print("Tracing... Hit Ctrl-C to end.")

# print
def print_stats():
    print()
    print("Read-ahead unused pages: %d" % (b["pages"][ct.c_ulong(0)].value))
    print("Histogram of read-ahead used page age (ms):")
    print()
    b["dist"].print_log2_hist("age (ms)")
    b["dist"].clear()
    b["pages"].clear()

while True:
    try:
        sleep(args.duration)
        print_stats()
    except KeyboardInterrupt:
        print_stats()
        break
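
# Example session (shape of the output only; the figures below are
# illustrative placeholders, not measured results):
#
#   $ sudo ./readahead.py -d 20
#   Tracing... Hit Ctrl-C to end.
#
#   Read-ahead unused pages: 391
#   Histogram of read-ahead used page age (ms):
#        age (ms)        : count     distribution
#            0 -> 1      : 2103     |****************************************|
#            2 -> 3      : 120      |**                                      |
#   ...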