/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

/* Topology identifiers for one logical CPU (index into perf_env->cpu). */
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	cluster_id;
	int	core_id;
};

/*
 * One cache level as recorded in the perf.data header.
 * type/size are human-readable strings; map describes the CPUs sharing
 * this cache (freed via cpu_cache_level__free()).
 */
struct cpu_cache_level {
	u32	level;
	u32	line_size;
	u32	sets;
	u32	ways;
	char	*type;
	char	*size;
	char	*map;
};

/* Per-NUMA-node memory totals plus the CPUs belonging to the node. */
struct numa_node {
	u32			 node;
	u64			 mem_total;
	u64			 mem_free;
	struct perf_cpu_map	*map;
};

/*
 * Memory node as reported in the MEM_TOPOLOGY header feature.
 * 'set' is a bitmap; presumably of present memory blocks of 'size'
 * bytes each — confirm against the writer in header.c.
 */
struct memory_node {
	u64		 node;
	u64		 size;
	unsigned long	*set;
};

/* A hybrid-CPU PMU (e.g. big/little core type) and its cpu list string. */
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

/* Capability strings recorded for one PMU, plus branch-counter limits. */
struct pmu_caps {
	int		nr_caps;
	unsigned int	max_branches;
	unsigned int	br_cntr_nr;
	unsigned int	br_cntr_width;

	char		**caps;
	char		*pmu_name;
};

/* Per-arch hook mapping a syscall errno value to its symbolic name. */
typedef const char *(arch_syscalls__strerrno_t)(int err);

/*
 * Aggregated description of the environment a perf.data file was
 * recorded in (or the live system): host/OS identity, CPU and memory
 * topology, PMU mappings and capabilities, compression settings, and
 * caches for fast lookups.  Most string/array members are heap-owned
 * and released by perf_env__exit().
 */
struct perf_env {
	char			*hostname;
	char			*os_release;
	char			*version;
	char			*arch;
	int			nr_cpus_online;
	int			nr_cpus_avail;
	char			*cpu_desc;
	char			*cpuid;
	unsigned long long	total_mem;
	unsigned int		msr_pmu_type;
	unsigned int		max_branches;
	unsigned int		br_cntr_nr;
	unsigned int		br_cntr_width;
	int			kernel_is_64_bit;

	/* Element counts for the arrays/strings below. */
	int			nr_cmdline;
	int			nr_sibling_cores;
	int			nr_sibling_dies;
	int			nr_sibling_threads;
	int			nr_numa_nodes;
	int			nr_memory_nodes;
	int			nr_pmu_mappings;
	int			nr_groups;
	int			nr_cpu_pmu_caps;
	int			nr_hybrid_nodes;
	int			nr_pmus_with_caps;
	char			*cmdline;
	const char		**cmdline_argv;
	char			*sibling_cores;
	char			*sibling_dies;
	char			*sibling_threads;
	char			*pmu_mappings;
	char			**cpu_pmu_caps;
	struct cpu_topology_map	*cpu;
	struct cpu_cache_level	*caches;
	int			 caches_cnt;
	u32			comp_ratio;
	u32			comp_ver;
	u32			comp_type;
	u32			comp_level;
	u32			comp_mmap_len;
	struct numa_node	*numa_nodes;
	struct memory_node	*memory_nodes;
	unsigned long long	 memory_bsize;
	struct hybrid_node	*hybrid_nodes;
	struct pmu_caps		*pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_info_lock protects bpf rbtrees. This is needed because the
	 * trees are accessed by different threads in perf-top
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;
		u32			infos_cnt;
		struct rb_root		btfs;
		u32			btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int			*numa_map;
	int			 nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64	tod_ns;
		u64	clockid_ns;
		u64     clockid_res_ns;
		int	clockid;
		/*
		 * enabled is valid for report mode, and is true if above
		 * values are set, it's set in process_clock_data
		 */
		bool	enabled;
	} clock;
	arch_syscalls__strerrno_t *arch_strerrno;
};

/* Compression algorithm used for the perf.data payload. */
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

/* Global environment instance shared across perf tools. */
extern struct perf_env perf_env;

/* Release all heap-owned members of @env (does not free @env itself). */
void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

/* Store a copy of the (argc, argv) command line into @env. */
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

/*
 * Readers/accessors: the perf_env__read_*() functions populate the
 * corresponding fields from the running system; the plain accessors
 * presumably read them lazily — confirm in env.c.
 */
int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

/* Free the strings owned by one cpu_cache_level entry. */
void cpu_cache_level__free(struct cpu_cache_level *cache);

const char *perf_env__arch(struct perf_env *env);
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);

/*
 * BPF program-info / BTF rbtree insert and lookup helpers.  The
 * double-underscore variants presumably expect the caller to already
 * hold bpf_progs.lock, while the plain variants take it themselves —
 * confirm against the definitions in env.c.  Insert functions return
 * true when the node was added (false e.g. on duplicate id).
 */
bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

/* Map @cpu to its NUMA node using the numa_map cache in @env. */
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);

/* Look up capability @cap for PMU @pmu_name; NULL when not present. */
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);

/* Report branch-counter number and width via @nr / @width out-params. */
void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width);

/* AMD detection: live system probe vs. recorded-environment check. */
bool x86__is_amd_cpu(void);
bool perf_env__is_x86_amd_cpu(struct perf_env *env);

#endif /* __PERF_ENV_H */