Lines Matching refs:max_cpu_num

2034 	int max_cpu_num;  member
5382 for (i = 0; i < topo.max_cpu_num + 1; ++i) { in free_fd_percpu()
5396 for (int i = 0; i < topo.max_cpu_num + 1; ++i) { in free_fd_instr_count_percpu()
5430 for (int cpu = 0; cpu < topo.max_cpu_num; ++cpu) { in free_fd_msr()
5538 for (i = 0; i <= topo.max_cpu_num; ++i) { in free_all_buffers()
5599 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) in set_node_data()
5605 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) { in set_node_data()
5618 for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) { in set_node_data()
5628 if (cpu_count >= topo.max_cpu_num) in set_node_data()
5640 for (i = 0; i <= topo.max_cpu_num; i++) { in get_physical_node_id()
5714 int offset = topo.max_cpu_num + 1; in get_thread_siblings()
5718 thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1)); in get_thread_siblings()
5724 size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); in get_thread_siblings()
5904 topo.max_cpu_num = 0; in set_max_cpu_num()
5906 topo.max_cpu_num += BITMASK_SIZE; in set_max_cpu_num()
5908 topo.max_cpu_num--; /* 0 based */ in set_max_cpu_num()
5986 if (cpu_number > topo.max_cpu_num) { in snapshot_proc_interrupts()
5987 warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num); in snapshot_proc_interrupts()
6295 per_cpu_msr_sum = calloc(topo.max_cpu_num + 1, sizeof(struct msr_sum_array)); in msr_sum_record()
7833 fd_instr_count_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); in linux_perf_init()
7876 for (int cpu = 0; cpu < topo.max_cpu_num + 1; ++cpu) { in rapl_perf_init()
8017 const int mci_num = topo.max_cpu_num + 1; in msr_perf_init_()
8099 const int cci_num = topo.max_cpu_num + 1; in cstate_perf_init_()
8551 fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); in topology_probe()
8553 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); in topology_probe()
8560 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); in topology_probe()
8563 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); in topology_probe()
8570 cpu_possible_set = CPU_ALLOC((topo.max_cpu_num + 1)); in topology_probe()
8573 cpu_possible_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); in topology_probe()
8580 cpu_effective_set = CPU_ALLOC((topo.max_cpu_num + 1)); in topology_probe()
8583 cpu_effective_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); in topology_probe()
8590 cpu_allowed_set = CPU_ALLOC((topo.max_cpu_num + 1)); in topology_probe()
8593 cpu_allowed_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); in topology_probe()
8637 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); in topology_probe()
8640 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); in topology_probe()
8651 for (i = 0; i <= topo.max_cpu_num; ++i) { in topology_probe()
8723 for (i = 0; i <= topo.max_cpu_num; ++i) { in topology_probe()
8824 fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); in allocate_fd_percpu()
8835 irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int)); in allocate_irq_buffers()
8837 err(-1, "calloc %d IRQ", topo.max_cpu_num + 1); in allocate_irq_buffers()
8839 nmi_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int)); in allocate_irq_buffers()
8841 err(-1, "calloc %d NMI", topo.max_cpu_num + 1); in allocate_irq_buffers()
8879 for (i = 0; i < topo.max_cpu_num + 1; ++i) { in set_base_cpu()
8956 	const size_t max_num_domains = MAX(topo.max_cpu_num + 1, MAX(topo.max_core_id + 1, topo.max_package_id + 1)); in added_perf_counters_init_()
8963 num_domains = topo.max_cpu_num + 1; in added_perf_counters_init_()
8988 for (int cpu = 0; cpu < topo.max_cpu_num + 1; ++cpu) { in added_perf_counters_init_()
9405 for (cpu_num = 0; cpu_num < topo.max_cpu_num;) { in pmt_init()