Lines Matching full:box (uses of the identifier "box" in the Linux Intel uncore PMU driver, arch/x86/events/intel/uncore.c)
149 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) in uncore_msr_read_counter() argument
158 void uncore_mmio_exit_box(struct intel_uncore_box *box) in uncore_mmio_exit_box() argument
160 if (box->io_addr) in uncore_mmio_exit_box()
161 iounmap(box->io_addr); in uncore_mmio_exit_box()
164 u64 uncore_mmio_read_counter(struct intel_uncore_box *box, in uncore_mmio_read_counter() argument
167 if (!box->io_addr) in uncore_mmio_read_counter()
170 if (!uncore_mmio_is_valid_offset(box, event->hw.event_base)) in uncore_mmio_read_counter()
173 return readq(box->io_addr + event->hw.event_base); in uncore_mmio_read_counter()
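The two guards above (a missing io_addr mapping and an out-of-range event_base offset) are what make the MMIO read path safe to call unconditionally. A minimal userspace sketch of the same shape, with made-up names (struct mmio_box, mmio_read_counter) and an ordinary heap buffer standing in for the ioremap()ed register window:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mmio_box {                         /* hypothetical stand-in, not the kernel type */
	volatile uint64_t *io_addr;       /* base of the mapped register window, or NULL */
	size_t mmio_size;                 /* size of the window in bytes */
};

/* Return 0 when the window is unmapped or the offset falls outside it,
 * mirroring the checks in uncore_mmio_read_counter() above. */
static uint64_t mmio_read_counter(const struct mmio_box *box, size_t offset)
{
	if (!box->io_addr)
		return 0;
	if (offset + sizeof(uint64_t) > box->mmio_size)
		return 0;
	return *(const volatile uint64_t *)
		((const volatile char *)box->io_addr + offset);
}

int main(void)
{
	uint64_t *regs = calloc(4, sizeof(*regs));   /* stand-in for the MMIO mapping */
	if (!regs)
		return 1;

	struct mmio_box box = { .io_addr = regs, .mmio_size = 4 * sizeof(*regs) };
	regs[2] = 12345;
	printf("counter @0x10 = %llu\n", (unsigned long long)mmio_read_counter(&box, 0x10));
	printf("out of range  = %llu\n", (unsigned long long)mmio_read_counter(&box, 0x100));
	free(regs);
	return 0;
}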
180 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_constraint() argument
189 * reg->alloc can be set due to existing state, so for fake box we in uncore_get_constraint()
194 (!uncore_box_is_fake(box) && reg1->alloc)) in uncore_get_constraint()
197 er = &box->shared_regs[reg1->idx]; in uncore_get_constraint()
209 if (!uncore_box_is_fake(box)) in uncore_get_constraint()
217 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_put_constraint() argument
226 * Also, if this is a fake box we shouldn't touch any event state in uncore_put_constraint()
227 * (reg->alloc) and we don't care about leaving inconsistent box in uncore_put_constraint()
230 if (uncore_box_is_fake(box) || !reg1->alloc) in uncore_put_constraint()
233 er = &box->shared_regs[reg1->idx]; in uncore_put_constraint()
238 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) in uncore_shared_reg_config() argument
244 er = &box->shared_regs[idx]; in uncore_shared_reg_config()
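uncore_get_constraint(), uncore_put_constraint() and uncore_shared_reg_config() manage an extra (shared) register that several events may need: an event gets to use it only if the register is free or already programmed with the value it needs, and releases its reference on teardown. A simplified sketch of that sharing rule (struct shared_reg and the helpers are hypothetical; the kernel does this under er->lock with atomic counters):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct shared_reg {          /* simplified stand-in for intel_uncore_extra_reg */
	int ref;             /* how many events currently use this register */
	uint64_t config;     /* value programmed while ref > 0 */
};

/* An event may use the register if it is free, or if it is already
 * programmed with exactly the value the event needs. */
static bool shared_reg_get(struct shared_reg *er, uint64_t config)
{
	if (er->ref == 0 || er->config == config) {
		er->ref++;
		er->config = config;
		return true;
	}
	return false;
}

static void shared_reg_put(struct shared_reg *er)
{
	if (er->ref > 0)
		er->ref--;
}

int main(void)
{
	struct shared_reg er = { 0 };

	printf("first user (cfg=1): %s\n", shared_reg_get(&er, 1) ? "ok" : "conflict");
	printf("same cfg   (cfg=1): %s\n", shared_reg_get(&er, 1) ? "ok" : "conflict");
	printf("other cfg  (cfg=2): %s\n", shared_reg_get(&er, 2) ? "ok" : "conflict");
	shared_reg_put(&er);
	shared_reg_put(&er);
	return 0;
}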
253 static void uncore_assign_hw_event(struct intel_uncore_box *box, in uncore_assign_hw_event() argument
259 hwc->last_tag = ++box->tags[idx]; in uncore_assign_hw_event()
262 hwc->event_base = uncore_fixed_ctr(box); in uncore_assign_hw_event()
263 hwc->config_base = uncore_fixed_ctl(box); in uncore_assign_hw_event()
267 if (intel_generic_uncore_assign_hw_event(event, box)) in uncore_assign_hw_event()
270 hwc->config_base = uncore_event_ctl(box, hwc->idx); in uncore_assign_hw_event()
271 hwc->event_base = uncore_perf_ctr(box, hwc->idx); in uncore_assign_hw_event()
274 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) in uncore_perf_event_update() argument
280 shift = 64 - uncore_freerunning_bits(box, event); in uncore_perf_event_update()
282 shift = 64 - uncore_fixed_ctr_bits(box); in uncore_perf_event_update()
284 shift = 64 - uncore_perf_ctr_bits(box); in uncore_perf_event_update()
289 new_count = uncore_read_counter(box, event); in uncore_perf_event_update()
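uncore_perf_event_update() accumulates the difference between the previous and the current raw reading while coping with counters narrower than 64 bits (free-running, fixed, or generic width). Shifting both samples up by 64 minus the counter width and shifting the difference back down discards stale high bits and absorbs a single wrap. A small standalone sketch of just that arithmetic (counter_delta is a made-up helper):

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper mirroring the shift trick used by the update path */
static int64_t counter_delta(uint64_t prev, uint64_t now, unsigned int width)
{
	unsigned int shift = 64 - width;
	/* subtract in the shifted domain, then arithmetic-shift back down */
	int64_t delta = (int64_t)((now << shift) - (prev << shift));

	return delta >> shift;
}

int main(void)
{
	/* a 48-bit counter that wrapped from near its top back to a small value */
	uint64_t prev = 0xFFFFFFFFFF00ULL;   /* 2^48 - 256 */
	uint64_t now  = 0x10ULL;

	printf("delta = %lld\n", (long long)counter_delta(prev, now, 48));  /* 272 */
	return 0;
}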
306 struct intel_uncore_box *box; in uncore_pmu_hrtimer() local
311 box = container_of(hrtimer, struct intel_uncore_box, hrtimer); in uncore_pmu_hrtimer()
312 if (!box->n_active || box->cpu != smp_processor_id()) in uncore_pmu_hrtimer()
324 list_for_each_entry(event, &box->active_list, active_entry) { in uncore_pmu_hrtimer()
325 uncore_perf_event_update(box, event); in uncore_pmu_hrtimer()
328 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) in uncore_pmu_hrtimer()
329 uncore_perf_event_update(box, box->events[bit]); in uncore_pmu_hrtimer()
333 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); in uncore_pmu_hrtimer()
337 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) in uncore_pmu_start_hrtimer() argument
339 hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), in uncore_pmu_start_hrtimer()
343 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) in uncore_pmu_cancel_hrtimer() argument
345 hrtimer_cancel(&box->hrtimer); in uncore_pmu_cancel_hrtimer()
348 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) in uncore_pmu_init_hrtimer() argument
350 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in uncore_pmu_init_hrtimer()
351 box->hrtimer.function = uncore_pmu_hrtimer; in uncore_pmu_init_hrtimer()
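Many uncore counters cannot deliver an overflow interrupt, so each box arms an hrtimer and re-reads every active counter periodically, keeping at most one wrap between samples (exactly what the delta arithmetic above can absorb). A rough userspace analogue of that polling loop, with invented types and a sleep standing in for the hrtimer:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sample_box {          /* invented stand-in for the per-box state */
	int n_active;
	uint64_t last[4];    /* last observed value per active counter slot */
};

/* One polling tick: re-read every active counter, as the kernel does from
 * uncore_pmu_hrtimer() for the events on active_list/active_mask. */
static void poll_once(struct sample_box *box, uint64_t (*read_ctr)(int idx))
{
	for (int i = 0; i < box->n_active; i++)
		box->last[i] = read_ctr(i);
}

static uint64_t fake_read(int idx)
{
	return (uint64_t)idx * 1000;   /* stands in for an MSR/MMIO counter read */
}

int main(void)
{
	struct sample_box box = { .n_active = 2 };
	struct timespec period = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	for (int round = 0; round < 3; round++) {
		poll_once(&box, fake_read);
		nanosleep(&period, NULL);   /* stand-in for re-arming the hrtimer */
	}
	printf("last[1] = %llu\n", (unsigned long long)box.last[1]);
	return 0;
}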
358 struct intel_uncore_box *box; in uncore_alloc_box() local
360 size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg); in uncore_alloc_box()
362 box = kzalloc_node(size, GFP_KERNEL, node); in uncore_alloc_box()
363 if (!box) in uncore_alloc_box()
367 raw_spin_lock_init(&box->shared_regs[i].lock); in uncore_alloc_box()
369 uncore_pmu_init_hrtimer(box); in uncore_alloc_box()
370 box->cpu = -1; in uncore_alloc_box()
371 box->dieid = -1; in uncore_alloc_box()
374 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; in uncore_alloc_box()
376 INIT_LIST_HEAD(&box->active_list); in uncore_alloc_box()
378 return box; in uncore_alloc_box()
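uncore_alloc_box() sizes a single zeroed allocation to hold the box header plus a variable number of shared-register slots, then fills in the "not bound yet" defaults (cpu = -1, dieid = -1) and the polling period. The same layout in plain C uses a flexible array member; the types, field names and period below are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct extra_reg {           /* simplified stand-in for intel_uncore_extra_reg */
	uint64_t config;
	int ref;
};

struct box {                 /* simplified stand-in for intel_uncore_box */
	int cpu;
	int dieid;
	uint64_t timer_period_ns;
	struct extra_reg shared_regs[];   /* flexible array member, sized at alloc time */
};

static struct box *alloc_box(int numshared)
{
	struct box *b = calloc(1, sizeof(*b) + (size_t)numshared * sizeof(struct extra_reg));

	if (!b)
		return NULL;
	b->cpu = -1;                                      /* not bound to a CPU yet */
	b->dieid = -1;                                    /* not bound to a die yet */
	b->timer_period_ns = 60ULL * 1000 * 1000 * 1000;  /* placeholder polling period */
	return b;
}

int main(void)
{
	struct box *b = alloc_box(4);

	if (b)
		printf("box with 4 shared regs, cpu=%d dieid=%d\n", b->cpu, b->dieid);
	free(b);
	return 0;
}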
387 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) in is_box_event() argument
389 return &box->pmu->pmu == event->pmu; in is_box_event()
393 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, in uncore_collect_events() argument
399 max_count = box->pmu->type->num_counters; in uncore_collect_events()
400 if (box->pmu->type->fixed_ctl) in uncore_collect_events()
403 if (box->n_events >= max_count) in uncore_collect_events()
406 n = box->n_events; in uncore_collect_events()
408 if (is_box_event(box, leader)) { in uncore_collect_events()
409 box->event_list[n] = leader; in uncore_collect_events()
417 if (!is_box_event(box, event) || in uncore_collect_events()
424 box->event_list[n] = event; in uncore_collect_events()
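uncore_collect_events() appends the group leader and then its siblings to the box's event list, refusing the whole group once the list would exceed the number of hardware counters (plus one when the box has a fixed counter). A compact sketch with arrays in place of the kernel's sibling list (struct fake_box and collect_events are made up):

#include <stdbool.h>
#include <stdio.h>

#define MAX_COUNTERS 4

struct fake_box {                        /* invented, not the kernel structure */
	int n_events;
	int event_list[MAX_COUNTERS + 1];   /* +1 for an optional fixed counter */
};

static int collect_events(struct fake_box *box, int leader,
			  const int *siblings, int n_siblings, bool has_fixed)
{
	int max_count = MAX_COUNTERS + (has_fixed ? 1 : 0);
	int n = box->n_events;

	if (n >= max_count)
		return -1;                  /* box already full */
	box->event_list[n++] = leader;

	for (int i = 0; i < n_siblings; i++) {
		if (n >= max_count)
			return -1;          /* the group does not fit */
		box->event_list[n++] = siblings[i];
	}
	return n;                           /* caller commits n later (see event_add) */
}

int main(void)
{
	struct fake_box box = { 0 };
	int siblings[] = { 11, 12, 13 };
	int n = collect_events(&box, 10, siblings, 3, true);

	printf("collected %d events\n", n);
	return 0;
}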
431 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_event_constraint() argument
433 struct intel_uncore_type *type = box->pmu->type; in uncore_get_event_constraint()
437 c = type->ops->get_constraint(box, event); in uncore_get_event_constraint()
455 static void uncore_put_event_constraint(struct intel_uncore_box *box, in uncore_put_event_constraint() argument
458 if (box->pmu->type->ops->put_constraint) in uncore_put_event_constraint()
459 box->pmu->type->ops->put_constraint(box, event); in uncore_put_event_constraint()
462 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) in uncore_assign_events() argument
472 c = uncore_get_event_constraint(box, box->event_list[i]); in uncore_assign_events()
473 box->event_constraint[i] = c; in uncore_assign_events()
480 hwc = &box->event_list[i]->hw; in uncore_assign_events()
481 c = box->event_constraint[i]; in uncore_assign_events()
501 ret = perf_assign_events(box->event_constraint, n, in uncore_assign_events()
506 uncore_put_event_constraint(box, box->event_list[i]); in uncore_assign_events()
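uncore_assign_events() first asks each event for its constraint (a mask of counters it may occupy), tries a cheap direct placement, and otherwise falls back to the generic perf_assign_events() solver; on failure every constraint taken so far is put back. The greedy loop below only illustrates constraint-mask placement, not the kernel's exact algorithm:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: assign[i] receives the counter index chosen for event i,
 * or the whole attempt fails if some event has no free counter left in its mask. */
static int assign_events(const uint32_t *constraint_mask, int n, int *assign)
{
	uint32_t used = 0;

	for (int i = 0; i < n; i++) {
		uint32_t free_mask = constraint_mask[i] & ~used;
		int idx = 0;

		if (!free_mask)
			return -1;
		while (!(free_mask & (1u << idx)))
			idx++;                  /* lowest permitted free counter */
		assign[i] = idx;
		used |= 1u << idx;
	}
	return 0;
}

int main(void)
{
	uint32_t constraints[] = { 0x1, 0xF, 0xF };  /* event 0 is pinned to counter 0 */
	int assign[3];

	if (!assign_events(constraints, 3, assign))
		printf("assigned: %d %d %d\n", assign[0], assign[1], assign[2]);
	return 0;
}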
513 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_start() local
526 list_add_tail(&event->active_entry, &box->active_list); in uncore_pmu_event_start()
528 uncore_read_counter(box, event)); in uncore_pmu_event_start()
529 if (box->n_active++ == 0) in uncore_pmu_event_start()
530 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
538 box->events[idx] = event; in uncore_pmu_event_start()
539 box->n_active++; in uncore_pmu_event_start()
540 __set_bit(idx, box->active_mask); in uncore_pmu_event_start()
542 local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); in uncore_pmu_event_start()
543 uncore_enable_event(box, event); in uncore_pmu_event_start()
545 if (box->n_active == 1) in uncore_pmu_event_start()
546 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
551 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_stop() local
557 if (--box->n_active == 0) in uncore_pmu_event_stop()
558 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
559 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
563 if (__test_and_clear_bit(hwc->idx, box->active_mask)) { in uncore_pmu_event_stop()
564 uncore_disable_event(box, event); in uncore_pmu_event_stop()
565 box->n_active--; in uncore_pmu_event_stop()
566 box->events[hwc->idx] = NULL; in uncore_pmu_event_stop()
570 if (box->n_active == 0) in uncore_pmu_event_stop()
571 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
579 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
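uncore_pmu_event_start() and uncore_pmu_event_stop() keep the per-box bookkeeping symmetric: the counter's bit in active_mask records which slots the timer must update, the hrtimer is armed when n_active goes from 0 to 1, and it is cancelled when n_active drops back to 0. A toy model of that state machine (struct poll_box is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct poll_box {            /* hypothetical stand-in for the per-box counters */
	int n_active;
	uint32_t active_mask;
	bool timer_running;
};

static void event_start(struct poll_box *box, int idx)
{
	box->active_mask |= 1u << idx;
	if (box->n_active++ == 0)
		box->timer_running = true;     /* first active event arms the timer */
}

static void event_stop(struct poll_box *box, int idx)
{
	if (!(box->active_mask & (1u << idx)))
		return;                        /* already stopped */
	box->active_mask &= ~(1u << idx);
	if (--box->n_active == 0)
		box->timer_running = false;    /* last active event cancels the timer */
}

int main(void)
{
	struct poll_box box = { 0 };

	event_start(&box, 2);
	event_start(&box, 0);
	event_stop(&box, 2);
	event_stop(&box, 0);
	printf("timer running: %s, n_active=%d\n",
	       box.timer_running ? "yes" : "no", box.n_active);
	return 0;
}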
586 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_add() local
591 if (!box) in uncore_pmu_event_add()
605 ret = n = uncore_collect_events(box, event, false); in uncore_pmu_event_add()
613 ret = uncore_assign_events(box, assign, n); in uncore_pmu_event_add()
618 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_add()
619 event = box->event_list[i]; in uncore_pmu_event_add()
623 hwc->last_tag == box->tags[assign[i]]) in uncore_pmu_event_add()
637 event = box->event_list[i]; in uncore_pmu_event_add()
641 hwc->last_tag != box->tags[assign[i]]) in uncore_pmu_event_add()
642 uncore_assign_hw_event(box, event, assign[i]); in uncore_pmu_event_add()
643 else if (i < box->n_events) in uncore_pmu_event_add()
651 box->n_events = n; in uncore_pmu_event_add()
658 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_del() local
671 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_del()
672 if (event == box->event_list[i]) { in uncore_pmu_event_del()
673 uncore_put_event_constraint(box, event); in uncore_pmu_event_del()
675 for (++i; i < box->n_events; i++) in uncore_pmu_event_del()
676 box->event_list[i - 1] = box->event_list[i]; in uncore_pmu_event_del()
678 --box->n_events; in uncore_pmu_event_del()
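uncore_pmu_event_del() drops the event's constraint and then compacts the small event_list array by shifting the tail left over the removed slot. The same pattern in isolation (generic names, not the kernel's):

#include <stdio.h>

static void event_list_del(int *list, int *n_events, int victim)
{
	for (int i = 0; i < *n_events; i++) {
		if (list[i] != victim)
			continue;
		for (++i; i < *n_events; i++)
			list[i - 1] = list[i];   /* close the gap */
		--*n_events;
		return;
	}
}

int main(void)
{
	int list[] = { 10, 11, 12, 13 };
	int n = 4;

	event_list_del(list, &n, 11);
	printf("n=%d: %d %d %d\n", n, list[0], list[1], list[2]);  /* 10 12 13 */
	return 0;
}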
689 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_read() local
690 uncore_perf_event_update(box, event); in uncore_pmu_event_read()
739 struct intel_uncore_box *box; in uncore_pmu_event_init() local
761 box = uncore_pmu_to_box(pmu, event->cpu); in uncore_pmu_event_init()
762 if (!box || box->cpu < 0) in uncore_pmu_event_init()
764 event->cpu = box->cpu; in uncore_pmu_event_init()
765 event->pmu_private = box; in uncore_pmu_event_init()
789 if (!check_valid_freerunning_event(box, event)) in uncore_pmu_event_init()
798 event->hw.event_base = uncore_freerunning_counter(box, event); in uncore_pmu_event_init()
803 ret = pmu->type->ops->hw_config(box, event); in uncore_pmu_event_init()
820 struct intel_uncore_box *box; in uncore_pmu_enable() local
824 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_enable()
825 if (!box) in uncore_pmu_enable()
829 uncore_pmu->type->ops->enable_box(box); in uncore_pmu_enable()
835 struct intel_uncore_box *box; in uncore_pmu_disable() local
839 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_disable()
840 if (!box) in uncore_pmu_disable()
844 uncore_pmu->type->ops->disable_box(box); in uncore_pmu_disable()
907 * Use the box ID from the discovery table if applicable. in uncore_get_pmu_name()
1152 struct intel_uncore_box *box; in uncore_pci_pmu_register() local
1158 box = uncore_alloc_box(type, NUMA_NO_NODE); in uncore_pci_pmu_register()
1159 if (!box) in uncore_pci_pmu_register()
1162 atomic_inc(&box->refcnt); in uncore_pci_pmu_register()
1163 box->dieid = die; in uncore_pci_pmu_register()
1164 box->pci_dev = pdev; in uncore_pci_pmu_register()
1165 box->pmu = pmu; in uncore_pci_pmu_register()
1166 uncore_box_init(box); in uncore_pci_pmu_register()
1168 pmu->boxes[die] = box; in uncore_pci_pmu_register()
1172 /* First active box registers the pmu */ in uncore_pci_pmu_register()
1176 uncore_box_exit(box); in uncore_pci_pmu_register()
1177 kfree(box); in uncore_pci_pmu_register()
1208 * PCI slot and func to indicate the uncore box. in uncore_pci_probe()
1219 * each box has a different function id. in uncore_pci_probe()
1238 struct intel_uncore_box *box = pmu->boxes[die]; in uncore_pci_pmu_unregister() local
1243 uncore_box_exit(box); in uncore_pci_pmu_unregister()
1244 kfree(box); in uncore_pci_pmu_unregister()
1249 struct intel_uncore_box *box; in uncore_pci_remove() local
1256 box = pci_get_drvdata(pdev); in uncore_pci_remove()
1257 if (!box) { in uncore_pci_remove()
1268 pmu = box->pmu; in uncore_pci_remove()
1469 struct intel_uncore_box *box; in uncore_change_type_ctx() local
1474 box = pmu->boxes[die]; in uncore_change_type_ctx()
1475 if (!box) in uncore_change_type_ctx()
1479 WARN_ON_ONCE(box->cpu != -1); in uncore_change_type_ctx()
1481 box->cpu = new_cpu; in uncore_change_type_ctx()
1487 WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu); in uncore_change_type_ctx()
1488 box->cpu = -1; in uncore_change_type_ctx()
1495 uncore_pmu_cancel_hrtimer(box); in uncore_change_type_ctx()
1497 box->cpu = new_cpu; in uncore_change_type_ctx()
1513 struct intel_uncore_box *box; in uncore_box_unref() local
1520 box = pmu->boxes[id]; in uncore_box_unref()
1521 if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0) in uncore_box_unref()
1522 uncore_box_exit(box); in uncore_box_unref()
1558 struct intel_uncore_box *box, *tmp; in allocate_boxes() local
1571 box = uncore_alloc_box(type, cpu_to_node(cpu)); in allocate_boxes()
1572 if (!box) in allocate_boxes()
1574 box->pmu = pmu; in allocate_boxes()
1575 box->dieid = die; in allocate_boxes()
1576 list_add(&box->active_list, &allocated); in allocate_boxes()
1580 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1581 list_del_init(&box->active_list); in allocate_boxes()
1582 box->pmu->boxes[die] = box; in allocate_boxes()
1587 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1588 list_del_init(&box->active_list); in allocate_boxes()
1589 kfree(box); in allocate_boxes()
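allocate_boxes() builds everything on a private list first and publishes the boxes into pmu->boxes[die] only once every allocation has succeeded; if any allocation fails, the staged boxes are freed and nothing is left half-installed. A generic sketch of that commit-or-roll-back pattern (names and sizes are placeholders):

#include <stdio.h>
#include <stdlib.h>

#define N_TYPES 3   /* placeholder for "one box per uncore type" */

static int allocate_all(int *published[N_TYPES])
{
	int *staged[N_TYPES] = { 0 };

	/* phase 1: allocate everything without publishing anything */
	for (int i = 0; i < N_TYPES; i++) {
		staged[i] = calloc(1, sizeof(int));
		if (!staged[i])
			goto cleanup;
	}

	/* phase 2: all allocations succeeded, publish them */
	for (int i = 0; i < N_TYPES; i++)
		published[i] = staged[i];
	return 0;

cleanup:
	for (int i = 0; i < N_TYPES; i++)
		free(staged[i]);        /* roll back the partial allocation */
	return -1;
}

int main(void)
{
	int *boxes[N_TYPES] = { 0 };

	if (!allocate_all(boxes))
		printf("all %d boxes published\n", N_TYPES);
	for (int i = 0; i < N_TYPES; i++)
		free(boxes[i]);
	return 0;
}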
1599 struct intel_uncore_box *box; in uncore_box_ref() local
1610 box = pmu->boxes[id]; in uncore_box_ref()
1611 if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1) in uncore_box_ref()
1612 uncore_box_init(box); in uncore_box_ref()
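uncore_box_ref() and uncore_box_unref() gate hardware init and teardown on reference-count transitions: uncore_box_init() runs only when the count goes from 0 to 1 and uncore_box_exit() only when it returns to 0, so a box shared by several CPUs of a die is set up and torn down exactly once. A plain, non-atomic sketch of those transitions (the kernel uses atomic_inc_return()/atomic_dec_return()):

#include <stdio.h>

struct counted_box {         /* hypothetical stand-in for refcounted box state */
	int refcnt;
	int initialised;
};

static void box_ref(struct counted_box *box)
{
	if (++box->refcnt == 1)
		box->initialised = 1;    /* first user initialises the hardware */
}

static void box_unref(struct counted_box *box)
{
	if (box->refcnt > 0 && --box->refcnt == 0)
		box->initialised = 0;    /* last user shuts the box down */
}

int main(void)
{
	struct counted_box box = { 0 };

	box_ref(&box);
	box_ref(&box);   /* second CPU of the same die: no re-init */
	box_unref(&box);
	box_unref(&box);
	printf("refcnt=%d initialised=%d\n", box.refcnt, box.initialised);
	return 0;
}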