Lines Matching +full:ddr +full:- +full:pmu (all hits below are from drivers/perf/marvell_cn10k_ddr_pmu.c, the Marvell CN10K/Odyssey DDR PMU perf driver)
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2021-2024 Marvell.
54 /* Two dedicated event counters for DDR reads and writes */
61 * DO NOT change these event-id numbers, they are used to
148 struct pmu pmu; member
161 void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
163 void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
165 void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
166 void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
167 void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
170 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
197 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); in cn10k_ddr_pmu_event_show()
349 PMU_FORMAT_ATTR(event, "config:0-8");
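This format attribute tells perf tooling that the event id occupies bits 0-8 of perf_event_attr.config. Roughly, the generic macro expands to the following (simplified from its definition in include/linux/perf_event.h):

static ssize_t event_show(struct device *dev,
                          struct device_attribute *attr, char *page)
{
        /* Emits "config:0-8" so userspace can encode event=0xNN */
        return sprintf(page, "config:0-8");
}

static struct device_attribute format_attr_event = __ATTR_RO(event);

With that in place, perf accepts e.g. perf stat -a -e mrvl_ddr_pmu_<base>/event=0x10/ (instance name per the devm_kasprintf() call in probe below; the event id 0x10 is only a placeholder).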
365 struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev); in cn10k_ddr_perf_cpumask_show() local
367 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); in cn10k_ddr_perf_cpumask_show()
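The cpumask attribute is the standard way an uncore PMU advertises which CPU services its events; perf reads it before opening counters. A minimal user-space sketch of that lookup (the instance name, and hence its base address, is assumed):

#include <stdio.h>

int main(void)
{
        char mask[64];
        FILE *f = fopen("/sys/bus/event_source/devices/"
                        "mrvl_ddr_pmu_87e1c0000000/cpumask", "r");

        if (f && fgets(mask, sizeof(mask), f))
                printf("DDR PMU cpumask: %s\n", mask);
        if (f)
                fclose(f);
        return 0;
}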
415 if (!ddr_pmu->p_data->is_ody) { in ddr_perf_get_event_bitmap()
416 err = -EINVAL; in ddr_perf_get_event_bitmap()
422 *event_bitmap = (1ULL << (eventid - 1)); in ddr_perf_get_event_bitmap()
427 *event_bitmap = (0xFULL << (eventid - 1)); in ddr_perf_get_event_bitmap()
430 err = -EINVAL; in ddr_perf_get_event_bitmap()
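The two assignments above map an event id either to a single enable bit or, for a handful of Odyssey-only events, to a 4-bit field. A standalone illustration of the arithmetic (the event id is a placeholder, not one of the driver's real ids):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int eventid = 0x4;                          /* placeholder id */
        uint64_t single = 1ULL << (eventid - 1);    /* bit 3    -> 0x8  */
        uint64_t field  = 0xFULL << (eventid - 1);  /* bits 3-6 -> 0x78 */

        printf("single=%#llx field=%#llx\n",
               (unsigned long long)single, (unsigned long long)field);
        return 0;
}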
438 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu, in cn10k_ddr_perf_alloc_counter() argument
441 u8 config = event->attr.config; in cn10k_ddr_perf_alloc_counter()
444 /* DDR read free-run counter index */ in cn10k_ddr_perf_alloc_counter()
446 pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event; in cn10k_ddr_perf_alloc_counter()
450 /* DDR write free-run counter index */ in cn10k_ddr_perf_alloc_counter()
452 pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event; in cn10k_ddr_perf_alloc_counter()
456 /* Allocate DDR generic counters */ in cn10k_ddr_perf_alloc_counter()
458 if (pmu->events[i] == NULL) { in cn10k_ddr_perf_alloc_counter()
459 pmu->events[i] = event; in cn10k_ddr_perf_alloc_counter()
464 return -ENOENT; in cn10k_ddr_perf_alloc_counter()
467 static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter) in cn10k_ddr_perf_free_counter() argument
469 pmu->events[counter] = NULL; in cn10k_ddr_perf_free_counter()
474 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_init() local
475 struct hw_perf_event *hwc = &event->hw; in cn10k_ddr_perf_event_init()
477 if (event->attr.type != event->pmu->type) in cn10k_ddr_perf_event_init()
478 return -ENOENT; in cn10k_ddr_perf_event_init()
481 dev_info(pmu->dev, "Sampling not supported!\n"); in cn10k_ddr_perf_event_init()
482 return -EOPNOTSUPP; in cn10k_ddr_perf_event_init()
485 if (event->cpu < 0) { in cn10k_ddr_perf_event_init()
486 dev_warn(pmu->dev, "Can't provide per-task data!\n"); in cn10k_ddr_perf_event_init()
487 return -EOPNOTSUPP; in cn10k_ddr_perf_event_init()
491 if (event->group_leader->pmu != event->pmu && in cn10k_ddr_perf_event_init()
492 !is_software_event(event->group_leader)) in cn10k_ddr_perf_event_init()
493 return -EINVAL; in cn10k_ddr_perf_event_init()
498 event->cpu = pmu->cpu; in cn10k_ddr_perf_event_init()
499 hwc->idx = -1; in cn10k_ddr_perf_event_init()
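Taken together, these checks implement the usual uncore contract: counting mode only (no sampling), CPU-bound rather than per-task, no groups mixing hardware PMUs, and the event is rehomed to the PMU's designated CPU. From user space the same contract looks roughly like this (sketch; the type value must be read from the PMU's sysfs "type" file):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>

/* Open a counting event on one CPU, system-wide (pid == -1), as the
 * driver requires; type comes from
 * /sys/bus/event_source/devices/mrvl_ddr_pmu_<base>/type. */
static int open_ddr_event(uint32_t type, uint64_t event_id, int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = type;
        attr.size = sizeof(attr);
        attr.config = event_id;         /* config bits 0-8 */
        attr.disabled = 1;

        return syscall(SYS_perf_event_open, &attr, -1 /* pid */, cpu,
                       -1 /* group_fd */, 0 /* flags */);
}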
506 const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; in cn10k_ddr_perf_counter_start()
507 u64 ctrl_reg = p_data->cnt_start_op_ctrl; in cn10k_ddr_perf_counter_start()
509 writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base + in cn10k_ddr_perf_counter_start()
516 const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; in cn10k_ddr_perf_counter_stop()
517 u64 ctrl_reg = p_data->cnt_end_op_ctrl; in cn10k_ddr_perf_counter_stop()
519 writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base + in cn10k_ddr_perf_counter_stop()
523 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu, in cn10k_ddr_perf_counter_enable() argument
526 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_counter_enable()
527 u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl; in cn10k_ddr_perf_counter_enable()
528 const struct ddr_pmu_ops *ops = pmu->ops; in cn10k_ddr_perf_counter_enable()
529 bool is_ody = pmu->p_data->is_ody; in cn10k_ddr_perf_counter_enable()
539 reg = DDRC_PERF_CFG(p_data->cfg_base, counter); in cn10k_ddr_perf_counter_enable()
540 val = readq_relaxed(pmu->base + reg); in cn10k_ddr_perf_counter_enable()
547 writeq_relaxed(val, pmu->base + reg); in cn10k_ddr_perf_counter_enable()
552 * Setup the PMU counter to work in in cn10k_ddr_perf_counter_enable()
557 pmu->base + reg); in cn10k_ddr_perf_counter_enable()
559 cn10k_ddr_perf_counter_start(pmu, counter); in cn10k_ddr_perf_counter_enable()
561 cn10k_ddr_perf_counter_stop(pmu, counter); in cn10k_ddr_perf_counter_enable()
566 ops->enable_read_freerun_counter(pmu, enable); in cn10k_ddr_perf_counter_enable()
568 ops->enable_write_freerun_counter(pmu, enable); in cn10k_ddr_perf_counter_enable()
572 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter) in cn10k_ddr_perf_read_counter() argument
574 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_read_counter()
578 return readq_relaxed(pmu->base + in cn10k_ddr_perf_read_counter()
579 p_data->cnt_value_rd_op); in cn10k_ddr_perf_read_counter()
582 return readq_relaxed(pmu->base + in cn10k_ddr_perf_read_counter()
583 p_data->cnt_value_wr_op); in cn10k_ddr_perf_read_counter()
585 val = readq_relaxed(pmu->base + in cn10k_ddr_perf_read_counter()
586 DDRC_PERF_REG(p_data->cnt_base, counter)); in cn10k_ddr_perf_read_counter()
592 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_update() local
593 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_event_update()
594 struct hw_perf_event *hwc = &event->hw; in cn10k_ddr_perf_event_update()
598 prev_count = local64_read(&hwc->prev_count); in cn10k_ddr_perf_event_update()
599 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_perf_event_update()
600 } while (local64_xchg(&hwc->prev_count, new_count) != prev_count); in cn10k_ddr_perf_event_update()
602 mask = p_data->counter_max_val; in cn10k_ddr_perf_event_update()
604 local64_add((new_count - prev_count) & mask, &event->count); in cn10k_ddr_perf_event_update()
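The xchg loop publishes a consistent prev/new pair without locking, and the masked subtraction keeps the delta correct even when the hardware counter wraps. A worked example, assuming a 48-bit counter_max_val (the actual width comes from platform data):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = (1ULL << 48) - 1;   /* assumed counter_max_val  */
        uint64_t prev = mask - 5;           /* sampled just before wrap */
        uint64_t cur  = 10;                 /* sampled just after wrap  */

        /* cur - prev underflows in 64 bits; masking recovers the true
         * count: 6 increments to wrap through zero, then 10 more. */
        printf("delta = %llu\n", (unsigned long long)((cur - prev) & mask));
        return 0;                           /* prints: delta = 16 */
}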
609 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_start() local
610 struct hw_perf_event *hwc = &event->hw; in cn10k_ddr_perf_event_start()
611 int counter = hwc->idx; in cn10k_ddr_perf_event_start()
613 local64_set(&hwc->prev_count, 0); in cn10k_ddr_perf_event_start()
615 cn10k_ddr_perf_counter_enable(pmu, counter, true); in cn10k_ddr_perf_event_start()
617 hwc->state = 0; in cn10k_ddr_perf_event_start()
622 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_add() local
623 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_event_add()
624 const struct ddr_pmu_ops *ops = pmu->ops; in cn10k_ddr_perf_event_add()
625 struct hw_perf_event *hwc = &event->hw; in cn10k_ddr_perf_event_add()
626 u8 config = event->attr.config; in cn10k_ddr_perf_event_add()
631 counter = cn10k_ddr_perf_alloc_counter(pmu, event); in cn10k_ddr_perf_event_add()
633 return -EAGAIN; in cn10k_ddr_perf_event_add()
635 pmu->active_events++; in cn10k_ddr_perf_event_add()
636 hwc->idx = counter; in cn10k_ddr_perf_event_add()
638 if (pmu->active_events == 1) in cn10k_ddr_perf_event_add()
639 hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(), in cn10k_ddr_perf_event_add()
644 reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter); in cn10k_ddr_perf_event_add()
645 ret = ddr_perf_get_event_bitmap(config, &val, pmu); in cn10k_ddr_perf_event_add()
649 writeq_relaxed(val, pmu->base + reg_offset); in cn10k_ddr_perf_event_add()
653 ops->clear_read_freerun_counter(pmu); in cn10k_ddr_perf_event_add()
655 ops->clear_write_freerun_counter(pmu); in cn10k_ddr_perf_event_add()
658 hwc->state |= PERF_HES_STOPPED; in cn10k_ddr_perf_event_add()
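Per the perf core contract, pmu::add() must leave the event stopped unless PERF_EF_START was passed; the hit above is the tail of that pattern, which conventionally continues as (sketch of the lines the search elides):

        hwc->state |= PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                cn10k_ddr_perf_event_start(event, flags);

        return 0;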
668 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_stop() local
669 struct hw_perf_event *hwc = &event->hw; in cn10k_ddr_perf_event_stop()
670 int counter = hwc->idx; in cn10k_ddr_perf_event_stop()
672 cn10k_ddr_perf_counter_enable(pmu, counter, false); in cn10k_ddr_perf_event_stop()
677 hwc->state |= PERF_HES_STOPPED; in cn10k_ddr_perf_event_stop()
682 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_del() local
683 struct hw_perf_event *hwc = &event->hw; in cn10k_ddr_perf_event_del()
684 int counter = hwc->idx; in cn10k_ddr_perf_event_del()
688 cn10k_ddr_perf_free_counter(pmu, counter); in cn10k_ddr_perf_event_del()
689 pmu->active_events--; in cn10k_ddr_perf_event_del()
690 hwc->idx = -1; in cn10k_ddr_perf_event_del()
693 if (pmu->active_events == 0) in cn10k_ddr_perf_event_del()
694 hrtimer_cancel(&pmu->hrtimer); in cn10k_ddr_perf_event_del()
697 static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu) in cn10k_ddr_perf_pmu_enable() argument
699 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); in cn10k_ddr_perf_pmu_enable()
700 const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; in cn10k_ddr_perf_pmu_enable()
702 writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base + in cn10k_ddr_perf_pmu_enable()
703 p_data->cnt_start_op_ctrl); in cn10k_ddr_perf_pmu_enable()
706 static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu) in cn10k_ddr_perf_pmu_disable() argument
708 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); in cn10k_ddr_perf_pmu_disable()
709 const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; in cn10k_ddr_perf_pmu_disable()
711 writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base + in cn10k_ddr_perf_pmu_disable()
712 p_data->cnt_end_op_ctrl); in cn10k_ddr_perf_pmu_disable()
715 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu) in cn10k_ddr_perf_event_update_all() argument
721 if (pmu->events[i] == NULL) in cn10k_ddr_perf_event_update_all()
724 cn10k_ddr_perf_event_update(pmu->events[i]); in cn10k_ddr_perf_event_update_all()
729 if (pmu->events[i] == NULL) in cn10k_ddr_perf_event_update_all()
732 hwc = &pmu->events[i]->hw; in cn10k_ddr_perf_event_update_all()
733 local64_set(&hwc->prev_count, 0); in cn10k_ddr_perf_event_update_all()
737 static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable) in ddr_pmu_enable_read_freerun() argument
739 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_enable_read_freerun()
742 val = readq_relaxed(pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_read_freerun()
748 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_read_freerun()
751 static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable) in ddr_pmu_enable_write_freerun() argument
753 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_enable_write_freerun()
756 val = readq_relaxed(pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_write_freerun()
762 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_write_freerun()
765 static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_read_clear_freerun() argument
767 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_read_clear_freerun()
771 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_read_clear_freerun()
774 static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_write_clear_freerun() argument
776 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_write_clear_freerun()
780 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_write_clear_freerun()
783 static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx) in ddr_pmu_overflow_hander() argument
785 cn10k_ddr_perf_event_update_all(pmu); in ddr_pmu_overflow_hander()
786 cn10k_ddr_perf_pmu_disable(&pmu->pmu); in ddr_pmu_overflow_hander()
787 cn10k_ddr_perf_pmu_enable(&pmu->pmu); in ddr_pmu_overflow_hander()
790 static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu, in ddr_pmu_ody_enable_read_freerun() argument
793 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_enable_read_freerun()
796 val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_read_freerun()
802 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_read_freerun()
805 static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu, in ddr_pmu_ody_enable_write_freerun() argument
808 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_enable_write_freerun()
811 val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_write_freerun()
817 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_write_freerun()
820 static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_ody_read_clear_freerun() argument
822 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_read_clear_freerun()
826 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr); in ddr_pmu_ody_read_clear_freerun()
829 static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_ody_write_clear_freerun() argument
831 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_write_clear_freerun()
835 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr); in ddr_pmu_ody_write_clear_freerun()
838 static void ddr_pmu_ody_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx) in ddr_pmu_ody_overflow_hander() argument
845 cn10k_ddr_perf_event_update(pmu->events[evt_idx]); in ddr_pmu_ody_overflow_hander()
846 cn10k_ddr_perf_counter_stop(pmu, evt_idx); in ddr_pmu_ody_overflow_hander()
847 cn10k_ddr_perf_counter_start(pmu, evt_idx); in ddr_pmu_ody_overflow_hander()
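Note the contrast with ddr_pmu_overflow_hander() above: on CN10K the generic counters are evidently managed as a group, so overflow recovery snapshots every active event and bounces the whole PMU, which is also why cn10k_ddr_perf_event_update_all() rezeroes each prev_count (the restarted counters begin again from zero). The Odyssey path instead updates and restarts only the counter that overflowed.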
850 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) in cn10k_ddr_pmu_overflow_handler() argument
852 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_pmu_overflow_handler()
853 const struct ddr_pmu_ops *ops = pmu->ops; in cn10k_ddr_pmu_overflow_handler()
860 event = pmu->events[DDRC_PERF_READ_COUNTER_IDX]; in cn10k_ddr_pmu_overflow_handler()
862 hwc = &event->hw; in cn10k_ddr_pmu_overflow_handler()
863 prev_count = local64_read(&hwc->prev_count); in cn10k_ddr_pmu_overflow_handler()
864 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_pmu_overflow_handler()
873 event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX]; in cn10k_ddr_pmu_overflow_handler()
875 hwc = &event->hw; in cn10k_ddr_pmu_overflow_handler()
876 prev_count = local64_read(&hwc->prev_count); in cn10k_ddr_pmu_overflow_handler()
877 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_pmu_overflow_handler()
887 if (pmu->events[i] == NULL) in cn10k_ddr_pmu_overflow_handler()
890 value = cn10k_ddr_perf_read_counter(pmu, i); in cn10k_ddr_pmu_overflow_handler()
891 if (value == p_data->counter_max_val) { in cn10k_ddr_pmu_overflow_handler()
892 pr_info("Counter-(%d) reached max value\n", i); in cn10k_ddr_pmu_overflow_handler()
893 ops->pmu_overflow_handler(pmu, i); in cn10k_ddr_pmu_overflow_handler()
902 struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu, in cn10k_ddr_pmu_timer_handler() local
907 cn10k_ddr_pmu_overflow_handler(pmu); in cn10k_ddr_pmu_timer_handler()
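These two hits are from the polling timer; the hrtimer start/cancel seen in event_add/event_del suggests the counters are polled for overflow rather than interrupt-driven. Only the container_of() and the overflow call appear in the hits; the rest of the handler presumably follows the standard self-rearming shape:

static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
{
        struct cn10k_ddr_pmu *pmu = container_of(hrtimer,
                                        struct cn10k_ddr_pmu, hrtimer);

        cn10k_ddr_pmu_overflow_handler(pmu);

        /* Re-arm for the next poll interval and keep the timer alive. */
        hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
        return HRTIMER_RESTART;
}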
916 struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu, in cn10k_ddr_pmu_offline_cpu() local
920 if (cpu != pmu->cpu) in cn10k_ddr_pmu_offline_cpu()
927 perf_pmu_migrate_context(&pmu->pmu, cpu, target); in cn10k_ddr_pmu_offline_cpu()
928 pmu->cpu = target; in cn10k_ddr_pmu_offline_cpu()
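This is the standard uncore hotplug callback: when the PMU's home CPU goes offline, the perf context migrates to a surviving CPU. The target-selection lines fall between the hits; the whole callback conventionally reads:

static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node,
                                        struct cn10k_ddr_pmu, node);
        unsigned int target;

        if (cpu != pmu->cpu)
                return 0;

        /* Pick any other online CPU (sketch of the elided lines). */
        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu->pmu, cpu, target);
        pmu->cpu = target;
        return 0;
}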
997 ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL); in cn10k_ddr_perf_probe()
999 return -ENOMEM; in cn10k_ddr_perf_probe()
1001 ddr_pmu->dev = &pdev->dev; in cn10k_ddr_perf_probe()
1004 dev_data = device_get_match_data(&pdev->dev); in cn10k_ddr_perf_probe()
1006 dev_err(&pdev->dev, "Error: No device match data found\n"); in cn10k_ddr_perf_probe()
1007 return -ENODEV; in cn10k_ddr_perf_probe()
1014 ddr_pmu->base = base; in cn10k_ddr_perf_probe()
1016 ddr_pmu->p_data = dev_data; in cn10k_ddr_perf_probe()
1017 is_cn10k = ddr_pmu->p_data->is_cn10k; in cn10k_ddr_perf_probe()
1018 is_ody = ddr_pmu->p_data->is_ody; in cn10k_ddr_perf_probe()
1021 ddr_pmu->ops = &ddr_pmu_ops; in cn10k_ddr_perf_probe()
1022 /* Setup the PMU counter to work in manual mode */ in cn10k_ddr_perf_probe()
1023 writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base + in cn10k_ddr_perf_probe()
1024 ddr_pmu->p_data->cnt_op_mode_ctrl); in cn10k_ddr_perf_probe()
1026 ddr_pmu->pmu = (struct pmu) { in cn10k_ddr_perf_probe()
1043 ddr_pmu->ops = &ddr_pmu_ody_ops; in cn10k_ddr_perf_probe()
1045 ddr_pmu->pmu = (struct pmu) { in cn10k_ddr_perf_probe()
1060 ddr_pmu->cpu = raw_smp_processor_id(); in cn10k_ddr_perf_probe()
1062 name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx", in cn10k_ddr_perf_probe()
1063 res->start); in cn10k_ddr_perf_probe()
1065 return -ENOMEM; in cn10k_ddr_perf_probe()
1067 hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in cn10k_ddr_perf_probe()
1068 ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler; in cn10k_ddr_perf_probe()
1072 &ddr_pmu->node); in cn10k_ddr_perf_probe()
1074 ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); in cn10k_ddr_perf_probe()
1078 pr_info("DDR PMU Driver for ddrc@%llx\n", res->start); in cn10k_ddr_perf_probe()
1083 &ddr_pmu->node); in cn10k_ddr_perf_probe()
1093 &ddr_pmu->node); in cn10k_ddr_perf_remove()
1095 perf_pmu_unregister(&ddr_pmu->pmu); in cn10k_ddr_perf_remove()
1100 { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
1117 .name = "cn10k-ddr-pmu",
1132 "perf/marvell/cn10k/ddr:online", NULL, in cn10k_ddr_pmu_init()
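The ":online" string registers the multi-instance hotplug state that probe() attaches each PMU to via cpuhp_state_add_instance_nocalls() (the &ddr_pmu->node hits above). A sketch of the enclosing module init, assuming the dedicated CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE state this driver is associated with:

static int __init cn10k_ddr_pmu_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(
                        CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
                        "perf/marvell/cn10k/ddr:online", NULL,
                        cn10k_ddr_pmu_offline_cpu);
        if (ret)
                return ret;

        ret = platform_driver_register(&cn10k_ddr_pmu_driver);
        if (ret)
                cpuhp_remove_multi_state(
                        CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
        return ret;
}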