Lines Matching full:dfi

87  * The dfi controller can monitor DDR load. It has an upper and lower threshold
121 static int rockchip_dfi_enable(struct rockchip_dfi *dfi) in rockchip_dfi_enable() argument
123 void __iomem *dfi_regs = dfi->regs; in rockchip_dfi_enable()
126 mutex_lock(&dfi->mutex); in rockchip_dfi_enable()
128 dfi->usecount++; in rockchip_dfi_enable()
129 if (dfi->usecount > 1) in rockchip_dfi_enable()
132 ret = clk_prepare_enable(dfi->clk); in rockchip_dfi_enable()
134 dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret); in rockchip_dfi_enable()
138 for (i = 0; i < dfi->max_channels; i++) { in rockchip_dfi_enable()
141 if (!(dfi->channel_mask & BIT(i))) in rockchip_dfi_enable()
147 dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); in rockchip_dfi_enable()
149 /* set ddr type to dfi */ in rockchip_dfi_enable()
150 switch (dfi->ddr_type) { in rockchip_dfi_enable()
164 dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); in rockchip_dfi_enable()
168 dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); in rockchip_dfi_enable()
170 if (dfi->ddrmon_ctrl_single) in rockchip_dfi_enable()
174 mutex_unlock(&dfi->mutex); in rockchip_dfi_enable()
179 static void rockchip_dfi_disable(struct rockchip_dfi *dfi) in rockchip_dfi_disable() argument
181 void __iomem *dfi_regs = dfi->regs; in rockchip_dfi_disable()
184 mutex_lock(&dfi->mutex); in rockchip_dfi_disable()
186 dfi->usecount--; in rockchip_dfi_disable()
188 WARN_ON_ONCE(dfi->usecount < 0); in rockchip_dfi_disable()
190 if (dfi->usecount > 0) in rockchip_dfi_disable()
193 for (i = 0; i < dfi->max_channels; i++) { in rockchip_dfi_disable()
194 if (!(dfi->channel_mask & BIT(i))) in rockchip_dfi_disable()
198 dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); in rockchip_dfi_disable()
200 if (dfi->ddrmon_ctrl_single) in rockchip_dfi_disable()
204 clk_disable_unprepare(dfi->clk); in rockchip_dfi_disable()
206 mutex_unlock(&dfi->mutex); in rockchip_dfi_disable()
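
rockchip_dfi_enable()/rockchip_dfi_disable() share the DDRMON block between the devfreq-event path and the perf PMU by reference counting under dfi->mutex: only the first user ungates the clock and programs DDRMON_CTRL, and only the last user tears it down again. A minimal sketch of that pattern, assuming the driver's struct rockchip_dfi fields shown above (the per-channel register writes are elided):

	#include <linux/clk.h>
	#include <linux/mutex.h>

	/* sketch only: first caller powers the monitor, later callers just count */
	static int dfi_get(struct rockchip_dfi *dfi)
	{
		int ret = 0;

		mutex_lock(&dfi->mutex);
		if (dfi->usecount++ == 0) {
			ret = clk_prepare_enable(dfi->clk);
			if (ret)
				dfi->usecount--;	/* roll back on failure */
			/* else: program DDRMON_CTRL for every channel in channel_mask */
		}
		mutex_unlock(&dfi->mutex);

		return ret;
	}

	/* sketch only: last caller stops the counters and gates the clock again */
	static void dfi_put(struct rockchip_dfi *dfi)
	{
		mutex_lock(&dfi->mutex);
		WARN_ON_ONCE(dfi->usecount <= 0);
		if (--dfi->usecount == 0) {
			/* clear DDRMON_CTRL for every channel in channel_mask */
			clk_disable_unprepare(dfi->clk);
		}
		mutex_unlock(&dfi->mutex);
	}
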
209 static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res) in rockchip_dfi_read_counters() argument
212 void __iomem *dfi_regs = dfi->regs; in rockchip_dfi_read_counters()
214 for (i = 0; i < dfi->max_channels; i++) { in rockchip_dfi_read_counters()
215 if (!(dfi->channel_mask & BIT(i))) in rockchip_dfi_read_counters()
218 DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride); in rockchip_dfi_read_counters()
220 DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride); in rockchip_dfi_read_counters()
222 DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride); in rockchip_dfi_read_counters()
224 DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride); in rockchip_dfi_read_counters()
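
rockchip_dfi_read_counters() shows the register layout: the counters of channel N live at the channel-0 offsets plus N * dfi->ddrmon_stride, and channels missing from channel_mask are skipped. A sketch of that access pattern, assuming 32-bit MMIO reads (the listing does not show the accessor) and the driver's own structures:

	#include <linux/bits.h>
	#include <linux/io.h>

	/* sketch: one read per counter, strided per channel */
	static void dfi_read_one_counter(struct rockchip_dfi *dfi, u32 ch0_offset, u32 *out)
	{
		void __iomem *base = dfi->regs;
		int i;

		for (i = 0; i < dfi->max_channels; i++) {
			if (!(dfi->channel_mask & BIT(i)))
				continue;	/* channel not populated on this board */
			out[i] = readl_relaxed(base + ch0_offset + i * dfi->ddrmon_stride);
		}
	}

Called with DDRMON_CH0_RD_NUM, DDRMON_CH0_WR_NUM, DDRMON_CH0_DFI_ACCESS_NUM or DDRMON_CH0_COUNT_NUM, this yields the four per-channel counters read above.
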
230 struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev); in rockchip_dfi_event_disable() local
232 rockchip_dfi_disable(dfi); in rockchip_dfi_event_disable()
239 struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev); in rockchip_dfi_event_enable() local
241 return rockchip_dfi_enable(dfi); in rockchip_dfi_event_enable()
252 struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev); in rockchip_dfi_get_event() local
254 struct dmc_count *last = &dfi->last_event_count; in rockchip_dfi_get_event()
258 rockchip_dfi_read_counters(dfi, &count); in rockchip_dfi_get_event()
261 for (i = 0; i < dfi->max_channels; i++) { in rockchip_dfi_get_event()
264 if (!(dfi->channel_mask & BIT(i))) in rockchip_dfi_get_event()
279 dfi->last_event_count = count; in rockchip_dfi_get_event()
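
rockchip_dfi_get_event() snapshots the counters, subtracts last_event_count per active channel, and then stores the snapshot for the next call, so the devfreq governor always sees the load of the most recent interval. A sketch of that step, assuming the reported values are the access and clock-cycle deltas of the busiest channel (the listing only shows the loop skeleton and the final store):

	#include <linux/bits.h>
	#include <linux/devfreq-event.h>

	/* sketch: report the interval delta of the busiest channel to devfreq */
	static void dfi_fill_edata(struct rockchip_dfi *dfi, const struct dmc_count *count,
				   struct devfreq_event_data *edata)
	{
		const struct dmc_count *last = &dfi->last_event_count;
		u32 best = 0;
		int i;

		for (i = 0; i < dfi->max_channels; i++) {
			u32 access, clock_cycles;

			if (!(dfi->channel_mask & BIT(i)))
				continue;

			access = count->c[i].access - last->c[i].access;
			clock_cycles = count->c[i].clock_cycles - last->c[i].clock_cycles;

			if (access > best) {
				best = access;
				edata->load_count = access;
				edata->total_count = clock_cycles;
			}
		}

		dfi->last_event_count = *count;
	}
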
293 static void rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi, in rockchip_ddr_perf_counters_add() argument
297 const struct dmc_count *last = &dfi->last_perf_count; in rockchip_ddr_perf_counters_add()
300 for (i = 0; i < dfi->max_channels; i++) { in rockchip_ddr_perf_counters_add()
301 res->c[i].read_access = dfi->total_count.c[i].read_access + in rockchip_ddr_perf_counters_add()
303 res->c[i].write_access = dfi->total_count.c[i].write_access + in rockchip_ddr_perf_counters_add()
305 res->c[i].access = dfi->total_count.c[i].access + in rockchip_ddr_perf_counters_add()
307 res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles + in rockchip_ddr_perf_counters_add()
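
rockchip_ddr_perf_counters_add() keeps running totals for the perf PMU: for every field it adds the delta between the fresh readout and last_perf_count to total_count. Doing the subtraction in the counter's native width makes a single wrap between two snapshots harmless, which is what the periodic timer below relies on. A self-contained sketch of that accumulation step (the 32-bit hardware counter width is an assumption):

	#include <linux/types.h>

	/* sketch: fold the delta since the last snapshot into a wider running total;
	 * u32 subtraction is modulo 2^32, so one counter wrap is still counted right */
	static u64 dfi_fold_delta(u64 total, u32 now, u32 last)
	{
		return total + (u32)(now - last);
	}
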
316 struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu); in ddr_perf_cpumask_show() local
318 return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu)); in ddr_perf_cpumask_show()
404 struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); in rockchip_ddr_perf_event_init() local
413 dev_warn(dfi->dev, "Can't provide per-task data!\n"); in rockchip_ddr_perf_event_init()
422 struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); in rockchip_ddr_perf_event_get_count() local
423 int blen = dfi->burst_len; in rockchip_ddr_perf_event_get_count()
429 rockchip_dfi_read_counters(dfi, &now); in rockchip_ddr_perf_event_get_count()
432 seq = read_seqbegin(&dfi->count_seqlock); in rockchip_ddr_perf_event_get_count()
433 rockchip_ddr_perf_counters_add(dfi, &now, &total); in rockchip_ddr_perf_event_get_count()
434 } while (read_seqretry(&dfi->count_seqlock, seq)); in rockchip_ddr_perf_event_get_count()
441 for (i = 0; i < dfi->max_channels; i++) in rockchip_ddr_perf_event_get_count()
442 count += total.c[i].read_access * blen * dfi->buswidth[i]; in rockchip_ddr_perf_event_get_count()
445 for (i = 0; i < dfi->max_channels; i++) in rockchip_ddr_perf_event_get_count()
446 count += total.c[i].write_access * blen * dfi->buswidth[i]; in rockchip_ddr_perf_event_get_count()
449 count = total.c[0].read_access * blen * dfi->buswidth[0]; in rockchip_ddr_perf_event_get_count()
452 count = total.c[0].write_access * blen * dfi->buswidth[0]; in rockchip_ddr_perf_event_get_count()
455 count = total.c[1].read_access * blen * dfi->buswidth[1]; in rockchip_ddr_perf_event_get_count()
458 count = total.c[1].write_access * blen * dfi->buswidth[1]; in rockchip_ddr_perf_event_get_count()
461 count = total.c[2].read_access * blen * dfi->buswidth[2]; in rockchip_ddr_perf_event_get_count()
464 count = total.c[2].write_access * blen * dfi->buswidth[2]; in rockchip_ddr_perf_event_get_count()
467 count = total.c[3].read_access * blen * dfi->buswidth[3]; in rockchip_ddr_perf_event_get_count()
470 count = total.c[3].write_access * blen * dfi->buswidth[3]; in rockchip_ddr_perf_event_get_count()
473 for (i = 0; i < dfi->max_channels; i++) in rockchip_ddr_perf_event_get_count()
474 count += total.c[i].access * blen * dfi->buswidth[i]; in rockchip_ddr_perf_event_get_count()
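
rockchip_ddr_perf_event_get_count() turns raw access counters into bytes: each counted access moves one DRAM burst, so the value is scaled by dfi->burst_len (beats per burst) and dfi->buswidth[] (bytes per beat, 4 for a full-width channel and 2 for a half-width one, as set up in the SoC init functions below). A sketch of the sum-over-all-channels case used for the bytes-read event:

	#include <linux/types.h>

	/* sketch: bytes = accesses * burst length (beats) * bus width (bytes/beat),
	 * summed over every channel the SoC provides */
	static u64 dfi_read_bytes(const struct rockchip_dfi *dfi, const struct dmc_count *total)
	{
		u64 bytes = 0;
		int i;

		for (i = 0; i < dfi->max_channels; i++)
			bytes += total->c[i].read_access * dfi->burst_len * dfi->buswidth[i];

		return bytes;
	}
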
503 struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); in rockchip_ddr_perf_event_add() local
505 dfi->active_events++; in rockchip_ddr_perf_event_add()
507 if (dfi->active_events == 1) { in rockchip_ddr_perf_event_add()
508 dfi->total_count = (struct dmc_count){}; in rockchip_ddr_perf_event_add()
509 rockchip_dfi_read_counters(dfi, &dfi->last_perf_count); in rockchip_ddr_perf_event_add()
510 hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL); in rockchip_ddr_perf_event_add()
526 struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); in rockchip_ddr_perf_event_del() local
530 dfi->active_events--; in rockchip_ddr_perf_event_del()
532 if (dfi->active_events == 0) in rockchip_ddr_perf_event_del()
533 hrtimer_cancel(&dfi->timer); in rockchip_ddr_perf_event_del()
538 struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer); in rockchip_dfi_timer() local
541 rockchip_dfi_read_counters(dfi, &now); in rockchip_dfi_timer()
543 write_seqlock(&dfi->count_seqlock); in rockchip_dfi_timer()
545 rockchip_ddr_perf_counters_add(dfi, &now, &total); in rockchip_dfi_timer()
546 dfi->total_count = total; in rockchip_dfi_timer()
547 dfi->last_perf_count = now; in rockchip_dfi_timer()
549 write_sequnlock(&dfi->count_seqlock); in rockchip_dfi_timer()
551 hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC)); in rockchip_dfi_timer()
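
The 1-second hrtimer started in rockchip_ddr_perf_event_add() is what keeps those running totals from overflowing: rockchip_dfi_timer() folds the hardware counters into total_count once per second, and count_seqlock lets the perf read path see a consistent {total_count, last_perf_count} pair without ever blocking the timer. Assembled from the matched lines above (locals and the restart return value are filled in as assumptions), the writer/reader pairing looks like this:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>
	#include <linux/seqlock.h>

	/* writer, in timer context: publish a new snapshot atomically for readers */
	static enum hrtimer_restart dfi_timer_sketch(struct hrtimer *timer)
	{
		struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer);
		struct dmc_count now, total;

		rockchip_dfi_read_counters(dfi, &now);

		write_seqlock(&dfi->count_seqlock);
		rockchip_ddr_perf_counters_add(dfi, &now, &total);
		dfi->total_count = total;
		dfi->last_perf_count = now;
		write_sequnlock(&dfi->count_seqlock);

		hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));
		return HRTIMER_RESTART;
	}

	/* reader, in perf ->read(): retry if the timer published a snapshot meanwhile */
	static void dfi_read_totals_sketch(struct rockchip_dfi *dfi, struct dmc_count *total)
	{
		struct dmc_count now;
		unsigned int seq;

		rockchip_dfi_read_counters(dfi, &now);
		do {
			seq = read_seqbegin(&dfi->count_seqlock);
			rockchip_ddr_perf_counters_add(dfi, &now, total);
		} while (read_seqretry(&dfi->count_seqlock, seq));
	}
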
558 struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node); in ddr_perf_offline_cpu() local
561 if (cpu != dfi->cpu) in ddr_perf_offline_cpu()
568 perf_pmu_migrate_context(&dfi->pmu, cpu, target); in ddr_perf_offline_cpu()
569 dfi->cpu = target; in ddr_perf_offline_cpu()
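
The DFI PMU is an uncore PMU, so all its events are bound to a single CPU (dfi->cpu), which is what the cpumask attribute above exposes to userspace. ddr_perf_offline_cpu() keeps the events alive across CPU hotplug by migrating the perf context to another online CPU. A sketch of that handler, assuming the standard cpuhp multi-instance callback signature:

	#include <linux/cpuhotplug.h>
	#include <linux/cpumask.h>
	#include <linux/list.h>
	#include <linux/perf_event.h>

	/* sketch: when the PMU's home CPU goes offline, pick a new one and migrate */
	static int dfi_offline_cpu_sketch(unsigned int cpu, struct hlist_node *node)
	{
		struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
		unsigned int target;

		if (cpu != dfi->cpu)
			return 0;	/* some other CPU went down, ignore */

		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			return 0;	/* no online CPU left to move to */

		perf_pmu_migrate_context(&dfi->pmu, cpu, target);
		dfi->cpu = target;

		return 0;
	}
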
576 struct rockchip_dfi *dfi = data; in rockchip_ddr_cpuhp_remove_state() local
578 cpuhp_remove_multi_state(dfi->cpuhp_state); in rockchip_ddr_cpuhp_remove_state()
580 rockchip_dfi_disable(dfi); in rockchip_ddr_cpuhp_remove_state()
585 struct rockchip_dfi *dfi = data; in rockchip_ddr_cpuhp_remove_instance() local
587 cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node); in rockchip_ddr_cpuhp_remove_instance()
592 struct rockchip_dfi *dfi = data; in rockchip_ddr_perf_remove() local
594 perf_pmu_unregister(&dfi->pmu); in rockchip_ddr_perf_remove()
597 static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi) in rockchip_ddr_perf_init() argument
599 struct pmu *pmu = &dfi->pmu; in rockchip_ddr_perf_init()
602 seqlock_init(&dfi->count_seqlock); in rockchip_ddr_perf_init()
615 dfi->cpu = raw_smp_processor_id(); in rockchip_ddr_perf_init()
623 dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret); in rockchip_ddr_perf_init()
627 dfi->cpuhp_state = ret; in rockchip_ddr_perf_init()
629 rockchip_dfi_enable(dfi); in rockchip_ddr_perf_init()
631 ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi); in rockchip_ddr_perf_init()
635 ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node); in rockchip_ddr_perf_init()
637 dev_err(dfi->dev, "Error %d registering hotplug\n", ret); in rockchip_ddr_perf_init()
641 ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi); in rockchip_ddr_perf_init()
645 hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in rockchip_ddr_perf_init()
646 dfi->timer.function = rockchip_dfi_timer; in rockchip_ddr_perf_init()
648 switch (dfi->ddr_type) { in rockchip_ddr_perf_init()
651 dfi->burst_len = 8; in rockchip_ddr_perf_init()
655 dfi->burst_len = 16; in rockchip_ddr_perf_init()
663 return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi); in rockchip_ddr_perf_init()
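
rockchip_ddr_perf_init() relies on devm actions for teardown: every step that succeeds registers its undo with devm_add_action_or_reset(), and the actions run in reverse order on driver unbind (or immediately if adding the action fails), which is why rockchip_ddr_cpuhp_remove_state(), rockchip_ddr_cpuhp_remove_instance() and rockchip_ddr_perf_remove() exist as separate helpers. A minimal sketch of that pattern around the PMU registration (the PMU name is illustrative; callbacks and attribute groups are omitted):

	#include <linux/device.h>
	#include <linux/perf_event.h>

	/* hypothetical undo helper, mirroring rockchip_ddr_perf_remove() above */
	static void dfi_pmu_unregister_action(void *data)
	{
		struct rockchip_dfi *dfi = data;

		perf_pmu_unregister(&dfi->pmu);
	}

	static int dfi_pmu_register_sketch(struct rockchip_dfi *dfi)
	{
		int ret;

		ret = perf_pmu_register(&dfi->pmu, "rockchip_ddr", -1);
		if (ret)
			return ret;

		/* undo runs automatically on unbind, or right away if this call fails */
		return devm_add_action_or_reset(dfi->dev, dfi_pmu_unregister_action, dfi);
	}
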
666 static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi) in rockchip_ddr_perf_init() argument
672 static int rk3399_dfi_init(struct rockchip_dfi *dfi) in rk3399_dfi_init() argument
674 struct regmap *regmap_pmu = dfi->regmap_pmu; in rk3399_dfi_init()
677 dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon"); in rk3399_dfi_init()
678 if (IS_ERR(dfi->clk)) in rk3399_dfi_init()
679 return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk), in rk3399_dfi_init()
684 dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val); in rk3399_dfi_init()
686 dfi->channel_mask = GENMASK(1, 0); in rk3399_dfi_init()
687 dfi->max_channels = 2; in rk3399_dfi_init()
689 dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2; in rk3399_dfi_init()
690 dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2; in rk3399_dfi_init()
692 dfi->ddrmon_stride = 0x14; in rk3399_dfi_init()
693 dfi->ddrmon_ctrl_single = true; in rk3399_dfi_init()
698 static int rk3568_dfi_init(struct rockchip_dfi *dfi) in rk3568_dfi_init() argument
700 struct regmap *regmap_pmu = dfi->regmap_pmu; in rk3568_dfi_init()
707 dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2); in rk3568_dfi_init()
714 dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3; in rk3568_dfi_init()
716 dfi->channel_mask = BIT(0); in rk3568_dfi_init()
717 dfi->max_channels = 1; in rk3568_dfi_init()
719 dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2; in rk3568_dfi_init()
721 dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */ in rk3568_dfi_init()
722 dfi->ddrmon_ctrl_single = true; in rk3568_dfi_init()
727 static int rk3588_dfi_init(struct rockchip_dfi *dfi) in rk3588_dfi_init() argument
729 struct regmap *regmap_pmu = dfi->regmap_pmu; in rk3588_dfi_init()
737 dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2); in rk3588_dfi_init()
744 dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3; in rk3588_dfi_init()
746 dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2; in rk3588_dfi_init()
747 dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2; in rk3588_dfi_init()
748 dfi->buswidth[2] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2; in rk3588_dfi_init()
749 dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2; in rk3588_dfi_init()
750 dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) | in rk3588_dfi_init()
752 dfi->max_channels = 4; in rk3588_dfi_init()
754 dfi->ddrmon_stride = 0x4000; in rk3588_dfi_init()
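
Each per-SoC init callback reads the OS_REGx words that the boot firmware left in the PMU general register file and decodes DRAM type, channel population and per-channel bus width with FIELD_GET(); it also sets the DDRMON register stride and the single-control-register quirk. A sketch of one such decode, with illustrative register offset and field masks (the real per-SoC values are in the functions above):

	#include <linux/bitfield.h>
	#include <linux/regmap.h>

	#define EXAMPLE_OS_REG2			0x308		/* illustrative offset */
	#define EXAMPLE_OS_REG2_DDRTYPE		GENMASK(15, 13)	/* illustrative mask */
	#define EXAMPLE_OS_REG2_BW_CH0		GENMASK(3, 2)	/* illustrative mask */

	/* sketch: decode DRAM type and channel-0 bus width from a firmware register */
	static int dfi_decode_os_reg2(struct rockchip_dfi *dfi)
	{
		u32 val;
		int ret;

		ret = regmap_read(dfi->regmap_pmu, EXAMPLE_OS_REG2, &val);
		if (ret)
			return ret;

		dfi->ddr_type = FIELD_GET(EXAMPLE_OS_REG2_DDRTYPE, val);
		/* a bus-width field of 0 means a full-width channel (4 bytes), else half */
		dfi->buswidth[0] = FIELD_GET(EXAMPLE_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;

		return 0;
	}
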
760 { .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
761 { .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
762 { .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
771 struct rockchip_dfi *dfi; in rockchip_dfi_probe() local
774 int (*soc_init)(struct rockchip_dfi *dfi); in rockchip_dfi_probe()
781 dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL); in rockchip_dfi_probe()
782 if (!dfi) in rockchip_dfi_probe()
785 dfi->regs = devm_platform_ioremap_resource(pdev, 0); in rockchip_dfi_probe()
786 if (IS_ERR(dfi->regs)) in rockchip_dfi_probe()
787 return PTR_ERR(dfi->regs); in rockchip_dfi_probe()
793 dfi->regmap_pmu = syscon_node_to_regmap(node); in rockchip_dfi_probe()
795 if (IS_ERR(dfi->regmap_pmu)) in rockchip_dfi_probe()
796 return PTR_ERR(dfi->regmap_pmu); in rockchip_dfi_probe()
798 dfi->dev = dev; in rockchip_dfi_probe()
799 mutex_init(&dfi->mutex); in rockchip_dfi_probe()
801 desc = &dfi->desc; in rockchip_dfi_probe()
803 desc->driver_data = dfi; in rockchip_dfi_probe()
806 ret = soc_init(dfi); in rockchip_dfi_probe()
810 dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc); in rockchip_dfi_probe()
811 if (IS_ERR(dfi->edev)) { in rockchip_dfi_probe()
814 return PTR_ERR(dfi->edev); in rockchip_dfi_probe()
817 ret = rockchip_ddr_perf_init(dfi); in rockchip_dfi_probe()
821 platform_set_drvdata(pdev, dfi); in rockchip_dfi_probe()
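
rockchip_dfi_probe() only wires things together: map the DDRMON registers, look up the PMU syscon regmap via a phandle, run the per-SoC init selected through the of_device_id table, then register the devfreq-event device and the perf PMU. A sketch of the resource-lookup step, assuming the phandle property is named "rockchip,pmu" (the listing does not show the property name):

	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* sketch: map DDRMON and resolve the PMU general register file syscon */
	static int dfi_map_resources(struct platform_device *pdev, struct rockchip_dfi *dfi)
	{
		struct device_node *node;

		dfi->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(dfi->regs))
			return PTR_ERR(dfi->regs);

		node = of_parse_phandle(pdev->dev.of_node, "rockchip,pmu", 0);
		if (!node)
			return -ENODEV;

		dfi->regmap_pmu = syscon_node_to_regmap(node);
		of_node_put(node);

		return PTR_ERR_OR_ZERO(dfi->regmap_pmu);
	}
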
829 .name = "rockchip-dfi",
838 MODULE_DESCRIPTION("Rockchip DFI driver");