Lines Matching +full:clock +full:- +full:div

1 // SPDX-License-Identifier: GPL-2.0-only
7 #include "clk-kona.h"
12 #include <linux/clk-provider.h>
28 /* Produces a mask of set bits covering a range of a 32-bit value */
31 return ((1 << width) - 1) << shift; in bitfield_mask()
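bitfield_mask() is only half of the bit-twiddling story: the divider, selector and policy code further down also calls bitfield_extract() and bitfield_replace(), whose bodies did not match the search. A minimal sketch of how such companions are conventionally built on top of bitfield_mask() (the exact bodies in clk-kona.c may differ):

static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	/* Pull the field out and right-justify it */
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	/* Swap in a new field value, leaving the other bits untouched */
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | ((val << shift) & mask);
}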
51 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) in scaled_div_value() argument
53 return (u64)reg_div + ((u64)1 << div->u.s.frac_width); in scaled_div_value()
61 u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) in scaled_div_build() argument
69 combined <<= div->u.s.frac_width; in scaled_div_build()
76 scaled_div_min(struct bcm_clk_div *div) in scaled_div_min() argument
78 if (divider_is_fixed(div)) in scaled_div_min()
79 return (u64)div->u.fixed; in scaled_div_min()
81 return scaled_div_value(div, 0); in scaled_div_min()
85 u64 scaled_div_max(struct bcm_clk_div *div) in scaled_div_max() argument
89 if (divider_is_fixed(div)) in scaled_div_max()
90 return (u64)div->u.fixed; in scaled_div_max()
92 reg_div = ((u32)1 << div->u.s.width) - 1; in scaled_div_max()
94 return scaled_div_value(div, reg_div); in scaled_div_max()
102 divider(struct bcm_clk_div *div, u64 scaled_div) in divider() argument
104 BUG_ON(scaled_div < scaled_div_min(div)); in divider()
105 BUG_ON(scaled_div > scaled_div_max(div)); in divider()
107 return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width)); in divider()
112 scale_rate(struct bcm_clk_div *div, u32 rate) in scale_rate() argument
114 if (divider_is_fixed(div)) in scale_rate()
117 return (u64)rate << div->u.s.frac_width; in scale_rate()
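The scaled-divider helpers above implement fixed-point arithmetic: divisors are stored scaled up by 2^frac_width, and the hardware register omits the implicit 1.0 that scaled_div_value() adds back. A worked round trip as a hypothetical helper (not part of clk-kona.c), assuming frac_width == 3; the real width is per-clock and comes from the CCU data:

/* Illustration only; assumes div->u.s.frac_width == 3 (values in 1/8ths) */
static u64 example_scaled_div_round_trip(struct bcm_clk_div *div)
{
	/* 2.5 expressed as an integer part plus billionths: 2.5 * 8 = 20 */
	u64 scaled = scaled_div_build(div, 2, 500000000);

	/* divider() strips the implicit 1.0 (1 << 3 = 8): 20 - 8 = 12 */
	u32 reg_div = divider(div, scaled);

	/*
	 * scaled_div_value() reads the register value back as 12 + 8 = 20,
	 * and because scale_rate() shifts the parent rate left by the same
	 * frac_width, rate / 2.5 reduces to scaled_rate / 20 exactly.
	 */
	return scaled_div_value(div, reg_div);
}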
122 /* Read a 32-bit register value from a CCU's address space. */
125 return readl(ccu->base + reg_offset); in __ccu_read()
128 /* Write a 32-bit register value into a CCU's address space. */
132 writel(reg_val, ccu->base + reg_offset); in __ccu_write()
139 spin_lock_irqsave(&ccu->lock, flags); in ccu_lock()
145 spin_unlock_irqrestore(&ccu->lock, flags); in ccu_unlock()
154 if (ccu->write_enabled) { in __ccu_write_enable()
156 ccu->name); in __ccu_write_enable()
159 ccu->write_enabled = true; in __ccu_write_enable()
165 if (!ccu->write_enabled) { in __ccu_write_disable()
167 ccu->name); in __ccu_write_disable()
172 ccu->write_enabled = false; in __ccu_write_disable()
200 ccu->name, reg_offset, bit, want ? "set" : "clear"); in __ccu_wait_bit()
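Only the failure message of __ccu_wait_bit() matched, so the polling loop it caps is elided. A sketch of the kind of bounded poll that message implies; the iteration bound and the one-microsecond delay are assumptions, not the driver's actual constants:

static bool __ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset,
			   u32 bit, bool want)
{
	u32 bit_mask = 1 << bit;
	unsigned int tries;

	for (tries = 0; tries < 1000; tries++) {	/* bound is an assumption */
		bool bit_val;

		bit_val = (__ccu_read(ccu, reg_offset) & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);				/* delay is an assumption */
	}

	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}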
209 struct bcm_policy_ctl *control = &ccu->policy.control; in __ccu_policy_engine_start()
219 offset = control->offset; in __ccu_policy_engine_start()
220 go_bit = control->go_bit; in __ccu_policy_engine_start()
226 __func__, ccu->name); in __ccu_policy_engine_start()
240 * (ATL bit clear) and setting the request auto-copy (AC bit in __ccu_policy_engine_start()
243 * Note, we do NOT read-modify-write this register. in __ccu_policy_engine_start()
247 mask |= 1 << control->atl_bit; in __ccu_policy_engine_start()
249 mask |= 1 << control->ac_bit; in __ccu_policy_engine_start()
256 __func__, ccu->name); in __ccu_policy_engine_start()
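The policy-engine start fragments show a GO-register value being composed without a read-modify-write, exactly as the comment at line 243 stresses. A sketch of how that value is plausibly built and committed; the "sync" flag selecting between the ATL and AC bits is an assumption inferred from the two alternative OR lines above:

	/* Build the whole register value; do NOT read-modify-write */
	mask = (u32)1 << go_bit;
	if (sync)				/* condition name assumed */
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* The GO bit clears again once the policy engine has finished */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never started\n",
		       __func__, ccu->name);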
263 struct bcm_lvm_en *enable = &ccu->policy.enable; in __ccu_policy_engine_stop()
273 offset = enable->offset; in __ccu_policy_engine_stop()
274 enable_bit = enable->bit; in __ccu_policy_engine_stop()
278 __func__, ccu->name); in __ccu_policy_engine_stop()
282 /* Now set the bit to stop the engine (NO read-modify-write) */ in __ccu_policy_engine_stop()
289 __func__, ccu->name); in __ccu_policy_engine_stop()
298 * each policy indicating whether the clock is enabled for that
299 * policy or not. The bit position for a clock is the same for all
300 * four registers, and the 32-bit registers are at consecutive
319 __func__, ccu->name); in policy_init()
324 * For now, if a clock defines its policy bit we just mark in policy_init()
327 offset = policy->offset; in policy_init()
328 mask = (u32)1 << policy->bit; in policy_init()
342 __func__, ccu->name); in policy_init()
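policy_init() only shows the offset and mask being prepared. Per the comment at lines 298-300, a clock's bit sits at the same position in each of the four per-policy mask registers, which occupy consecutive 32-bit addresses, so the step that follows is presumably a loop of this shape (register count and stride are taken from that comment; the loop itself is a sketch):

	unsigned int i;

	/* Mark this clock enabled in every policy's mask register */
	for (i = 0; i < 4; i++) {		/* four policies per the comment */
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);		/* registers are consecutive */
	}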
349 /* Determine whether a clock is gated. CCU lock must be held. */
360 bit_mask = 1 << gate->status_bit; in __is_clk_gate_enabled()
361 reg_val = __ccu_read(ccu, gate->offset); in __is_clk_gate_enabled()
366 /* Determine whether a clock is gated. */
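The lock-taking wrapper whose one-line comment matched at line 366 is otherwise elided. A sketch of how it presumably defers to __is_clk_gate_enabled() under the spinlock managed by ccu_lock()/ccu_unlock() shown earlier; the gate_exists() early-out mirrors the existence checks used for optional fields elsewhere in this driver and is an assumption here:

static bool is_clk_gate_enabled(struct ccu_data *ccu,
				struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if the clock has no gate at all */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}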
399 reg_val = __ccu_read(ccu, gate->offset); in __gate_commit()
403 mask = (u32)1 << gate->hw_sw_sel_bit; in __gate_commit()
417 mask = (u32)1 << gate->en_bit; in __gate_commit()
424 __ccu_write(ccu, gate->offset, reg_val); in __gate_commit()
431 return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled); in __gate_commit()
478 /* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
508 return -EIO; in clk_gate()
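Only clk_gate()'s contract comment and its -EIO return matched. A sketch of the lock and write-enable bracketing it presumably wraps around the low-level gate commit, reusing ccu_lock()/__ccu_write_enable() from above; the __clk_gate() helper name in the middle is an assumption:

static int clk_gate(struct ccu_data *ccu, const char *name,
		    struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);	/* helper name assumed */

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
	       enable ? "enable" : "disable", name);

	return -EIO;
}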
514 * If a clock gate requires a turn-off delay it will have
518 * defined for a clock, we set it.
529 offset = hyst->offset; in hyst_init()
530 mask = (u32)1 << hyst->en_bit; in hyst_init()
531 mask |= (u32)1 << hyst->val_bit; in hyst_init()
548 /* Trigger the clock and wait for it to finish */ in __clk_trigger()
549 __ccu_write(ccu, trig->offset, 1 << trig->bit); in __clk_trigger()
551 return __ccu_wait_bit(ccu, trig->offset, trig->bit, false); in __clk_trigger()
557 static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) in divider_read_scaled() argument
563 if (divider_is_fixed(div)) in divider_read_scaled()
564 return (u64)div->u.fixed; in divider_read_scaled()
567 reg_val = __ccu_read(ccu, div->u.s.offset); in divider_read_scaled()
571 reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); in divider_read_scaled()
574 return scaled_div_value(div, reg_div); in divider_read_scaled()
581 * Returns 0 on success. Returns -EINVAL for invalid arguments.
582 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
585 struct bcm_clk_div *div, struct bcm_clk_trig *trig) in __div_commit() argument
592 BUG_ON(divider_is_fixed(div)); in __div_commit()
599 if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) { in __div_commit()
600 reg_val = __ccu_read(ccu, div->u.s.offset); in __div_commit()
601 reg_div = bitfield_extract(reg_val, div->u.s.shift, in __div_commit()
602 div->u.s.width); in __div_commit()
603 div->u.s.scaled_div = scaled_div_value(div, reg_div); in __div_commit()
609 reg_div = divider(div, div->u.s.scaled_div); in __div_commit()
611 /* Clock needs to be enabled before changing the rate */ in __div_commit()
614 ret = -ENXIO; in __div_commit()
619 reg_val = __ccu_read(ccu, div->u.s.offset); in __div_commit()
620 reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width, in __div_commit()
622 __ccu_write(ccu, div->u.s.offset, reg_val); in __div_commit()
626 ret = -EIO; in __div_commit()
628 /* Disable the clock again if it was disabled to begin with */ in __div_commit()
630 ret = ret ? ret : -ENXIO; /* return first error */ in __div_commit()
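The __div_commit() fragments show both error codes but elide the gate calls that produce them. A sketch of the enable/restore bracketing those lines imply; __is_clk_gate_enabled() appears earlier in the listing, while the __clk_gate() helper name is an assumption:

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;			/* gating failed */
		goto out;
	}

	/* ... divider field replaced and trigger fired, as shown above ... */

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;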
641 struct bcm_clk_div *div, struct bcm_clk_trig *trig) in div_init() argument
643 if (!divider_exists(div) || divider_is_fixed(div)) in div_init()
645 return !__div_commit(ccu, gate, div, trig); in div_init()
649 struct bcm_clk_div *div, struct bcm_clk_trig *trig, in divider_write() argument
656 BUG_ON(divider_is_fixed(div)); in divider_write()
658 previous = div->u.s.scaled_div; in divider_write()
662 div->u.s.scaled_div = scaled_div; in divider_write()
667 ret = __div_commit(ccu, gate, div, trig); in divider_write()
673 div->u.s.scaled_div = previous; /* Revert the change */ in divider_write()
679 /* Common clock rate helpers */
682 * Implement the common clock framework recalc_rate method, taking
683 * into account a divider and an optional pre-divider. The
684 * pre-divider register pointer may be NULL.
687 struct bcm_clk_div *div, struct bcm_clk_div *pre_div, in clk_recalc_rate() argument
694 if (!divider_exists(div)) in clk_recalc_rate()
701 * If there is a pre-divider, divide the scaled parent rate in clk_recalc_rate()
702 * by the pre-divider value first. In this case--to improve in clk_recalc_rate()
703 * accuracy--scale the parent rate by *both* the pre-divider in clk_recalc_rate()
705 * result of the pre-divider. in clk_recalc_rate()
713 scaled_rate = scale_rate(div, scaled_rate); in clk_recalc_rate()
718 scaled_parent_rate = scale_rate(div, parent_rate); in clk_recalc_rate()
723 * parent rate by that to determine this clock's resulting in clk_recalc_rate()
726 scaled_div = divider_read_scaled(ccu, div); in clk_recalc_rate()
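A worked example of the recalc arithmetic the comment above describes, assuming frac_width == 3 for both dividers (so all values are kept in 1/8ths); the numbers are illustrative only:

/*
 * parent_rate = 312000000 Hz, pre-divider set to 2.0, divider set to 6.5:
 *
 *   scaled_rate        = (312000000 << 3) << 3  = 19968000000
 *   scaled_pre_div     = 2.0 * 8                = 16
 *   scaled_parent_rate = 19968000000 / 16       = 1248000000
 *   scaled_div         = 6.5 * 8                = 52
 *   rate               = 1248000000 / 52        = 24000000 Hz
 *
 * i.e. 312 MHz / 2 / 6.5 = 24 MHz, with the fractional parts carried
 * through until the final (rounded) division.
 */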
734 * into two dividers. The pre-divider can be NULL, and even if it's
735 * non-null it may be nonexistent. It's also OK for the divider to
736 * be nonexistent, and in that case the pre-divider is also ignored.
738 * If scaled_div is non-null, it is used to return the scaled divisor
741 static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div, in round_rate() argument
752 BUG_ON(!divider_exists(div)); in round_rate()
757 * If there is a pre-divider, divide the scaled parent rate in round_rate()
758 * by the pre-divider value first. In this case--to improve in round_rate()
759 * accuracy--scale the parent rate by *both* the pre-divider in round_rate()
761 * result of the pre-divider. in round_rate()
765 * For simplicity we treat the pre-divider as fixed (for now). in round_rate()
772 scaled_rate = scale_rate(div, scaled_rate); in round_rate()
777 scaled_parent_rate = scale_rate(div, parent_rate); in round_rate()
785 if (!divider_is_fixed(div)) { in round_rate()
788 min_scaled_div = scaled_div_min(div); in round_rate()
789 max_scaled_div = scaled_div_max(div); in round_rate()
795 best_scaled_div = divider_read_scaled(ccu, div); in round_rate()
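The round_rate() fragments fetch the divisor limits but elide the selection between them. A sketch of the step that plausibly fills that gap; the use of DIV_ROUND_CLOSEST_ULL() and clamp() is an assumption about the exact helpers:

	if (!divider_is_fixed(div)) {
		/* Pick the scaled divisor closest to the requested rate... */
		best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
							rate);
		/* ...then clamp it to what the divider field can hold */
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		best_scaled_div = clamp(best_scaled_div, min_scaled_div,
					max_scaled_div);
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* The achievable rate follows from the divisor actually chosen */
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);
	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;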
807 /* Common clock parent helpers */
818 BUG_ON(sel->parent_count > (u32)U8_MAX); in parent_index()
819 for (i = 0; i < sel->parent_count; i++) in parent_index()
820 if (sel->parent_sel[i] == parent_sel) in parent_index()
828 * the clock framework.
846 reg_val = __ccu_read(ccu, sel->offset); in selector_read_index()
849 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); in selector_read_index()
854 pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n", in selector_read_index()
855 __func__, parent_sel, ccu->name, sel->offset); in selector_read_index()
863 * Returns 0 on success. Returns -EINVAL for invalid arguments.
864 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
882 if (sel->clk_index == BAD_CLK_INDEX) { in __sel_commit()
885 reg_val = __ccu_read(ccu, sel->offset); in __sel_commit()
886 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); in __sel_commit()
889 return -EINVAL; in __sel_commit()
890 sel->clk_index = index; in __sel_commit()
895 BUG_ON((u32)sel->clk_index >= sel->parent_count); in __sel_commit()
896 parent_sel = sel->parent_sel[sel->clk_index]; in __sel_commit()
898 /* Clock needs to be enabled before changing the parent */ in __sel_commit()
901 return -ENXIO; in __sel_commit()
904 reg_val = __ccu_read(ccu, sel->offset); in __sel_commit()
905 reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel); in __sel_commit()
906 __ccu_write(ccu, sel->offset, reg_val); in __sel_commit()
910 ret = -EIO; in __sel_commit()
912 /* Disable the clock again if it was disabled to begin with */ in __sel_commit()
914 ret = ret ? ret : -ENXIO; /* return first error */ in __sel_commit()
934 * different parent clock. Returns 0 on success, or an error code
945 previous = sel->clk_index; in selector_write()
949 sel->clk_index = index; in selector_write()
960 sel->clk_index = previous; /* Revert the change */ in selector_write()
965 /* Clock operations */
970 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; in kona_peri_clk_enable()
972 return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true); in kona_peri_clk_enable()
978 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; in kona_peri_clk_disable()
980 (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false); in kona_peri_clk_disable()
986 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; in kona_peri_clk_is_enabled()
988 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; in kona_peri_clk_is_enabled()
995 struct peri_clk_data *data = bcm_clk->u.peri; in kona_peri_clk_recalc_rate()
997 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, in kona_peri_clk_recalc_rate()
1005 struct bcm_clk_div *div = &bcm_clk->u.peri->div; in kona_peri_clk_round_rate() local
1007 if (!divider_exists(div)) in kona_peri_clk_round_rate()
1011 return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, in kona_peri_clk_round_rate()
1031 WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT); in kona_peri_clk_determine_rate()
1032 parent_count = (u32)bcm_clk->init_data.num_parents; in kona_peri_clk_determine_rate()
1034 rate = kona_peri_clk_round_rate(hw, req->rate, in kona_peri_clk_determine_rate()
1035 &req->best_parent_rate); in kona_peri_clk_determine_rate()
1039 req->rate = rate; in kona_peri_clk_determine_rate()
1046 best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate); in kona_peri_clk_determine_rate()
1047 best_delta = abs(best_rate - req->rate); in kona_peri_clk_determine_rate()
1049 /* Check whether any other parent clock can produce a better result */ in kona_peri_clk_determine_rate()
1061 other_rate = kona_peri_clk_round_rate(hw, req->rate, in kona_peri_clk_determine_rate()
1063 delta = abs(other_rate - req->rate); in kona_peri_clk_determine_rate()
1067 req->best_parent_hw = parent; in kona_peri_clk_determine_rate()
1068 req->best_parent_rate = parent_rate; in kona_peri_clk_determine_rate()
1072 req->rate = best_rate; in kona_peri_clk_determine_rate()
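The determine_rate fragments show the best-rate bookkeeping but not the scan over candidate parents that precedes the write-back at line 1072. A sketch of the loop they imply, using the standard clk-provider accessors clk_hw_get_parent_by_index() and clk_hw_get_rate(); the loop variable and exact structure are assumptions:

	u32 which;

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk_hw *parent;
		unsigned long other_rate;
		unsigned long delta;

		parent = clk_hw_get_parent_by_index(hw, which);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, req->rate,
						      &parent_rate);
		delta = abs(other_rate - req->rate);
		if (delta < best_delta) {
			best_rate = other_rate;
			best_delta = delta;
			req->best_parent_hw = parent;
			req->best_parent_rate = parent_rate;
		}
	}
	/* best_rate is then written back into req->rate, as shown at line 1072 */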
1079 struct peri_clk_data *data = bcm_clk->u.peri; in kona_peri_clk_set_parent()
1080 struct bcm_clk_sel *sel = &data->sel; in kona_peri_clk_set_parent()
1084 BUG_ON(index >= sel->parent_count); in kona_peri_clk_set_parent()
1092 * pre-trigger we want to use that instead. in kona_peri_clk_set_parent()
1094 trig = trigger_exists(&data->pre_trig) ? &data->pre_trig in kona_peri_clk_set_parent()
1095 : &data->trig; in kona_peri_clk_set_parent()
1097 ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index); in kona_peri_clk_set_parent()
1098 if (ret == -ENXIO) { in kona_peri_clk_set_parent()
1100 bcm_clk->init_data.name); in kona_peri_clk_set_parent()
1101 ret = -EIO; /* Don't proliferate weird errors */ in kona_peri_clk_set_parent()
1102 } else if (ret == -EIO) { in kona_peri_clk_set_parent()
1104 trig == &data->pre_trig ? "pre-" : "", in kona_peri_clk_set_parent()
1105 bcm_clk->init_data.name); in kona_peri_clk_set_parent()
1114 struct peri_clk_data *data = bcm_clk->u.peri; in kona_peri_clk_get_parent()
1117 index = selector_read_index(bcm_clk->ccu, &data->sel); in kona_peri_clk_get_parent()
1119 /* Not all callers would handle an out-of-range value gracefully */ in kona_peri_clk_get_parent()
1127 struct peri_clk_data *data = bcm_clk->u.peri; in kona_peri_clk_set_rate()
1128 struct bcm_clk_div *div = &data->div; in kona_peri_clk_set_rate() local
1133 return -EINVAL; in kona_peri_clk_set_rate()
1138 if (!divider_exists(div)) in kona_peri_clk_set_rate()
1139 return rate == parent_rate ? 0 : -EINVAL; in kona_peri_clk_set_rate()
1143 * pre-divider be, but for now we never actually try to in kona_peri_clk_set_rate()
1144 * change that.) Tolerate a request for a no-op change. in kona_peri_clk_set_rate()
1146 if (divider_is_fixed(&data->div)) in kona_peri_clk_set_rate()
1147 return rate == parent_rate ? 0 : -EINVAL; in kona_peri_clk_set_rate()
1150 * Get the scaled divisor value needed to achieve a clock in kona_peri_clk_set_rate()
1152 * the parent clock rate supplied. in kona_peri_clk_set_rate()
1154 (void)round_rate(bcm_clk->ccu, div, &data->pre_div, in kona_peri_clk_set_rate()
1158 * We aren't updating any pre-divider at this point, so in kona_peri_clk_set_rate()
1161 ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, in kona_peri_clk_set_rate()
1162 &data->trig, scaled_div); in kona_peri_clk_set_rate()
1163 if (ret == -ENXIO) { in kona_peri_clk_set_rate()
1165 bcm_clk->init_data.name); in kona_peri_clk_set_rate()
1166 ret = -EIO; /* Don't proliferate weird errors */ in kona_peri_clk_set_rate()
1167 } else if (ret == -EIO) { in kona_peri_clk_set_rate()
1169 bcm_clk->init_data.name); in kona_peri_clk_set_rate()
1186 /* Put a peripheral clock into its initial state */
1189 struct ccu_data *ccu = bcm_clk->ccu; in __peri_clk_init()
1190 struct peri_clk_data *peri = bcm_clk->u.peri; in __peri_clk_init()
1191 const char *name = bcm_clk->init_data.name; in __peri_clk_init()
1194 BUG_ON(bcm_clk->type != bcm_clk_peri); in __peri_clk_init()
1196 if (!policy_init(ccu, &peri->policy)) { in __peri_clk_init()
1201 if (!gate_init(ccu, &peri->gate)) { in __peri_clk_init()
1205 if (!hyst_init(ccu, &peri->hyst)) { in __peri_clk_init()
1209 if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) { in __peri_clk_init()
1216 * For the pre-divider and selector, the pre-trigger is used in __peri_clk_init()
1219 trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig in __peri_clk_init()
1220 : &peri->trig; in __peri_clk_init()
1222 if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) { in __peri_clk_init()
1223 pr_err("%s: error initializing pre-divider for %s\n", __func__, in __peri_clk_init()
1228 if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) { in __peri_clk_init()
1239 switch (bcm_clk->type) { in __kona_clk_init()
1253 struct kona_clk *kona_clks = ccu->kona_clks; in kona_ccu_init()
1259 for (which = 0; which < ccu->clk_num; which++) { in kona_ccu_init()
1262 if (!bcm_clk->ccu) in kona_ccu_init()