// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define CPG_BUS_1_MSTOP		(0xd00)
#define CPG_BUS_MSTOP(m)	(CPG_BUS_1_MSTOP + ((m) - 1) * 4)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))

#define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)
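/*
 * Note: many CPG registers pair each control bit in the lower 16 bits with a
 * write-enable bit 16 positions above it, so an update is written as
 * (BIT(n) << 16) | value. DDIV_DIVCTL_WEN() builds that write-enable bit for
 * a divider field starting at @shift; the same pattern recurs in the CLK_ON,
 * RST and MSTOP accesses below.
 */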

#define GET_MOD_CLK_ID(base, index, bit)	\
		((base) + ((((index) * (16))) + (bit)))
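/*
 * Each CLK_ON register controls 16 module clocks, so a module clock ID is
 * derived from its register index and bit position, offset past the core
 * clock IDs in priv->clks[].
 */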

#define CPG_CLKSTATUS0		(0x700)

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @mstop_count: Array of per-bit usage counters for the MSTOP registers
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	atomic_t *mstop_count;

	struct reset_controller_dev rcdev;
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	void __iomem *base;
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @mstop_data: mstop data relating to module clock
 * @hw: handle between common and hardware-specific interfaces
 * @no_pm: flag to indicate PM is not supported
 * @on_index: CLK_ON register index
 * @on_bit: ON/MON bit
 * @mon_index: monitor register index (negative if unmonitored)
 * @mon_bit: monitor bit
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	unsigned int mstop_data;
	struct clk_hw hw;
	bool no_pm;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)

/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)

static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	unsigned int clk1, clk2;
	u64 rate;

	if (!PLL_CLK_ACCESS(pll_clk->conf))
		return 0;

	clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
	clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));

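	/*
	 * This computes the usual fractional-N PLL relation implied by the
	 * field macros above:
	 *
	 *   Fout = Fin * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV)
	 *
	 * KDIV is signed, so (MDIV << 16) + KDIV forms the multiplier in
	 * 16.16 fixed point; shifting by (16 + SDIV) drops the fixed point
	 * and applies the post divider in one step.
	 */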
	rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
			       16 + SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
}

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->cfg.conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}

static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

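	/*
	 * CPG_CLKSTATUS0 flags an in-flight divider update: wait for any
	 * previous update to complete before writing the new value, then
	 * wait again so the divider is known to have switched before
	 * returning.
	 */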
	spin_lock_irqsave(divider->lock, flags);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	spin_unlock_irqrestore(divider->lock, flags);

	return 0;

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.round_rate = rzv2h_ddiv_round_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}

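/*
 * Translate a two-cell DT clock specifier <type index> into a struct clk.
 * An illustrative consumer binding (node and cell values are placeholders):
 *
 *	clocks = <&cpg CPG_MOD 4>, <&cpg CPG_CORE 0>;
 */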
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
380 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
381 PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

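/*
 * MSTOP bits gate a module's bus interface and may be shared by several
 * module clocks, so priv->mstop_count keeps one usage counter per bit: a
 * bit is released (written with only its write-enable half set, clearing
 * the stop request) when its first user is enabled, and asserted again
 * only when its last user is disabled.
 */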
static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
					 u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	unsigned int index = (mstop_index - 1) * 16;
	atomic_t *mstop = &priv->mstop_count[index];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]))
			val |= BIT(i) << 16;
		atomic_inc(&mstop[i]);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
					  u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	unsigned int index = (mstop_index - 1) * 16;
	atomic_t *mstop = &priv->mstop_count[index];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]) ||
		    atomic_dec_and_test(&mstop[i]))
			val |= BIT(i) << 16 | BIT(i);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask;
	u32 offset;

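	/*
	 * Prefer the status monitor when the clock has one; clocks without
	 * a monitor (mon_index < 0) are judged by reading back their CLK_ON
	 * bit instead.
	 */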
	if (clock->mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(clock->mon_index);
		bitmask = BIT(clock->mon_bit);
	} else {
		offset = GET_CLK_ON_OFFSET(clock->on_index);
		bitmask = BIT(clock->on_bit);
	}

	return readl(priv->base + offset) & bitmask;
}

static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	bool enabled = rzv2h_mod_clock_is_enabled(hw);
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		enable ? "ON" : "OFF");

	if (enabled == enable)
		return 0;

	value = bitmask << 16;
	if (enable) {
		value |= bitmask;
		writel(value, priv->base + reg);
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else {
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
		writel(value, priv->base + reg);
	}

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + reg);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};

static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->no_pm = mod->no_pm;
	clock->priv = priv;
	clock->hw.init = &init;
	clock->mstop_data = mod->mstop_data;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	/*
	 * Ensure the module clocks and MSTOP bits are synchronized when they are
	 * turned ON by the bootloader. Enable MSTOP bits for module clocks that were
	 * turned ON in an earlier boot stage.
	 */
	if (clock->mstop_data != BUS_MSTOP_NONE &&
	    !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
		rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
		unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
		u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
		unsigned int index = (mstop_index - 1) * 16;
		atomic_t *mstop = &priv->mstop_count[index];
		unsigned long flags;
		unsigned int i;
		u32 val = 0;

		/*
		 * Critical clocks are turned ON immediately upon registration, and the
		 * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
		 * However, if the critical clocks were already turned ON by the initial
		 * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
		 */
		spin_lock_irqsave(&priv->rmw_lock, flags);
		for_each_set_bit(i, &mstop_mask, 16) {
			if (atomic_read(&mstop[i]))
				continue;
			val |= BIT(i) << 16;
			atomic_inc(&mstop[i]);
		}
		if (val)
			writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
	}

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}

static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};

static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

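	/*
	 * The DT specifier encodes a reset as (register index * 16 + bit);
	 * map it onto the matching entry of the SoC's reset table.
	 */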
	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};

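/*
 * Only module clocks that support PM participate in pm_clk handling; core
 * clocks and module clocks flagged no_pm are filtered out here.
 */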
static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
				const struct of_phandle_args *clkspec)
{
	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD: {
		struct rzv2h_cpg_priv *priv = pd->priv;
		unsigned int id = clkspec->args[1];
		struct mod_clock *clock;

		if (id >= priv->num_mod_clks)
			return false;

		if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
			return false;

		clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));

		return !clock->no_pm;
	}

	case CPG_CORE:
	default:
		return false;
	}
}

static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
					 sizeof(*priv->mstop_count), GFP_KERNEL);
	if (!priv->mstop_count)
		return -ENOMEM;

	priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
				    info->num_resets, GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

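	/*
	 * priv->clks[] holds core clocks at [0, num_core_clks) followed by
	 * module clocks; unpopulated slots stay at -ENOENT so later lookups
	 * can tell "not provided" apart from registration failures.
	 */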
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G047
	{
		.compatible = "renesas,r9a09g047-cpg",
		.data = &r9a09g047_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");
MODULE_LICENSE("GPL");