// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif
#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)
#define RZG3S_SEL_PLL		BIT(0)

#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
#define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)
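
/*
 * Illustrative decoding (example value made up, not from the datasheet): a
 * core clock "conf" word packs the register offset in bits [31:20], the
 * field shift in bits [19:12] and the field width in bits [11:8].  For
 * conf = 0x56404100, GET_REG_OFFSET() yields 0x564, GET_SHIFT() yields 4 and
 * GET_WIDTH() yields 1, i.e. a 1-bit field at bit 4 of register 0x564.
 */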

#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Writing a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz), and
	 * the index-to-value mapping is done by adding 1 to the index.
	 */
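	/*
	 * Illustrative sequence (values taken from the code below): switching
	 * SEL_SDHI0 from 533 MHz (2'b01) to 400 MHz (2'b10) therefore takes
	 * two monitored writes, only the first of which happens here:
	 *   writel((CPG_WEN_BIT | 3) << shift, ...);  - safe 266 MHz step
	 *   writel((CPG_WEN_BIT | 2) << shift, ...);  - target value, written
	 *                                               afterwards, typically
	 *                                               via .set_parent
	 */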

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}

int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
	/*
	 * The users of this notifier have different constraints:
	 * 1/ the SD divider cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ the OCTA/SPI divider cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD can have only one parent running at 800 MHz, and OCTA/SPI only
	 * one parent running at 400 MHz, the parent rate was already taken
	 * into account at the beginning of this function (by checking
	 * invalid_rate % new_rate). Now check the hardware divider and update
	 * it accordingly.
	 */
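	/*
	 * Illustrative check: with invalid_rate = 800 MHz, a new_rate of
	 * 400 MHz or 200 MHz gives invalid_rate % new_rate == 0, so the
	 * hardware divider is inspected below; a 266666667 Hz request leaves
	 * a remainder and the notifier already bailed out above.
	 */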
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update to complete. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  priv->base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

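/*
 * Worked example of the computation below (assuming the 24 MHz EXTAL that
 * EXTAL_FREQ_IN_MEGA_HZ encodes): for rate = 148500000, pl5_intin = 148 and
 * pl5_fracin = 0.5 * 2^24, so FOUTVCO = 24 MHz * 148.5 / 2 = 1.782 GHz and,
 * with both post-dividers at 1, FOUTPOSTDIV = 1.782 GHz.
 */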
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

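/*
 * Worked example with the defaults programmed at registration time
 * (clksrc = 1, dsi_div_a = 1, dsi_div_b = 2): the divisor below is
 * (1 << 1) * (2 + 1) = 6, halved again by clksrc, so a 1.782 GHz
 * FOUTPOSTDIV yields a 148.5 MHz VCLK.
 */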
static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 * OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *            |                   | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *            |--FOUT1PH0-------->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock\n");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5\n");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned long default_rate;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

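/*
 * Illustrative SAM PLL arithmetic (register values made up): the rate is
 * parent_rate * (MDIV + KDIV / 2^16) / 2^SDIV / PDIV.  With a 24 MHz
 * parent, MDIV = 133, KDIV = 0, SDIV = 1 and PDIV = 2, this gives
 * 24 MHz * 133 / 2 / 2 = 798 MHz.
 */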
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

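/*
 * Illustrative RZ/G3S PLL arithmetic (register values made up): the rate is
 * parent_rate * (nir + nfr / 4096) / (m * p).  With a 24 MHz parent,
 * NI = 99 (nir = 100), NF = 0, M = 0 (mr = 1) and P = 0 (pr = 1), this
 * gives 24 MHz * 100 = 2.4 GHz.
 */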
static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val, setting;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
	if (setting) {
		val = readl(priv->base + setting);
		if (val & RZG3S_SEL_PLL)
			return pll_clk->default_rate;
	}

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = priv->base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;
	pll_clk->default_rate = core->default_rate;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

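/*
 * Illustrative consumer binding for the two-cell xlate below (the clock
 * identifier is an example; names vary per SoC dt-binding header):
 *   clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
 * The first cell selects CPG_CORE or CPG_MOD, the second the clock index.
 */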
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mstp_clock, hw)

static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");

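	/*
	 * CLK_ON registers take a per-bit write enable in the upper 16 bits:
	 * writing (BIT(n) << 16) | BIT(n) switches clock n on, while writing
	 * BIT(n) << 16 alone switches it off.
	 */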
	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;
	int ret;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	clk = clock->hw.clk;
	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

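/*
 * Reset registers use the same upper-16-bit write-enable scheme as the
 * clock ON registers: writing only mask << 16 asserts the reset, while
 * writing (mask << 16) | mask releases it again.
 */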
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzg2l_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
{
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id;
	unsigned int i;

	if (clkspec->args_count != 2)
		return false;

	if (clkspec->args[0] != CPG_MOD)
		return false;

	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)
			return false;
	}

	return true;
}

/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[];
};

/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};

static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_priv *priv = pd->priv;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	struct genpd_onecell_data *celldata = data;

	for (unsigned int i = 0; i < celldata->num_domains; i++)
		pm_genpd_remove(celldata->domains[i]);
}

static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

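/*
 * The MSTOP registers follow the upper-16-bit write-enable pattern used
 * elsewhere in the CPG: power-on writes only mstop.mask << 16 to clear
 * the module-stop bits, power-off writes mstop.mask | (mstop.mask << 16)
 * to set them.
 */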
static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}

static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}

static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
{
	bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
	struct dev_power_governor *governor;
	int ret;

	if (always_on)
		governor = &pm_domain_always_on_gov;
	else
		governor = &simple_qos_governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	pd->genpd.power_on = rzg2l_cpg_power_on;
	pd->genpd.power_off = rzg2l_cpg_power_off;

	ret = pm_genpd_init(&pd->genpd, governor, !always_on);
	if (ret)
		return ret;

	if (always_on)
		ret = rzg2l_cpg_power_on(&pd->genpd);

	return ret;
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

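/*
 * Illustrative consumer binding for the one-cell xlate below (the cell is
 * matched against pd->id; the numeric value here is hypothetical):
 *   power-domains = <&cpg 5>;
 */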
static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}

static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->genpd.flags = info->pm_domains[i].genpd_flags;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd);
		if (ret)
			return ret;

		domains->domains[i] = &pd->genpd;
		/* Parent should be on the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
	if (ret)
		return ret;

	return 0;
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");