// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * clk-xgene.c - AppliedMicro X-Gene Clock Interface
 *
 * Copyright (c) 2013, Applied Micro Circuits Corporation
 * Author: Loc Ho <[email protected]>
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>

/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src)			((src) & 0x000001ff)
#define SC_N_DIV_RD(src)		((src) & 0x0000007f)
#define SC_OUTDIV2(src)			(((src) & 0x00000100) >> 8)

/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src)			(((src) & 0x07000000) >> 24)
#define CLKOD_RD(src)			(((src) & 0x00300000) >> 20)
#define REGSPEC_RESET_F1_MASK		0x00010000
#define CLKF_RD(src)			((src) & 0x000001ff)

#define XGENE_CLK_DRIVER_VER		"0.1"

static DEFINE_SPINLOCK(clk_lock);

static inline u32 xgene_clk_read(void __iomem *csr)
{
	return readl_relaxed(csr);
}

static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
	writel_relaxed(data, csr);
}

/* PLL Clock */
enum xgene_pll_type {
	PLL_TYPE_PCP = 0,
	PLL_TYPE_SOC = 1,
};

struct xgene_clk_pll {
	struct clk_hw	hw;
	void __iomem	*reg;
	spinlock_t	*lock;
	u32		pll_offset;
	enum xgene_pll_type	type;
	int		version;
};

#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)

static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
	u32 data;

	data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
	pr_debug("%s pll %s\n", clk_hw_get_name(hw),
		 data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");

	return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
}

static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
	unsigned long fref;
	unsigned long fvco;
	u32 pll;
	u32 nref;
	u32 nout;
	u32 nfb;

	pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);

	if (pllclk->version <= 1) {
		if (pllclk->type == PLL_TYPE_PCP) {
			/*
			 * PLL VCO = Reference clock * NF
			 * PCP PLL = PLL_VCO / 2
			 */
			nout = 2;
			fvco = parent_rate * (N_DIV_RD(pll) + 4);
		} else {
			/*
			 * Fref = Reference Clock / NREF;
			 * Fvco = Fref * NFB;
			 * Fout = Fvco / NOUT;
			 */
			nref = CLKR_RD(pll) + 1;
			nout = CLKOD_RD(pll) + 1;
			nfb = CLKF_RD(pll);
			fref = parent_rate / nref;
			fvco = fref * nfb;
		}
	} else {
		/*
		 * fvco = Reference clock * FBDIVC
		 * PLL freq = fvco / NOUT
		 */
		nout = SC_OUTDIV2(pll) ? 2 : 3;
		fvco = parent_rate * SC_N_DIV_RD(pll);
	}
	pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
		 clk_hw_get_name(hw), fvco / nout, parent_rate,
		 pllclk->version);

	return fvco / nout;
}
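
/*
 * Worked example of the v1 SOC PLL math above (register values are
 * hypothetical, not from real hardware): with a 100 MHz reference and a
 * register reading CLKR = 0, CLKOD = 0, CLKF = 20, we get
 * Fref = 100 MHz / 1, Fvco = 100 MHz * 20 = 2 GHz, and
 * Fout = 2 GHz / 1 = 2 GHz.
 */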

static const struct clk_ops xgene_clk_pll_ops = {
	.is_enabled = xgene_clk_pll_is_enabled,
	.recalc_rate = xgene_clk_pll_recalc_rate,
};

static struct clk *xgene_register_clk_pll(struct device *dev,
	const char *name, const char *parent_name,
	unsigned long flags, void __iomem *reg, u32 pll_offset,
	u32 type, spinlock_t *lock, int version)
{
	struct xgene_clk_pll *apmclk;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate the APM clock structure */
	apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
	if (!apmclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &xgene_clk_pll_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	apmclk->version = version;
	apmclk->reg = reg;
	apmclk->lock = lock;
	apmclk->pll_offset = pll_offset;
	apmclk->type = type;
	apmclk->hw.init = &init;

	/* Register the clock */
	clk = clk_register(dev, &apmclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(apmclk);
		/* propagate the error so callers checking IS_ERR() see it */
		return clk;
	}
	return clk;
}

static int xgene_pllclk_version(struct device_node *np)
{
	if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
		return 1;
	if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
		return 1;
	return 2;
}

static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
	const char *clk_name = np->full_name;
	struct clk *clk;
	void __iomem *reg;
	int version = xgene_pllclk_version(np);

	reg = of_iomap(np, 0);
	if (!reg) {
		pr_err("Unable to map CSR register for %pOF\n", np);
		return;
	}
	of_property_read_string(np, "clock-output-names", &clk_name);
	clk = xgene_register_clk_pll(NULL,
			clk_name, of_clk_get_parent_name(np, 0),
			0, reg, 0, pll_type, &clk_lock,
			version);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
		clk_register_clkdev(clk, clk_name, NULL);
		pr_debug("Add %s clock PLL\n", clk_name);
	}
}

static void xgene_socpllclk_init(struct device_node *np)
{
	xgene_pllclk_init(np, PLL_TYPE_SOC);
}

static void xgene_pcppllclk_init(struct device_node *np)
{
	xgene_pllclk_init(np, PLL_TYPE_PCP);
}

/**
 * struct xgene_clk_pmd - PMD clock
 *
 * @hw:		handle between common and hardware-specific interfaces
 * @reg:	register containing the fractional scale multiplier (scaler)
 * @shift:	shift to the unit bit field
 * @mask:	mask to the unit bit field
 * @denom:	1/denominator unit
 * @lock:	register lock
 * @flags:	XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the
 *		value read from the register plus one. For example,
 *			0 for (0 + 1) / denom,
 *			1 for (1 + 1) / denom and etc.
 *		If this flag is set, it is
 *			0 for (denom - 0) / denom,
 *			1 for (denom - 1) / denom and etc.
 */
struct xgene_clk_pmd {
	struct clk_hw	hw;
	void __iomem	*reg;
	u8		shift;
	u32		mask;
	u64		denom;
	u32		flags;
	spinlock_t	*lock;
};

#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)

#define XGENE_CLK_PMD_SCALE_INVERTED	BIT(0)
#define XGENE_CLK_PMD_SHIFT		8
#define XGENE_CLK_PMD_WIDTH		3
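
/*
 * Worked example: xgene_pmdclk_init() below registers this clock with
 * XGENE_CLK_PMD_WIDTH = 3, so denom = 2^3 = 8, and with the inverted
 * scale flag set. A field value of 1 therefore yields a scaler of
 * (8 - 1) / 8, i.e. the output runs at 7/8 of the parent rate.
 */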

static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
	unsigned long flags = 0;
	u64 ret, scale;
	u32 val;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	ret = (u64)parent_rate;

	scale = (val & fd->mask) >> fd->shift;
	if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
		scale = fd->denom - scale;
	else
		scale++;

	/* freq = parent_rate * scaler / denom */
	do_div(ret, fd->denom);
	ret *= scale;
	if (ret == 0)
		ret = (u64)parent_rate;

	return ret;
}

static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
	u64 ret, scale;

	if (!rate || rate >= *parent_rate)
		return *parent_rate;

	/* freq = parent_rate * scaler / denom */
	ret = rate * fd->denom;
	scale = DIV_ROUND_UP_ULL(ret, *parent_rate);

	ret = (u64)*parent_rate * scale;
	do_div(ret, fd->denom);

	return ret;
}

static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
	unsigned long flags = 0;
	u64 scale, ret;
	u32 val;

	/*
	 * Compute the scaler:
	 *
	 * freq = parent_rate * scaler / denom, or
	 * scaler = freq * denom / parent_rate
	 */
	ret = rate * fd->denom;
	scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);

	/* Check if inverted */
	if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
		scale = fd->denom - scale;
	else
		scale--;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = readl(fd->reg);
	val &= ~fd->mask;
	val |= (scale << fd->shift);
	writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
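
/*
 * Illustrative numbers only: asking for 7/8 of a 1 GHz parent with
 * denom = 8 gives scale = DIV_ROUND_UP(875 MHz * 8, 1 GHz) = 7; with the
 * inverted flag the value written to the field is 8 - 7 = 1, which
 * recalc_rate() above maps back to (8 - 1) / 8 of the parent.
 */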

static const struct clk_ops xgene_clk_pmd_ops = {
	.recalc_rate = xgene_clk_pmd_recalc_rate,
	.round_rate = xgene_clk_pmd_round_rate,
	.set_rate = xgene_clk_pmd_set_rate,
};

static struct clk *
xgene_register_clk_pmd(struct device *dev,
		       const char *name, const char *parent_name,
		       unsigned long flags, void __iomem *reg, u8 shift,
		       u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
{
	struct xgene_clk_pmd *fd;
	struct clk_init_data init;
	struct clk *clk;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &xgene_clk_pmd_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	fd->reg = reg;
	fd->shift = shift;
	fd->mask = (BIT(width) - 1) << shift;
	fd->denom = denom;
	fd->flags = clk_flags;
	fd->lock = lock;
	fd->hw.init = &init;

	clk = clk_register(dev, &fd->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(fd);
		/* propagate the error so callers checking IS_ERR() see it */
		return clk;
	}

	return clk;
}

static void xgene_pmdclk_init(struct device_node *np)
{
	const char *clk_name = np->full_name;
	void __iomem *csr_reg;
	struct resource res;
	struct clk *clk;
	u64 denom;
	u32 flags = 0;
	int rc;

	/* Check if the entry is disabled */
	if (!of_device_is_available(np))
		return;

	/* Parse the DTS register for resource */
	rc = of_address_to_resource(np, 0, &res);
	if (rc != 0) {
		pr_err("no DTS register for %pOF\n", np);
		return;
	}
	csr_reg = of_iomap(np, 0);
	if (!csr_reg) {
		pr_err("Unable to map resource for %pOF\n", np);
		return;
	}
	of_property_read_string(np, "clock-output-names", &clk_name);

	denom = BIT(XGENE_CLK_PMD_WIDTH);
	flags |= XGENE_CLK_PMD_SCALE_INVERTED;

	clk = xgene_register_clk_pmd(NULL, clk_name,
				     of_clk_get_parent_name(np, 0), 0,
				     csr_reg, XGENE_CLK_PMD_SHIFT,
				     XGENE_CLK_PMD_WIDTH, denom,
				     flags, &clk_lock);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
		clk_register_clkdev(clk, clk_name, NULL);
		pr_debug("Add %s clock\n", clk_name);
	} else {
		/* csr_reg was verified non-NULL above */
		iounmap(csr_reg);
	}
}

/* IP Clock */
struct xgene_dev_parameters {
	void __iomem *csr_reg;		/* CSR for IP clock */
	u32 reg_clk_offset;		/* Offset to clock enable CSR */
	u32 reg_clk_mask;		/* Mask bit for clock enable */
	u32 reg_csr_offset;		/* Offset to CSR reset */
	u32 reg_csr_mask;		/* Mask bit for disable CSR reset */
	void __iomem *divider_reg;	/* CSR for divider */
	u32 reg_divider_offset;		/* Offset to divider register */
	u32 reg_divider_shift;		/* Bit shift of the divider field */
	u32 reg_divider_width;		/* Bit width of the divider field */
};

struct xgene_clk {
	struct clk_hw	hw;
	spinlock_t	*lock;
	struct xgene_dev_parameters	param;
};

#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)
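
/*
 * Worked example (values hypothetical): with the defaults applied by
 * xgene_devclk_init() below (reg_clk_offset = 0x8, reg_clk_mask = 0xF),
 * xgene_clk_enable() ORs 0xF into the register at csr_reg + 0x8 to
 * enable the clock, then clears reg_csr_mask at csr_reg + reg_csr_offset
 * to take the block out of reset; xgene_clk_disable() performs the
 * mirror-image sequence.
 */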

static int xgene_clk_enable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg) {
		pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
		/* First, enable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
				      pclk->param.reg_clk_offset);
		data |= pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
				pclk->param.reg_clk_offset);
		pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
			 clk_hw_get_name(hw),
			 pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
			 data);

		/* Second, take the CSR out of reset */
		data = xgene_clk_read(pclk->param.csr_reg +
				      pclk->param.reg_csr_offset);
		data &= ~pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
				pclk->param.reg_csr_offset);
		pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
			 clk_hw_get_name(hw),
			 pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
			 data);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);

	return 0;
}

static void xgene_clk_disable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg) {
		pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
		/* First, put the CSR in reset */
		data = xgene_clk_read(pclk->param.csr_reg +
				      pclk->param.reg_csr_offset);
		data |= pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
				pclk->param.reg_csr_offset);

		/* Second, disable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
				      pclk->param.reg_clk_offset);
		data &= ~pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
				pclk->param.reg_clk_offset);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);
}

static int xgene_clk_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data;

	/* Without a clock-enable CSR, the clock is always running */
	if (!pclk->param.csr_reg)
		return 1;

	pr_debug("%s clock checking\n", clk_hw_get_name(hw));
	data = xgene_clk_read(pclk->param.csr_reg +
			      pclk->param.reg_clk_offset);
	pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
		 str_enabled_disabled(data & pclk->param.reg_clk_mask));

	return data & pclk->param.reg_clk_mask ? 1 : 0;
}

static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data;

	if (pclk->param.divider_reg) {
		data = xgene_clk_read(pclk->param.divider_reg +
				      pclk->param.reg_divider_offset);
		data >>= pclk->param.reg_divider_shift;
		data &= (1 << pclk->param.reg_divider_width) - 1;

		pr_debug("%s clock recalc rate %ld parent %ld\n",
			 clk_hw_get_name(hw),
			 parent_rate / data, parent_rate);

		return parent_rate / data;
	} else {
		pr_debug("%s clock recalc rate %ld parent %ld\n",
			 clk_hw_get_name(hw), parent_rate, parent_rate);
		return parent_rate;
	}
}

static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;
	u32 divider;
	u32 divider_save;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.divider_reg) {
		/* Let's compute the divider */
		if (rate > parent_rate)
			rate = parent_rate;
		divider_save = divider = parent_rate / rate; /* Rounded down */
		divider &= (1 << pclk->param.reg_divider_width) - 1;
		divider <<= pclk->param.reg_divider_shift;

		/* Set new divider */
		data = xgene_clk_read(pclk->param.divider_reg +
				      pclk->param.reg_divider_offset);
		data &= ~(((1 << pclk->param.reg_divider_width) - 1)
			  << pclk->param.reg_divider_shift);
		data |= divider;
		xgene_clk_write(data, pclk->param.divider_reg +
				pclk->param.reg_divider_offset);
		pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
			 parent_rate / divider_save);
	} else {
		divider_save = 1;
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);

	return parent_rate / divider_save;
}

static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *prate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long parent_rate = *prate;
	u32 divider;

	if (pclk->param.divider_reg) {
		/* Let's compute the divider */
		if (rate > parent_rate)
			rate = parent_rate;
		divider = parent_rate / rate; /* Rounded down */
	} else {
		divider = 1;
	}

	return parent_rate / divider;
}
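
/*
 * Divider rounding, by example (numbers illustrative): with a 500 MHz
 * parent, a 300 MHz request computes divider = 500 / 300 = 1 (rounded
 * down), so the clock stays at 500 MHz; a 200 MHz request computes
 * divider = 2 and rounds to 250 MHz. Because the divider itself rounds
 * down, the resulting rate is never below the requested rate.
 */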

static const struct clk_ops xgene_clk_ops = {
	.enable = xgene_clk_enable,
	.disable = xgene_clk_disable,
	.is_enabled = xgene_clk_is_enabled,
	.recalc_rate = xgene_clk_recalc_rate,
	.set_rate = xgene_clk_set_rate,
	.round_rate = xgene_clk_round_rate,
};

static struct clk *xgene_register_clk(struct device *dev,
	const char *name, const char *parent_name,
	struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
	struct xgene_clk *apmclk;
	struct clk *clk;
	struct clk_init_data init;
	int rc;

	/* allocate the APM clock structure */
	apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
	if (!apmclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &xgene_clk_ops;
	init.flags = 0;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	apmclk->lock = lock;
	apmclk->hw.init = &init;
	apmclk->param = *parameters;

	/* Register the clock */
	clk = clk_register(dev, &apmclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(apmclk);
		return clk;
	}

	/* Register the clock for lookup */
	rc = clk_register_clkdev(clk, name, NULL);
	if (rc != 0) {
		pr_err("%s: could not register lookup clk %s\n",
		       __func__, name);
	}
	return clk;
}

static void __init xgene_devclk_init(struct device_node *np)
{
	const char *clk_name = np->full_name;
	struct clk *clk;
	struct resource res;
	int rc;
	struct xgene_dev_parameters parameters;
	int i;

	/* Check if the entry is disabled */
	if (!of_device_is_available(np))
		return;

	/* Parse the DTS register for resource */
	parameters.csr_reg = NULL;
	parameters.divider_reg = NULL;
	for (i = 0; i < 2; i++) {
		void __iomem *map_res;

		rc = of_address_to_resource(np, i, &res);
		if (rc != 0) {
			if (i == 0) {
				pr_err("no DTS register for %pOF\n", np);
				return;
			}
			break;
		}
		map_res = of_iomap(np, i);
		if (!map_res) {
			pr_err("Unable to map resource %d for %pOF\n", i, np);
			goto err;
		}
		if (strcmp(res.name, "div-reg") == 0)
			parameters.divider_reg = map_res;
		else /* if (strcmp(res.name, "csr-reg") == 0) */
			parameters.csr_reg = map_res;
	}
	if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
		parameters.reg_csr_offset = 0;
	if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
		parameters.reg_csr_mask = 0xF;
	if (of_property_read_u32(np, "enable-offset",
				 &parameters.reg_clk_offset))
		parameters.reg_clk_offset = 0x8;
	if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
		parameters.reg_clk_mask = 0xF;
	if (of_property_read_u32(np, "divider-offset",
				 &parameters.reg_divider_offset))
		parameters.reg_divider_offset = 0;
	if (of_property_read_u32(np, "divider-width",
				 &parameters.reg_divider_width))
		parameters.reg_divider_width = 0;
	if (of_property_read_u32(np, "divider-shift",
				 &parameters.reg_divider_shift))
		parameters.reg_divider_shift = 0;
	of_property_read_string(np, "clock-output-names", &clk_name);

	clk = xgene_register_clk(NULL, clk_name,
				 of_clk_get_parent_name(np, 0), &parameters,
				 &clk_lock);
	if (IS_ERR(clk))
		goto err;
	pr_debug("Add %s clock\n", clk_name);
	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc != 0)
		pr_err("%s: could not register provider clk %pOF\n",
		       __func__, np);

	return;

err:
	if (parameters.csr_reg)
		iounmap(parameters.csr_reg);
	if (parameters.divider_reg)
		iounmap(parameters.divider_reg);
}

CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
	       xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
	       xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
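
/*
 * Sketch of a matching device tree node (illustrative only: the node
 * name, unit address, parent label and register values are made up;
 * the authoritative bindings live under
 * Documentation/devicetree/bindings/). It uses only properties this
 * driver actually parses:
 *
 *	sdioclk: sdioclk@1f2ac000 {
 *		compatible = "apm,xgene-device-clock";
 *		#clock-cells = <1>;
 *		clocks = <&socplldiv2 0>;
 *		reg = <0x0 0x1f2ac000 0x0 0x1000>;
 *		reg-names = "csr-reg";
 *		csr-mask = <0x2>;
 *		clock-output-names = "sdioclk";
 *	};
 */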