/*
 * (C) Copyright 2015 Google, Inc
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <errno.h>
#include <syscon.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/cru_rk3036.h>
#include <asm/arch/hardware.h>
#include <dm/lists.h>
#include <dt-bindings/clock/rk3036-cru.h>
#include <linux/log2.h>

DECLARE_GLOBAL_DATA_PTR;

enum {
	VCO_MAX_HZ	= 2400U * 1000000,
	VCO_MIN_HZ	= 600 * 1000000,
	OUTPUT_MAX_HZ	= 2400U * 1000000,
	OUTPUT_MIN_HZ	= 24 * 1000000,
};

#define RATE_TO_DIV(input_rate, output_rate) \
	((input_rate) / (output_rate) - 1)

#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))
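
/*
 * The CRU divider fields hold (divisor - 1), so RATE_TO_DIV() and
 * DIV_TO_RATE() convert between a rate pair and the register field value.
 * For example, a field value of 1 divides by 2: DIV_TO_RATE(48 MHz, 1) is
 * 24 MHz, and RATE_TO_DIV(48 MHz, 24 MHz) gives the field value 1 back.
 */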

#define PLL_DIVISORS(hz, _refdiv, _postdiv1, _postdiv2) {\
	.refdiv = _refdiv,\
	.fbdiv = (u32)((u64)hz * _refdiv * _postdiv1 * _postdiv2 / OSC_HZ),\
	.postdiv1 = _postdiv1, .postdiv2 = _postdiv2};\
	_Static_assert(((u64)hz * _refdiv * _postdiv1 * _postdiv2 / OSC_HZ) *\
		       OSC_HZ / (_refdiv * _postdiv1 * _postdiv2) == hz,\
		       #hz "Hz cannot be hit with PLL "\
		       "divisors on line " __stringify(__LINE__));

/* use integer mode */
static const struct pll_div apll_init_cfg = PLL_DIVISORS(APLL_HZ, 1, 3, 1);
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 2, 2, 1);
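
/*
 * Worked example for apll_init_cfg above, assuming OSC_HZ is 24 MHz and
 * APLL_HZ is 600 MHz (both values come from the SoC clock headers, so the
 * exact numbers may differ): PLL_DIVISORS(APLL_HZ, 1, 3, 1) computes
 * fbdiv = 600M * 1 * 3 * 1 / 24M = 75, so VCO = 24M / 1 * 75 = 1800 MHz and
 * output = 1800M / (3 * 1) = 600 MHz, within the VCO/output limits checked
 * in rkclk_set_pll(). The _Static_assert in PLL_DIVISORS rejects requested
 * rates that integer division cannot reproduce exactly.
 */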

static int rkclk_set_pll(struct rk3036_cru *cru, enum rk_clk_id clk_id,
			 const struct pll_div *div)
{
	int pll_id = rk_pll_id(clk_id);
	struct rk3036_pll *pll = &cru->pll[pll_id];

	/* All PLLs have the same VCO and output frequency range restrictions. */
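	/*
	 * Divide OSC_HZ by 1000 before multiplying by fbdiv so the
	 * intermediate product stays within 32 bits, then scale back up.
	 */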
	uint vco_hz = OSC_HZ / 1000 * div->fbdiv / div->refdiv * 1000;
	uint output_hz = vco_hz / div->postdiv1 / div->postdiv2;

57 debug("PLL at %p: fbdiv=%d, refdiv=%d, postdiv1=%d, postdiv2=%d,\
58 vco=%u Hz, output=%u Hz\n",
59 pll, div->fbdiv, div->refdiv, div->postdiv1,
60 div->postdiv2, vco_hz, output_hz);
	assert(vco_hz >= VCO_MIN_HZ && vco_hz <= VCO_MAX_HZ &&
	       output_hz >= OUTPUT_MIN_HZ && output_hz <= OUTPUT_MAX_HZ);

	/* use integer mode */
	rk_setreg(&pll->con1, 1 << PLL_DSMPD_SHIFT);

	rk_clrsetreg(&pll->con0,
		     PLL_POSTDIV1_MASK | PLL_FBDIV_MASK,
		     (div->postdiv1 << PLL_POSTDIV1_SHIFT) | div->fbdiv);
	rk_clrsetreg(&pll->con1, PLL_POSTDIV2_MASK | PLL_REFDIV_MASK,
		     (div->postdiv2 << PLL_POSTDIV2_SHIFT |
		      div->refdiv << PLL_REFDIV_SHIFT));

	/* wait for the PLL to lock */
	while (readl(&pll->con1) & (1 << PLL_LOCK_STATUS_SHIFT))
		udelay(1);

	return 0;
}

static void rkclk_init(struct rk3036_cru *cru)
{
	u32 aclk_div;
	u32 hclk_div;
	u32 pclk_div;

	/* switch the PLLs to slow mode while they are being reconfigured */
	rk_clrsetreg(&cru->cru_mode_con,
		     GPLL_MODE_MASK | APLL_MODE_MASK,
		     GPLL_MODE_SLOW << GPLL_MODE_SHIFT |
		     APLL_MODE_SLOW << APLL_MODE_SHIFT);

	/* init pll */
	rkclk_set_pll(cru, CLK_ARM, &apll_init_cfg);
	rkclk_set_pll(cru, CLK_GENERAL, &gpll_init_cfg);

	/*
	 * select apll as cpu/core clock pll source and
	 * set up dependent divisors for PERI and ACLK clocks.
	 * core hz : apll = 1:1
	 */
	aclk_div = APLL_HZ / CORE_ACLK_HZ - 1;
	assert((aclk_div + 1) * CORE_ACLK_HZ == APLL_HZ && aclk_div < 0x7);

	pclk_div = APLL_HZ / CORE_PERI_HZ - 1;
	assert((pclk_div + 1) * CORE_PERI_HZ == APLL_HZ && pclk_div < 0xf);

	rk_clrsetreg(&cru->cru_clksel_con[0],
		     CORE_CLK_PLL_SEL_MASK | CORE_DIV_CON_MASK,
		     CORE_CLK_PLL_SEL_APLL << CORE_CLK_PLL_SEL_SHIFT |
		     0 << CORE_DIV_CON_SHIFT);

	rk_clrsetreg(&cru->cru_clksel_con[1],
		     CORE_ACLK_DIV_MASK | CORE_PERI_DIV_MASK,
		     aclk_div << CORE_ACLK_DIV_SHIFT |
		     pclk_div << CORE_PERI_DIV_SHIFT);

	/*
	 * select gpll as pd_bus bus clock source and
	 * set up dependent divisors for PCLK/HCLK and ACLK clocks.
	 */
	aclk_div = GPLL_HZ / BUS_ACLK_HZ - 1;
	assert((aclk_div + 1) * BUS_ACLK_HZ == GPLL_HZ && aclk_div <= 0x1f);

	pclk_div = GPLL_HZ / BUS_PCLK_HZ - 1;
	assert((pclk_div + 1) * BUS_PCLK_HZ == GPLL_HZ && pclk_div <= 0x7);

	hclk_div = GPLL_HZ / BUS_HCLK_HZ - 1;
	assert((hclk_div + 1) * BUS_HCLK_HZ == GPLL_HZ && hclk_div <= 0x3);

	rk_clrsetreg(&cru->cru_clksel_con[0],
		     BUS_ACLK_PLL_SEL_MASK | BUS_ACLK_DIV_MASK,
		     BUS_ACLK_PLL_SEL_GPLL << BUS_ACLK_PLL_SEL_SHIFT |
		     aclk_div << BUS_ACLK_DIV_SHIFT);

	rk_clrsetreg(&cru->cru_clksel_con[1],
		     BUS_PCLK_DIV_MASK | BUS_HCLK_DIV_MASK,
		     pclk_div << BUS_PCLK_DIV_SHIFT |
		     hclk_div << BUS_HCLK_DIV_SHIFT);

	/*
	 * select gpll as pd_peri bus clock source and
	 * set up dependent divisors for PCLK/HCLK and ACLK clocks.
	 */
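	/*
	 * Unlike the pd_bus dividers above, the pd_peri HCLK/PCLK dividers
	 * are power-of-two ratios relative to ACLK, hence the use of ilog2()
	 * and the (1 << div) checks in the asserts below.
	 */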
	aclk_div = GPLL_HZ / PERI_ACLK_HZ - 1;
	assert((aclk_div + 1) * PERI_ACLK_HZ == GPLL_HZ && aclk_div < 0x1f);

	hclk_div = ilog2(PERI_ACLK_HZ / PERI_HCLK_HZ);
	assert((1 << hclk_div) * PERI_HCLK_HZ ==
	       PERI_ACLK_HZ && (hclk_div < 0x4));

	pclk_div = ilog2(PERI_ACLK_HZ / PERI_PCLK_HZ);
	assert((1 << pclk_div) * PERI_PCLK_HZ ==
	       PERI_ACLK_HZ && pclk_div < 0x8);

	rk_clrsetreg(&cru->cru_clksel_con[10],
		     PERI_PLL_SEL_MASK | PERI_PCLK_DIV_MASK |
		     PERI_HCLK_DIV_MASK | PERI_ACLK_DIV_MASK,
		     PERI_PLL_GPLL << PERI_PLL_SEL_SHIFT |
		     pclk_div << PERI_PCLK_DIV_SHIFT |
		     hclk_div << PERI_HCLK_DIV_SHIFT |
		     aclk_div << PERI_ACLK_DIV_SHIFT);

	/* switch the PLLs back to normal mode */
	rk_clrsetreg(&cru->cru_mode_con,
		     GPLL_MODE_MASK | APLL_MODE_MASK,
		     GPLL_MODE_NORM << GPLL_MODE_SHIFT |
		     APLL_MODE_NORM << APLL_MODE_SHIFT);
}

/* Get pll rate by id */
static uint32_t rkclk_pll_get_rate(struct rk3036_cru *cru,
				   enum rk_clk_id clk_id)
{
	uint32_t refdiv, fbdiv, postdiv1, postdiv2;
	uint32_t con;
	int pll_id = rk_pll_id(clk_id);
	struct rk3036_pll *pll = &cru->pll[pll_id];
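	/*
	 * These tables are indexed by enum rk_clk_id; entries of
	 * 0xff/0xffffffff mark clock ids that have no mode field in
	 * cru_mode_con, so only the APLL, DPLL and GPLL slots are used.
	 */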
	static u8 clk_shift[CLK_COUNT] = {
		0xff, APLL_MODE_SHIFT, DPLL_MODE_SHIFT, 0xff,
		GPLL_MODE_SHIFT, 0xff
	};
	static u32 clk_mask[CLK_COUNT] = {
		0xffffffff, APLL_MODE_MASK, DPLL_MODE_MASK, 0xffffffff,
		GPLL_MODE_MASK, 0xffffffff
	};
	uint shift;
	uint mask;

	con = readl(&cru->cru_mode_con);
	shift = clk_shift[clk_id];
	mask = clk_mask[clk_id];

	switch ((con & mask) >> shift) {
	case GPLL_MODE_SLOW:
		return OSC_HZ;
	case GPLL_MODE_NORM:
		/* normal mode */
		con = readl(&pll->con0);
		postdiv1 = (con & PLL_POSTDIV1_MASK) >> PLL_POSTDIV1_SHIFT;
		fbdiv = (con & PLL_FBDIV_MASK) >> PLL_FBDIV_SHIFT;
		con = readl(&pll->con1);
		postdiv2 = (con & PLL_POSTDIV2_MASK) >> PLL_POSTDIV2_SHIFT;
		refdiv = (con & PLL_REFDIV_MASK) >> PLL_REFDIV_SHIFT;
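		/*
		 * rate = (OSC / refdiv) * fbdiv / (postdiv1 * postdiv2);
		 * the 24 MHz reference is kept in MHz here so the
		 * multiplication cannot overflow 32 bits.
		 */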
		return (24 * fbdiv / (refdiv * postdiv1 * postdiv2)) * 1000000;
	case GPLL_MODE_DEEP:
	default:
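		/*
		 * Deep slow mode (and any unexpected value) is reported as
		 * the 32.768 kHz clock that drives the output in that mode.
		 */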
		return 32768;
	}
}

static ulong rockchip_mmc_get_clk(struct rk3036_cru *cru, uint clk_general_rate,
				  int periph)
{
	uint src_rate;
	uint div, mux;
	u32 con;

	switch (periph) {
	case HCLK_EMMC:
	case SCLK_EMMC:
		con = readl(&cru->cru_clksel_con[12]);
		mux = (con & EMMC_PLL_MASK) >> EMMC_PLL_SHIFT;
		div = (con & EMMC_DIV_MASK) >> EMMC_DIV_SHIFT;
		break;
	case HCLK_SDIO:
	case SCLK_SDIO:
		con = readl(&cru->cru_clksel_con[11]);
		mux = (con & MMC0_PLL_MASK) >> MMC0_PLL_SHIFT;
		div = (con & MMC0_DIV_MASK) >> MMC0_DIV_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	src_rate = mux == EMMC_SEL_24M ? OSC_HZ : clk_general_rate;
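	/*
	 * The MMC controller divides this clock by two internally,
	 * hence the final division by two.
	 */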
	return DIV_TO_RATE(src_rate, div) / 2;
}

static ulong rockchip_mmc_set_clk(struct rk3036_cru *cru, uint clk_general_rate,
				  int periph, uint freq)
{
	int src_clk_div;
	int mux;

	debug("%s: clk_general_rate=%u\n", __func__, clk_general_rate);

	/* the MMC controller divides the source clock by two internally */
	src_clk_div = DIV_ROUND_UP(clk_general_rate / 2, freq);

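	/*
	 * The divider field is 7 bits wide (the assert below keeps
	 * src_clk_div - 1 below 128), so when GPLL / 2 cannot be divided
	 * down far enough, fall back to the 24 MHz oscillator as source.
	 */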
	if (src_clk_div > 128) {
		src_clk_div = DIV_ROUND_UP(OSC_HZ / 2, freq);
		assert(src_clk_div - 1 < 128);
		mux = EMMC_SEL_24M;
	} else {
		mux = EMMC_SEL_GPLL;
	}

	switch (periph) {
	case HCLK_EMMC:
	case SCLK_EMMC:
		rk_clrsetreg(&cru->cru_clksel_con[12],
			     EMMC_PLL_MASK | EMMC_DIV_MASK,
			     mux << EMMC_PLL_SHIFT |
			     (src_clk_div - 1) << EMMC_DIV_SHIFT);
		break;
	case HCLK_SDIO:
	case SCLK_SDIO:
		rk_clrsetreg(&cru->cru_clksel_con[11],
			     MMC0_PLL_MASK | MMC0_DIV_MASK,
			     mux << MMC0_PLL_SHIFT |
			     (src_clk_div - 1) << MMC0_DIV_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	return rockchip_mmc_get_clk(cru, clk_general_rate, periph);
}

static ulong rk3036_clk_get_rate(struct clk *clk)
{
	struct rk3036_clk_priv *priv = dev_get_priv(clk->dev);

	switch (clk->id) {
	case 0 ... 63:
		return rkclk_pll_get_rate(priv->cru, clk->id);
	default:
		return -ENOENT;
	}
}

static ulong rk3036_clk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3036_clk_priv *priv = dev_get_priv(clk->dev);
	ulong new_rate, gclk_rate;

	gclk_rate = rkclk_pll_get_rate(priv->cru, CLK_GENERAL);
	switch (clk->id) {
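	/*
	 * The PLL rates (ids 0..63) are fixed by rkclk_init() at probe time,
	 * so a set_rate request on them is accepted as a no-op.
	 */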
	case 0 ... 63:
		return 0;
	case HCLK_EMMC:
	case SCLK_EMMC:
		new_rate = rockchip_mmc_set_clk(priv->cru, gclk_rate,
						clk->id, rate);
		break;
	default:
		return -ENOENT;
	}

	return new_rate;
}

static struct clk_ops rk3036_clk_ops = {
	.get_rate	= rk3036_clk_get_rate,
	.set_rate	= rk3036_clk_set_rate,
};

static int rk3036_clk_probe(struct udevice *dev)
{
	struct rk3036_clk_priv *priv = dev_get_priv(dev);

	priv->cru = dev_read_addr_ptr(dev);
	rkclk_init(priv->cru);

	return 0;
}

static int rk3036_clk_bind(struct udevice *dev)
{
	int ret;
	struct udevice *sys_child;
	struct sysreset_reg *priv;

	/* The reset driver does not have a device node, so bind it here */
	ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
				 &sys_child);
	if (ret) {
		debug("Warning: No sysreset driver: ret=%d\n", ret);
	} else {
		priv = malloc(sizeof(struct sysreset_reg));
		priv->glb_srst_fst_value = offsetof(struct rk3036_cru,
						    cru_glb_srst_fst_value);
		priv->glb_srst_snd_value = offsetof(struct rk3036_cru,
						    cru_glb_srst_snd_value);
		sys_child->priv = priv;
	}

#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	ret = offsetof(struct rk3036_cru, cru_softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 9);
	if (ret)
354 debug("Warning: software reset driver bind faile\n");
#endif

	return 0;
}

static const struct udevice_id rk3036_clk_ids[] = {
	{ .compatible = "rockchip,rk3036-cru" },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3036_cru) = {
	.name	= "clk_rk3036",
	.id	= UCLASS_CLK,
	.of_match	= rk3036_clk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3036_clk_priv),
	.ops	= &rk3036_clk_ops,
	.bind	= rk3036_clk_bind,
	.probe	= rk3036_clk_probe,
};