blob: 1327116f195e31cefeb78cc8ba56fd37bbc695f2 [file] [log] [blame]
Andy Yanb9909aa2017-05-15 17:49:56 +08001/*
2 * (C) Copyright 2017 Rockchip Electronics Co., Ltd
3 * Author: Andy Yan <andy.yan@rock-chips.com>
Philipp Tomsich34b76132017-06-22 23:47:11 +02004 * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
Andy Yanb9909aa2017-05-15 17:49:56 +08005 * SPDX-License-Identifier: GPL-2.0
6 */
7
8#include <common.h>
9#include <clk-uclass.h>
10#include <dm.h>
Philipp Tomsich79aa1ab2017-06-22 23:51:37 +020011#include <dt-structs.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080012#include <errno.h>
Philipp Tomsich79aa1ab2017-06-22 23:51:37 +020013#include <mapmem.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080014#include <syscon.h>
15#include <asm/arch/clock.h>
16#include <asm/arch/cru_rk3368.h>
17#include <asm/arch/hardware.h>
18#include <asm/io.h>
19#include <dm/lists.h>
20#include <dt-bindings/clock/rk3368-cru.h>
21
22DECLARE_GLOBAL_DATA_PTR;
23
#if CONFIG_IS_ENABLED(OF_PLATDATA)
/* Platform data generated by dtoc from the rockchip,rk3368-cru node. */
struct rk3368_clk_plat {
	struct dtd_rockchip_rk3368_cru dtd;
};
#endif
29
/*
 * PLL divisor configuration: Fout = ((OSC_HZ / nr) * nf) / no
 * (see rkclk_set_pll() for how these map onto the con0/con1 fields).
 */
struct pll_div {
	u32 nr;	/* pre-divider (NR) */
	u32 nf;	/* feedback multiplier (NF) */
	u32 no;	/* post-divider (NO/OD) */
};
35
36#define OSC_HZ (24 * 1000 * 1000)
37#define APLL_L_HZ (800 * 1000 * 1000)
38#define APLL_B_HZ (816 * 1000 * 1000)
39#define GPLL_HZ (576 * 1000 * 1000)
40#define CPLL_HZ (400 * 1000 * 1000)
41
42#define RATE_TO_DIV(input_rate, output_rate) \
43 ((input_rate) / (output_rate) - 1);
44
45#define DIV_TO_RATE(input_rate, div) ((input_rate) / ((div) + 1))
46
47#define PLL_DIVISORS(hz, _nr, _no) { \
48 .nr = _nr, .nf = (u32)((u64)hz * _nr * _no / OSC_HZ), .no = _no}; \
49 _Static_assert(((u64)hz * _nr * _no / OSC_HZ) * OSC_HZ /\
50 (_nr * _no) == hz, #hz "Hz cannot be hit with PLL " \
51 "divisors on line " __stringify(__LINE__));
52
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/* Boot-time PLL settings, only needed by the stages that program PLLs */
static const struct pll_div apll_l_init_cfg = PLL_DIVISORS(APLL_L_HZ, 12, 2);
static const struct pll_div apll_b_init_cfg = PLL_DIVISORS(APLL_B_HZ, 1, 2);
#if !defined(CONFIG_TPL_BUILD)
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 1, 2);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 6);
#endif
#endif

/* Forward declaration; the definition follows the MMC helpers below. */
static ulong rk3368_clk_get_rate(struct clk *clk);
63
Andy Yanb9909aa2017-05-15 17:49:56 +080064/* Get pll rate by id */
65static uint32_t rkclk_pll_get_rate(struct rk3368_cru *cru,
66 enum rk3368_pll_id pll_id)
67{
68 uint32_t nr, no, nf;
69 uint32_t con;
70 struct rk3368_pll *pll = &cru->pll[pll_id];
71
72 con = readl(&pll->con3);
73
74 switch ((con & PLL_MODE_MASK) >> PLL_MODE_SHIFT) {
75 case PLL_MODE_SLOW:
76 return OSC_HZ;
77 case PLL_MODE_NORMAL:
78 con = readl(&pll->con0);
79 no = ((con & PLL_OD_MASK) >> PLL_OD_SHIFT) + 1;
80 nr = ((con & PLL_NR_MASK) >> PLL_NR_SHIFT) + 1;
81 con = readl(&pll->con1);
82 nf = ((con & PLL_NF_MASK) >> PLL_NF_SHIFT) + 1;
83
84 return (24 * nf / (nr * no)) * 1000000;
85 case PLL_MODE_DEEP_SLOW:
86 default:
87 return 32768;
88 }
89}
90
Philipp Tomsich415ff7e2017-06-22 23:53:44 +020091#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/*
 * Program one PLL with the given divisors: enter slow mode and assert
 * reset, write NR/NO/NF and the bandwidth adjustment, release reset,
 * busy-wait for lock and finally switch back to normal (PLL) mode.
 * Always returns 0.
 */
static int rkclk_set_pll(struct rk3368_cru *cru, enum rk3368_pll_id pll_id,
			 const struct pll_div *div)
{
	struct rk3368_pll *pll = &cru->pll[pll_id];
	/* All PLLs have same VCO and output frequency range restrictions*/
	uint vco_hz = OSC_HZ / 1000 * div->nf / div->nr * 1000;
	uint output_hz = vco_hz / div->no;

	debug("PLL at %p: nf=%d, nr=%d, no=%d, vco=%u Hz, output=%u Hz\n",
	      pll, div->nf, div->nr, div->no, vco_hz, output_hz);

	/* enter slow mode and reset pll */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK | PLL_RESET_MASK,
		     PLL_RESET << PLL_RESET_SHIFT);

	/* Divisor fields use "value - 1" encoding (NR/OD in con0, NF in con1). */
	rk_clrsetreg(&pll->con0, PLL_NR_MASK | PLL_OD_MASK,
		     ((div->nr - 1) << PLL_NR_SHIFT) |
		     ((div->no - 1) << PLL_OD_SHIFT));
	writel((div->nf - 1) << PLL_NF_SHIFT, &pll->con1);
	/*
	 * BWADJ should be set to NF / 2 to ensure the nominal bandwidth.
	 * Compare the RK3368 TRM, section "3.6.4 PLL Bandwidth Adjustment".
	 */
	clrsetbits_le32(&pll->con2, PLL_BWADJ_MASK, (div->nf >> 1) - 1);

	udelay(10);

	/* return from reset */
	rk_clrreg(&pll->con3, PLL_RESET_MASK);

	/* waiting for pll lock */
	while (!(readl(&pll->con1) & PLL_LOCK_STA))
		udelay(1);

	/* Leave slow mode: route the PLL output to consumers. */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK,
		     PLL_MODE_NORMAL << PLL_MODE_SHIFT);

	return 0;
}
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200131#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800132
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200133#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/*
 * One-time boot clock setup: program both ARM-cluster PLLs and (outside
 * TPL) GPLL/CPLL, then read back all PLL rates for the debug trace.
 */
static void rkclk_init(struct rk3368_cru *cru)
{
	u32 apllb, aplll, dpll, cpll, gpll;

	rkclk_set_pll(cru, APLLB, &apll_b_init_cfg);
	rkclk_set_pll(cru, APLLL, &apll_l_init_cfg);
#if !defined(CONFIG_TPL_BUILD)
	/*
	 * If we plan to return to the boot ROM, we can't increase the
	 * GPLL rate from the SPL stage.
	 */
	rkclk_set_pll(cru, GPLL, &gpll_init_cfg);
	rkclk_set_pll(cru, CPLL, &cpll_init_cfg);
#endif

	/* Read-back is only used for the debug output below. */
	apllb = rkclk_pll_get_rate(cru, APLLB);
	aplll = rkclk_pll_get_rate(cru, APLLL);
	dpll = rkclk_pll_get_rate(cru, DPLL);
	cpll = rkclk_pll_get_rate(cru, CPLL);
	gpll = rkclk_pll_get_rate(cru, GPLL);

	debug("%s apllb(%d) apll(%d) dpll(%d) cpll(%d) gpll(%d)\n",
	      __func__, apllb, aplll, dpll, cpll, gpll);
}
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200158#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800159
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200160#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
Andy Yanb9909aa2017-05-15 17:49:56 +0800161static ulong rk3368_mmc_get_clk(struct rk3368_cru *cru, uint clk_id)
162{
163 u32 div, con, con_id, rate;
164 u32 pll_rate;
165
166 switch (clk_id) {
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200167 case HCLK_SDMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800168 con_id = 50;
169 break;
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200170 case HCLK_EMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800171 con_id = 51;
172 break;
173 case SCLK_SDIO0:
174 con_id = 48;
175 break;
176 default:
177 return -EINVAL;
178 }
179
180 con = readl(&cru->clksel_con[con_id]);
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200181 switch (con & MMC_PLL_SEL_MASK) {
Andy Yanb9909aa2017-05-15 17:49:56 +0800182 case MMC_PLL_SEL_GPLL:
183 pll_rate = rkclk_pll_get_rate(cru, GPLL);
184 break;
185 case MMC_PLL_SEL_24M:
186 pll_rate = OSC_HZ;
187 break;
188 case MMC_PLL_SEL_CPLL:
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200189 pll_rate = rkclk_pll_get_rate(cru, CPLL);
190 break;
Andy Yanb9909aa2017-05-15 17:49:56 +0800191 case MMC_PLL_SEL_USBPHY_480M:
192 default:
193 return -EINVAL;
194 }
195 div = (con & MMC_CLK_DIV_MASK) >> MMC_CLK_DIV_SHIFT;
196 rate = DIV_TO_RATE(pll_rate, div);
197
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200198 debug("%s: raw rate %d (post-divide by 2)\n", __func__, rate);
Andy Yanb9909aa2017-05-15 17:49:56 +0800199 return rate >> 1;
200}
201
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200202static ulong rk3368_mmc_find_best_rate_and_parent(struct clk *clk,
203 ulong rate,
204 u32 *best_mux,
205 u32 *best_div)
Andy Yanb9909aa2017-05-15 17:49:56 +0800206{
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200207 int i;
208 ulong best_rate = 0;
209 const ulong MHz = 1000000;
210 const struct {
211 u32 mux;
212 ulong rate;
213 } parents[] = {
214 { .mux = MMC_PLL_SEL_CPLL, .rate = CPLL_HZ },
215 { .mux = MMC_PLL_SEL_GPLL, .rate = GPLL_HZ },
216 { .mux = MMC_PLL_SEL_24M, .rate = 24 * MHz }
217 };
218
219 debug("%s: target rate %ld\n", __func__, rate);
220 for (i = 0; i < ARRAY_SIZE(parents); ++i) {
221 /*
222 * Find the largest rate no larger than the target-rate for
223 * the current parent.
224 */
225 ulong parent_rate = parents[i].rate;
226 u32 div = DIV_ROUND_UP(parent_rate, rate);
227 u32 adj_div = div;
228 ulong new_rate = parent_rate / adj_div;
229
230 debug("%s: rate %ld, parent-mux %d, parent-rate %ld, div %d\n",
231 __func__, rate, parents[i].mux, parents[i].rate, div);
232
233 /* Skip, if not representable */
234 if ((div - 1) > MMC_CLK_DIV_MASK)
235 continue;
Andy Yanb9909aa2017-05-15 17:49:56 +0800236
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200237 /* Skip, if we already have a better (or equal) solution */
238 if (new_rate <= best_rate)
239 continue;
240
241 /* This is our new best rate. */
242 best_rate = new_rate;
243 *best_mux = parents[i].mux;
244 *best_div = div - 1;
245 }
246
247 debug("%s: best_mux = %x, best_div = %d, best_rate = %ld\n",
248 __func__, *best_mux, *best_div, best_rate);
249
250 return best_rate;
251}
252
253static ulong rk3368_mmc_set_clk(struct clk *clk, ulong rate)
254{
255 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
256 struct rk3368_cru *cru = priv->cru;
257 ulong clk_id = clk->id;
258 u32 con_id, mux = 0, div = 0;
259
260 /* Find the best parent and rate */
261 rk3368_mmc_find_best_rate_and_parent(clk, rate << 1, &mux, &div);
Andy Yanb9909aa2017-05-15 17:49:56 +0800262
263 switch (clk_id) {
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200264 case HCLK_SDMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800265 con_id = 50;
266 break;
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200267 case HCLK_EMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800268 con_id = 51;
269 break;
270 case SCLK_SDIO0:
271 con_id = 48;
272 break;
273 default:
274 return -EINVAL;
275 }
276
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200277 rk_clrsetreg(&cru->clksel_con[con_id],
278 MMC_PLL_SEL_MASK | MMC_CLK_DIV_MASK,
279 mux | div);
Andy Yanb9909aa2017-05-15 17:49:56 +0800280
281 return rk3368_mmc_get_clk(cru, clk_id);
282}
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200283#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800284
/*
 * clk_ops.get_rate implementation: report the current rate for the
 * given clock id. Returns -ENOENT for ids this driver does not handle.
 */
static ulong rk3368_clk_get_rate(struct clk *clk)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	debug("%s: id %ld\n", __func__, clk->id);
	switch (clk->id) {
	case PLL_CPLL:
		rate = rkclk_pll_get_rate(priv->cru, CPLL);
		break;
	case PLL_GPLL:
		rate = rkclk_pll_get_rate(priv->cru, GPLL);
		break;
#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		rate = rk3368_mmc_get_clk(priv->cru, clk->id);
		break;
#endif
	default:
		return -ENOENT;
	}

	return rate;
}
310
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200311static ulong rk3368_ddr_set_clk(struct rk3368_cru *cru, ulong set_rate)
312{
313 const struct pll_div *dpll_cfg = NULL;
314 const ulong MHz = 1000000;
315
316 /* Fout = ((Fin /NR) * NF )/ NO */
317 static const struct pll_div dpll_1200 =
318 PLL_DIVISORS(1200 * MHz, 1, 1);
319 static const struct pll_div dpll_1332 =
320 PLL_DIVISORS(1332 * MHz, 2, 1);
321 static const struct pll_div dpll_1600 =
322 PLL_DIVISORS(1600 * MHz, 3, 2);
323
324 switch (set_rate) {
325 case 1200*MHz:
326 dpll_cfg = &dpll_1200;
327 break;
328 case 1332*MHz:
329 dpll_cfg = &dpll_1332;
330 break;
331 case 1600*MHz:
332 dpll_cfg = &dpll_1600;
333 break;
334 default:
335 error("Unsupported SDRAM frequency!,%ld\n", set_rate);
336 }
337 rkclk_set_pll(cru, DPLL, dpll_cfg);
338
339 return set_rate;
340}
341
/*
 * clk_ops.set_rate implementation: dispatch on the clock id and return
 * the rate actually configured, or -ENOENT for unhandled ids.
 */
static ulong rk3368_clk_set_rate(struct clk *clk, ulong rate)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	debug("%s id:%ld rate:%ld\n", __func__, clk->id, rate);
	switch (clk->id) {
	case CLK_DDR:
		ret = rk3368_ddr_set_clk(priv->cru, rate);
		break;
#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		ret = rk3368_mmc_set_clk(clk, rate);
		break;
#endif
	case SCLK_MAC:
		/* nothing to do, as this is an external clock */
		ret = rate;
		break;
	default:
		return -ENOENT;
	}

	return ret;
}
368
/* UCLASS_CLK operations exposed by this driver. */
static struct clk_ops rk3368_clk_ops = {
	.get_rate = rk3368_clk_get_rate,
	.set_rate = rk3368_clk_set_rate,
};
373
/*
 * Probe: resolve the CRU base address (OF_PLATDATA case) and, in
 * SPL/TPL, run the one-time PLL initialization.
 */
static int rk3368_clk_probe(struct udevice *dev)
{
	struct rk3368_clk_priv __maybe_unused *priv = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_plat *plat = dev_get_platdata(dev);

	/*
	 * NOTE(review): reg[1]/reg[3] presumably select the low words of
	 * 64-bit address/size cells emitted by dtoc -- confirm against
	 * the dts #address-cells/#size-cells before reusing elsewhere.
	 */
	priv->cru = map_sysmem(plat->dtd.reg[1], plat->dtd.reg[3]);
#endif
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
	rkclk_init(priv->cru);
#endif

	return 0;
}
388
/* With a live device tree, read the CRU base from the node's 'reg'. */
static int rk3368_clk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_priv *priv = dev_get_priv(dev);

	priv->cru = (struct rk3368_cru *)devfdt_get_addr(dev);
#endif

	return 0;
}
399
/* Bind-time hook: attach the sysreset driver as a child of the CRU. */
static int rk3368_clk_bind(struct udevice *dev)
{
	int ret;

	/* The reset driver does not have a device node, so bind it here */
	ret = device_bind_driver(gd->dm_root, "rk3368_sysreset", "reset", &dev);
	if (ret)
		error("bind RK3368 reset driver failed: ret=%d\n", ret);

	return ret;
}
411
/* Device-tree match table for the RK3368 clock & reset unit. */
static const struct udevice_id rk3368_clk_ids[] = {
	{ .compatible = "rockchip,rk3368-cru" },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3368_cru) = {
	.name = "rockchip_rk3368_cru",
	.id = UCLASS_CLK,
	.of_match = rk3368_clk_ids,
	/* Private data carries the mapped CRU register block. */
	.priv_auto_alloc_size = sizeof(struct rk3368_clk_priv),
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3368_clk_plat),
#endif
	.ofdata_to_platdata = rk3368_clk_ofdata_to_platdata,
	.ops = &rk3368_clk_ops,
	.bind = rk3368_clk_bind,
	.probe = rk3368_clk_probe,
};