blob: 2cce1b967d175c4bb31534b055f918e44c8cf5cc [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Andy Yanb9909aa2017-05-15 17:49:56 +08002/*
3 * (C) Copyright 2017 Rockchip Electronics Co., Ltd
4 * Author: Andy Yan <andy.yan@rock-chips.com>
Philipp Tomsich34b76132017-06-22 23:47:11 +02005 * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
Andy Yanb9909aa2017-05-15 17:49:56 +08006 */
7
8#include <common.h>
9#include <clk-uclass.h>
10#include <dm.h>
Philipp Tomsich79aa1ab2017-06-22 23:51:37 +020011#include <dt-structs.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080012#include <errno.h>
Simon Glass9bc15642020-02-03 07:36:16 -070013#include <malloc.h>
Philipp Tomsich79aa1ab2017-06-22 23:51:37 +020014#include <mapmem.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080015#include <syscon.h>
David Wu4771ba62017-09-20 14:37:50 +080016#include <bitfield.h>
Kever Yang9fbe17c2019-03-28 11:01:23 +080017#include <asm/arch-rockchip/clock.h>
18#include <asm/arch-rockchip/cru_rk3368.h>
19#include <asm/arch-rockchip/hardware.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080020#include <asm/io.h>
21#include <dm/lists.h>
22#include <dt-bindings/clock/rk3368-cru.h>
23
#if CONFIG_IS_ENABLED(OF_PLATDATA)
/*
 * Platform data for OF_PLATDATA builds: holds the dtoc-generated
 * representation of the "rockchip,rk3368-cru" device-tree node
 * (used in probe to obtain the CRU register window).
 */
struct rk3368_clk_plat {
	struct dtd_rockchip_rk3368_cru dtd;
};
#endif
29
/*
 * PLL divisor configuration.
 * Per the PLL_DIVISORS macro below: Fout = (OSC_HZ / nr) * nf / no.
 */
struct pll_div {
	u32 nr;	/* reference-clock divisor */
	u32 nf;	/* feedback multiplier */
	u32 no;	/* output divisor */
};
35
/* Fixed input/output rates used by this driver */
#define OSC_HZ		(24 * 1000 * 1000)
#define APLL_L_HZ	(800 * 1000 * 1000)
#define APLL_B_HZ	(816 * 1000 * 1000)
#define GPLL_HZ		(576 * 1000 * 1000)
#define CPLL_HZ		(400 * 1000 * 1000)

/* Rockchip CRU dividers encode (divisor - 1): rate = input / (div + 1) */
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

/*
 * Build a pll_div initializer for the target rate from _nr/_no; nf is
 * derived from OSC_HZ.  The _Static_assert rejects, at compile time, any
 * (hz, _nr, _no) combination the 24 MHz oscillator cannot hit exactly.
 */
#define PLL_DIVISORS(hz, _nr, _no) { \
	.nr = _nr, .nf = (u32)((u64)hz * _nr * _no / OSC_HZ), .no = _no}; \
	_Static_assert(((u64)hz * _nr * _no / OSC_HZ) * OSC_HZ /\
		       (_nr * _no) == hz, #hz "Hz cannot be hit with PLL " \
		       "divisors on line " __stringify(__LINE__));
49
/* PLL settings applied during SPL/TPL clock bring-up (see rkclk_init) */
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
static const struct pll_div apll_l_init_cfg = PLL_DIVISORS(APLL_L_HZ, 12, 2);
static const struct pll_div apll_b_init_cfg = PLL_DIVISORS(APLL_B_HZ, 1, 2);
#if !defined(CONFIG_TPL_BUILD)
/* GPLL/CPLL are only reprogrammed in SPL (see comment in rkclk_init) */
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 1, 2);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 6);
#endif
#endif

static ulong rk3368_clk_get_rate(struct clk *clk);
60
Andy Yanb9909aa2017-05-15 17:49:56 +080061/* Get pll rate by id */
62static uint32_t rkclk_pll_get_rate(struct rk3368_cru *cru,
63 enum rk3368_pll_id pll_id)
64{
65 uint32_t nr, no, nf;
66 uint32_t con;
67 struct rk3368_pll *pll = &cru->pll[pll_id];
68
69 con = readl(&pll->con3);
70
71 switch ((con & PLL_MODE_MASK) >> PLL_MODE_SHIFT) {
72 case PLL_MODE_SLOW:
73 return OSC_HZ;
74 case PLL_MODE_NORMAL:
75 con = readl(&pll->con0);
76 no = ((con & PLL_OD_MASK) >> PLL_OD_SHIFT) + 1;
77 nr = ((con & PLL_NR_MASK) >> PLL_NR_SHIFT) + 1;
78 con = readl(&pll->con1);
79 nf = ((con & PLL_NF_MASK) >> PLL_NF_SHIFT) + 1;
80
81 return (24 * nf / (nr * no)) * 1000000;
82 case PLL_MODE_DEEP_SLOW:
83 default:
84 return 32768;
85 }
86}
87
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/*
 * Program one PLL with the given divisors, following the documented
 * sequence: enter slow mode + assert reset, write divisors and bandwidth
 * adjustment, release reset, poll for lock, then switch to normal mode.
 * Always returns 0.
 */
static int rkclk_set_pll(struct rk3368_cru *cru, enum rk3368_pll_id pll_id,
			 const struct pll_div *div)
{
	struct rk3368_pll *pll = &cru->pll[pll_id];
	/* All PLLs have same VCO and output frequency range restrictions */
	/* divide-before-multiply (OSC_HZ / 1000 first) keeps this in 32 bits */
	uint vco_hz = OSC_HZ / 1000 * div->nf / div->nr * 1000;
	uint output_hz = vco_hz / div->no;

	debug("PLL at %p: nf=%d, nr=%d, no=%d, vco=%u Hz, output=%u Hz\n",
	      pll, div->nf, div->nr, div->no, vco_hz, output_hz);

	/* enter slow mode and reset pll */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK | PLL_RESET_MASK,
		     PLL_RESET << PLL_RESET_SHIFT);

	/* register fields hold (divisor - 1) */
	rk_clrsetreg(&pll->con0, PLL_NR_MASK | PLL_OD_MASK,
		     ((div->nr - 1) << PLL_NR_SHIFT) |
		     ((div->no - 1) << PLL_OD_SHIFT));
	writel((div->nf - 1) << PLL_NF_SHIFT, &pll->con1);
	/*
	 * BWADJ should be set to NF / 2 to ensure the nominal bandwidth.
	 * Compare the RK3368 TRM, section "3.6.4 PLL Bandwidth Adjustment".
	 */
	clrsetbits_le32(&pll->con2, PLL_BWADJ_MASK, (div->nf >> 1) - 1);

	udelay(10);

	/* return from reset */
	rk_clrreg(&pll->con3, PLL_RESET_MASK);

	/* waiting for pll lock */
	while (!(readl(&pll->con1) & PLL_LOCK_STA))
		udelay(1);

	/* PLL is locked: switch output from the 24 MHz bypass to the PLL */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK,
		     PLL_MODE_NORMAL << PLL_MODE_SHIFT);

	return 0;
}
#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800129
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200130#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
Andy Yanb9909aa2017-05-15 17:49:56 +0800131static void rkclk_init(struct rk3368_cru *cru)
132{
133 u32 apllb, aplll, dpll, cpll, gpll;
134
Philipp Tomsich34b76132017-06-22 23:47:11 +0200135 rkclk_set_pll(cru, APLLB, &apll_b_init_cfg);
136 rkclk_set_pll(cru, APLLL, &apll_l_init_cfg);
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200137#if !defined(CONFIG_TPL_BUILD)
138 /*
139 * If we plan to return to the boot ROM, we can't increase the
140 * GPLL rate from the SPL stage.
141 */
Philipp Tomsich34b76132017-06-22 23:47:11 +0200142 rkclk_set_pll(cru, GPLL, &gpll_init_cfg);
143 rkclk_set_pll(cru, CPLL, &cpll_init_cfg);
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200144#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800145
146 apllb = rkclk_pll_get_rate(cru, APLLB);
147 aplll = rkclk_pll_get_rate(cru, APLLL);
148 dpll = rkclk_pll_get_rate(cru, DPLL);
149 cpll = rkclk_pll_get_rate(cru, CPLL);
150 gpll = rkclk_pll_get_rate(cru, GPLL);
151
152 debug("%s apllb(%d) apll(%d) dpll(%d) cpll(%d) gpll(%d)\n",
153 __func__, apllb, aplll, dpll, cpll, gpll);
154}
Philipp Tomsich415ff7e2017-06-22 23:53:44 +0200155#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800156
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200157#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
Andy Yanb9909aa2017-05-15 17:49:56 +0800158static ulong rk3368_mmc_get_clk(struct rk3368_cru *cru, uint clk_id)
159{
160 u32 div, con, con_id, rate;
161 u32 pll_rate;
162
163 switch (clk_id) {
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200164 case HCLK_SDMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800165 con_id = 50;
166 break;
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200167 case HCLK_EMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800168 con_id = 51;
169 break;
170 case SCLK_SDIO0:
171 con_id = 48;
172 break;
173 default:
174 return -EINVAL;
175 }
176
177 con = readl(&cru->clksel_con[con_id]);
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200178 switch (con & MMC_PLL_SEL_MASK) {
Andy Yanb9909aa2017-05-15 17:49:56 +0800179 case MMC_PLL_SEL_GPLL:
180 pll_rate = rkclk_pll_get_rate(cru, GPLL);
181 break;
182 case MMC_PLL_SEL_24M:
183 pll_rate = OSC_HZ;
184 break;
185 case MMC_PLL_SEL_CPLL:
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200186 pll_rate = rkclk_pll_get_rate(cru, CPLL);
187 break;
Andy Yanb9909aa2017-05-15 17:49:56 +0800188 case MMC_PLL_SEL_USBPHY_480M:
189 default:
190 return -EINVAL;
191 }
192 div = (con & MMC_CLK_DIV_MASK) >> MMC_CLK_DIV_SHIFT;
193 rate = DIV_TO_RATE(pll_rate, div);
194
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200195 debug("%s: raw rate %d (post-divide by 2)\n", __func__, rate);
Andy Yanb9909aa2017-05-15 17:49:56 +0800196 return rate >> 1;
197}
198
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200199static ulong rk3368_mmc_find_best_rate_and_parent(struct clk *clk,
200 ulong rate,
201 u32 *best_mux,
202 u32 *best_div)
Andy Yanb9909aa2017-05-15 17:49:56 +0800203{
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200204 int i;
205 ulong best_rate = 0;
206 const ulong MHz = 1000000;
207 const struct {
208 u32 mux;
209 ulong rate;
210 } parents[] = {
211 { .mux = MMC_PLL_SEL_CPLL, .rate = CPLL_HZ },
212 { .mux = MMC_PLL_SEL_GPLL, .rate = GPLL_HZ },
213 { .mux = MMC_PLL_SEL_24M, .rate = 24 * MHz }
214 };
215
216 debug("%s: target rate %ld\n", __func__, rate);
217 for (i = 0; i < ARRAY_SIZE(parents); ++i) {
218 /*
219 * Find the largest rate no larger than the target-rate for
220 * the current parent.
221 */
222 ulong parent_rate = parents[i].rate;
223 u32 div = DIV_ROUND_UP(parent_rate, rate);
224 u32 adj_div = div;
225 ulong new_rate = parent_rate / adj_div;
226
227 debug("%s: rate %ld, parent-mux %d, parent-rate %ld, div %d\n",
228 __func__, rate, parents[i].mux, parents[i].rate, div);
229
230 /* Skip, if not representable */
231 if ((div - 1) > MMC_CLK_DIV_MASK)
232 continue;
Andy Yanb9909aa2017-05-15 17:49:56 +0800233
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200234 /* Skip, if we already have a better (or equal) solution */
235 if (new_rate <= best_rate)
236 continue;
237
238 /* This is our new best rate. */
239 best_rate = new_rate;
240 *best_mux = parents[i].mux;
241 *best_div = div - 1;
242 }
243
244 debug("%s: best_mux = %x, best_div = %d, best_rate = %ld\n",
245 __func__, *best_mux, *best_div, best_rate);
246
247 return best_rate;
248}
249
250static ulong rk3368_mmc_set_clk(struct clk *clk, ulong rate)
251{
252 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
253 struct rk3368_cru *cru = priv->cru;
254 ulong clk_id = clk->id;
255 u32 con_id, mux = 0, div = 0;
256
257 /* Find the best parent and rate */
258 rk3368_mmc_find_best_rate_and_parent(clk, rate << 1, &mux, &div);
Andy Yanb9909aa2017-05-15 17:49:56 +0800259
260 switch (clk_id) {
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200261 case HCLK_SDMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800262 con_id = 50;
263 break;
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200264 case HCLK_EMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800265 con_id = 51;
266 break;
267 case SCLK_SDIO0:
268 con_id = 48;
269 break;
270 default:
271 return -EINVAL;
272 }
273
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200274 rk_clrsetreg(&cru->clksel_con[con_id],
275 MMC_PLL_SEL_MASK | MMC_CLK_DIV_MASK,
276 mux | div);
Andy Yanb9909aa2017-05-15 17:49:56 +0800277
278 return rk3368_mmc_get_clk(cru, clk_id);
279}
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200280#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800281
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200282#if IS_ENABLED(CONFIG_TPL_BUILD)
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200283static ulong rk3368_ddr_set_clk(struct rk3368_cru *cru, ulong set_rate)
284{
285 const struct pll_div *dpll_cfg = NULL;
286 const ulong MHz = 1000000;
287
288 /* Fout = ((Fin /NR) * NF )/ NO */
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200289 static const struct pll_div dpll_1200 = PLL_DIVISORS(1200 * MHz, 1, 1);
290 static const struct pll_div dpll_1332 = PLL_DIVISORS(1332 * MHz, 2, 1);
291 static const struct pll_div dpll_1600 = PLL_DIVISORS(1600 * MHz, 3, 2);
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200292
293 switch (set_rate) {
294 case 1200*MHz:
295 dpll_cfg = &dpll_1200;
296 break;
297 case 1332*MHz:
298 dpll_cfg = &dpll_1332;
299 break;
300 case 1600*MHz:
301 dpll_cfg = &dpll_1600;
302 break;
303 default:
Masahiro Yamada81e10422017-09-16 14:10:41 +0900304 pr_err("Unsupported SDRAM frequency!,%ld\n", set_rate);
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200305 }
306 rkclk_set_pll(cru, DPLL, dpll_cfg);
307
308 return set_rate;
309}
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200310#endif
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200311
#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
/*
 * Set the GMAC clock rate.  If CLKSEL_CON43 selects the external clock
 * input, nothing needs programming; otherwise derive the rate from the
 * selected PLL (GPLL or CPLL) via the 5-bit divider.
 */
static ulong rk3368_gmac_set_clk(struct rk3368_cru *cru, ulong set_rate)
{
	ulong ret;

	/*
	 * The gmac clock can be derived either from an external clock
	 * or can be generated from internally by a divider from SCLK_MAC.
	 */
	if (readl(&cru->clksel_con[43]) & GMAC_MUX_SEL_EXTCLK) {
		/* An external clock will always generate the right rate... */
		ret = set_rate;
	} else {
		u32 con = readl(&cru->clksel_con[43]);
		ulong pll_rate;
		u8 div;

		if (((con >> GMAC_PLL_SHIFT) & GMAC_PLL_MASK) ==
		    GMAC_PLL_SELECT_GENERAL)
			pll_rate = GPLL_HZ;
		else if (((con >> GMAC_PLL_SHIFT) & GMAC_PLL_MASK) ==
			 GMAC_PLL_SELECT_CODEC)
			pll_rate = CPLL_HZ;
		else
			/* CPLL is not set */
			return -EPERM;

		/* divider field holds (divisor - 1) */
		div = DIV_ROUND_UP(pll_rate, set_rate) - 1;
		if (div <= 0x1f)
			rk_clrsetreg(&cru->clksel_con[43], GMAC_DIV_CON_MASK,
				     div << GMAC_DIV_CON_SHIFT);
		else
			debug("Unsupported div for gmac:%d\n", div);

		/*
		 * NOTE(review): if div > 0x1f the divider is NOT written,
		 * yet the rate computed from the unprogrammed div is still
		 * returned — looks like it should report an error instead;
		 * confirm intended behavior before changing.
		 */
		return DIV_TO_RATE(pll_rate, div);
	}

	return ret;
}
#endif
352
/*
 * RK3368 SPI clocks have a common divider-width (7 bits) and a single bit
 * to select either CPLL or GPLL as the clock-parent. The location within
 * the enclosing CLKSEL_CON (i.e. div_shift and sel_shift) are variable.
 */

/* Location of one SPI clock's divider and parent-select bits in the CRU. */
struct spi_clkreg {
	uint8_t reg;        /* CLKSEL_CON[reg] register in CRU */
	uint8_t div_shift;  /* bit offset of the 7-bit divider field */
	uint8_t sel_shift;  /* bit offset of the parent-select bit */
};

/*
 * The entries are numbered relative to their offset from SCLK_SPI0.
 */
static const struct spi_clkreg spi_clkregs[] = {
	[0] = { .reg = 45, .div_shift = 0, .sel_shift = 7, },
	[1] = { .reg = 45, .div_shift = 8, .sel_shift = 15, },
	[2] = { .reg = 46, .div_shift = 8, .sel_shift = 15, },
};
373
374static inline u32 extract_bits(u32 val, unsigned width, unsigned shift)
375{
376 return (val >> shift) & ((1 << width) - 1);
377}
378
379static ulong rk3368_spi_get_clk(struct rk3368_cru *cru, ulong clk_id)
380{
381 const struct spi_clkreg *spiclk = NULL;
382 u32 div, val;
383
384 switch (clk_id) {
385 case SCLK_SPI0 ... SCLK_SPI2:
386 spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
387 break;
388
389 default:
Masahiro Yamada81e10422017-09-16 14:10:41 +0900390 pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200391 return -EINVAL;
392 }
393
394 val = readl(&cru->clksel_con[spiclk->reg]);
395 div = extract_bits(val, 7, spiclk->div_shift);
396
397 debug("%s: div 0x%x\n", __func__, div);
398 return DIV_TO_RATE(GPLL_HZ, div);
399}
400
401static ulong rk3368_spi_set_clk(struct rk3368_cru *cru, ulong clk_id, uint hz)
402{
403 const struct spi_clkreg *spiclk = NULL;
404 int src_clk_div;
405
406 src_clk_div = DIV_ROUND_UP(GPLL_HZ, hz);
407 assert(src_clk_div < 127);
408
409 switch (clk_id) {
410 case SCLK_SPI0 ... SCLK_SPI2:
411 spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
412 break;
413
414 default:
Masahiro Yamada81e10422017-09-16 14:10:41 +0900415 pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200416 return -EINVAL;
417 }
418
419 rk_clrsetreg(&cru->clksel_con[spiclk->reg],
420 ((0x7f << spiclk->div_shift) |
421 (0x1 << spiclk->sel_shift)),
422 ((src_clk_div << spiclk->div_shift) |
423 (1 << spiclk->sel_shift)));
424
425 return rk3368_spi_get_clk(cru, clk_id);
426}
427
David Wu4771ba62017-09-20 14:37:50 +0800428static ulong rk3368_saradc_get_clk(struct rk3368_cru *cru)
429{
430 u32 div, val;
431
432 val = readl(&cru->clksel_con[25]);
433 div = bitfield_extract(val, CLK_SARADC_DIV_CON_SHIFT,
434 CLK_SARADC_DIV_CON_WIDTH);
435
436 return DIV_TO_RATE(OSC_HZ, div);
437}
438
439static ulong rk3368_saradc_set_clk(struct rk3368_cru *cru, uint hz)
440{
441 int src_clk_div;
442
443 src_clk_div = DIV_ROUND_UP(OSC_HZ, hz) - 1;
444 assert(src_clk_div < 128);
445
446 rk_clrsetreg(&cru->clksel_con[25],
447 CLK_SARADC_DIV_CON_MASK,
448 src_clk_div << CLK_SARADC_DIV_CON_SHIFT);
449
450 return rk3368_saradc_get_clk(cru);
451}
452
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200453static ulong rk3368_clk_get_rate(struct clk *clk)
454{
455 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
456 ulong rate = 0;
457
458 debug("%s: id %ld\n", __func__, clk->id);
459 switch (clk->id) {
460 case PLL_CPLL:
461 rate = rkclk_pll_get_rate(priv->cru, CPLL);
462 break;
463 case PLL_GPLL:
464 rate = rkclk_pll_get_rate(priv->cru, GPLL);
465 break;
466 case SCLK_SPI0 ... SCLK_SPI2:
467 rate = rk3368_spi_get_clk(priv->cru, clk->id);
468 break;
469#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
470 case HCLK_SDMMC:
471 case HCLK_EMMC:
472 rate = rk3368_mmc_get_clk(priv->cru, clk->id);
473 break;
474#endif
David Wu4771ba62017-09-20 14:37:50 +0800475 case SCLK_SARADC:
476 rate = rk3368_saradc_get_clk(priv->cru);
477 break;
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200478 default:
479 return -ENOENT;
480 }
481
482 return rate;
483}
484
Andy Yanb9909aa2017-05-15 17:49:56 +0800485static ulong rk3368_clk_set_rate(struct clk *clk, ulong rate)
486{
Philipp Tomsich83a5d2c2017-07-05 12:11:58 +0200487 __maybe_unused struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
Andy Yanb9909aa2017-05-15 17:49:56 +0800488 ulong ret = 0;
489
490 debug("%s id:%ld rate:%ld\n", __func__, clk->id, rate);
491 switch (clk->id) {
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200492 case SCLK_SPI0 ... SCLK_SPI2:
493 ret = rk3368_spi_set_clk(priv->cru, clk->id, rate);
494 break;
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200495#if IS_ENABLED(CONFIG_TPL_BUILD)
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200496 case CLK_DDR:
497 ret = rk3368_ddr_set_clk(priv->cru, rate);
498 break;
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200499#endif
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200500#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC_SUPPORT)
501 case HCLK_SDMMC:
502 case HCLK_EMMC:
503 ret = rk3368_mmc_set_clk(clk, rate);
504 break;
505#endif
Philipp Tomsicha249f102017-07-14 19:57:39 +0200506#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200507 case SCLK_MAC:
Philipp Tomsicha249f102017-07-14 19:57:39 +0200508 /* select the external clock */
David Wue72793d2018-01-13 14:07:04 +0800509 ret = rk3368_gmac_set_clk(priv->cru, rate);
Andy Yanb9909aa2017-05-15 17:49:56 +0800510 break;
Philipp Tomsicha249f102017-07-14 19:57:39 +0200511#endif
David Wu4771ba62017-09-20 14:37:50 +0800512 case SCLK_SARADC:
513 ret = rk3368_saradc_set_clk(priv->cru, rate);
514 break;
Andy Yanb9909aa2017-05-15 17:49:56 +0800515 default:
516 return -ENOENT;
517 }
518
519 return ret;
520}
521
Philipp Tomsich6dd2fb42018-01-25 15:27:10 +0100522static int __maybe_unused rk3368_gmac_set_parent(struct clk *clk, struct clk *parent)
David Wue72793d2018-01-13 14:07:04 +0800523{
524 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
525 struct rk3368_cru *cru = priv->cru;
526 const char *clock_output_name;
527 int ret;
528
529 /*
530 * If the requested parent is in the same clock-controller and
531 * the id is SCLK_MAC ("sclk_mac"), switch to the internal
532 * clock.
533 */
534 if ((parent->dev == clk->dev) && (parent->id == SCLK_MAC)) {
535 debug("%s: switching GAMC to SCLK_MAC\n", __func__);
536 rk_clrreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
537 return 0;
538 }
539
540 /*
541 * Otherwise, we need to check the clock-output-names of the
542 * requested parent to see if the requested id is "ext_gmac".
543 */
544 ret = dev_read_string_index(parent->dev, "clock-output-names",
545 parent->id, &clock_output_name);
546 if (ret < 0)
547 return -ENODATA;
548
549 /* If this is "ext_gmac", switch to the external clock input */
550 if (!strcmp(clock_output_name, "ext_gmac")) {
551 debug("%s: switching GMAC to external clock\n", __func__);
552 rk_setreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
553 return 0;
554 }
555
556 return -EINVAL;
557}
558
Philipp Tomsich6dd2fb42018-01-25 15:27:10 +0100559static int __maybe_unused rk3368_clk_set_parent(struct clk *clk, struct clk *parent)
David Wue72793d2018-01-13 14:07:04 +0800560{
561 switch (clk->id) {
562 case SCLK_MAC:
563 return rk3368_gmac_set_parent(clk, parent);
564 }
565
566 debug("%s: unsupported clk %ld\n", __func__, clk->id);
567 return -ENOENT;
568}
569
/* UCLASS_CLK operations for the RK3368 CRU. */
static struct clk_ops rk3368_clk_ops = {
	.get_rate = rk3368_clk_get_rate,
	.set_rate = rk3368_clk_set_rate,
#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
	/* set_parent relies on dev_read_string_index(), i.e. a live DT */
	.set_parent = rk3368_clk_set_parent,
#endif
};
577
/*
 * Probe: with OF_PLATDATA, map the CRU register window from the dtd
 * reg property; in SPL/TPL additionally run the PLL bring-up.
 */
static int rk3368_clk_probe(struct udevice *dev)
{
	struct rk3368_clk_priv __maybe_unused *priv = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_plat *plat = dev_get_platdata(dev);

	/* reg[0]/reg[1] are base address and size from the device tree */
	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
	rkclk_init(priv->cru);
#endif

	return 0;
}
592
/* Obtain the CRU base address from the live device tree (non-OF_PLATDATA). */
static int rk3368_clk_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_priv *priv = dev_get_priv(dev);

	priv->cru = dev_read_addr_ptr(dev);
#endif

	return 0;
}
603
604static int rk3368_clk_bind(struct udevice *dev)
605{
606 int ret;
Kever Yang4fbb6c22017-11-03 15:16:13 +0800607 struct udevice *sys_child;
608 struct sysreset_reg *priv;
Andy Yanb9909aa2017-05-15 17:49:56 +0800609
610 /* The reset driver does not have a device node, so bind it here */
Kever Yang4fbb6c22017-11-03 15:16:13 +0800611 ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
612 &sys_child);
613 if (ret) {
614 debug("Warning: No sysreset driver: ret=%d\n", ret);
615 } else {
616 priv = malloc(sizeof(struct sysreset_reg));
617 priv->glb_srst_fst_value = offsetof(struct rk3368_cru,
618 glb_srst_fst_val);
619 priv->glb_srst_snd_value = offsetof(struct rk3368_cru,
620 glb_srst_snd_val);
621 sys_child->priv = priv;
622 }
Andy Yanb9909aa2017-05-15 17:49:56 +0800623
Heiko Stuebner416f8d32019-11-09 00:06:30 +0100624#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
Elaine Zhang432976f2017-12-19 18:22:38 +0800625 ret = offsetof(struct rk3368_cru, softrst_con[0]);
626 ret = rockchip_reset_bind(dev, ret, 15);
627 if (ret)
628 debug("Warning: software reset driver bind faile\n");
629#endif
630
Andy Yanb9909aa2017-05-15 17:49:56 +0800631 return ret;
632}
633
/* Device-tree match table for the RK3368 clock-and-reset unit. */
static const struct udevice_id rk3368_clk_ids[] = {
	{ .compatible = "rockchip,rk3368-cru" },
	{ }
};
638
/* Driver-model registration for the RK3368 CRU clock driver. */
U_BOOT_DRIVER(rockchip_rk3368_cru) = {
	.name = "rockchip_rk3368_cru",
	.id = UCLASS_CLK,
	.of_match = rk3368_clk_ids,
	.priv_auto_alloc_size = sizeof(struct rk3368_clk_priv),
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.platdata_auto_alloc_size = sizeof(struct rk3368_clk_plat),
#endif
	.ofdata_to_platdata = rk3368_clk_ofdata_to_platdata,
	.ops = &rk3368_clk_ops,
	.bind = rk3368_clk_bind,
	.probe = rk3368_clk_probe,
};