// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2017 Rockchip Electronics Co., Ltd
 * Author: Andy Yan <andy.yan@rock-chips.com>
 * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <mapmem.h>
#include <syscon.h>
#include <bitfield.h>
#include <asm/arch-rockchip/clock.h>
#include <asm/arch-rockchip/cru_rk3368.h>
#include <asm/arch-rockchip/hardware.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dt-bindings/clock/rk3368-cru.h>
#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/stringify.h>

#if CONFIG_IS_ENABLED(OF_PLATDATA)
struct rk3368_clk_plat {
	struct dtd_rockchip_rk3368_cru dtd;
};
#endif

struct pll_div {
	u32 nr;
	u32 nf;
	u32 no;
};

#define OSC_HZ		(24 * 1000 * 1000)
#define APLL_L_HZ	(800 * 1000 * 1000)
#define APLL_B_HZ	(816 * 1000 * 1000)
#define GPLL_HZ		(576 * 1000 * 1000)
#define CPLL_HZ		(400 * 1000 * 1000)

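/*
 * The CRU divider fields hold (divisor - 1): a raw field value of "div"
 * divides the parent rate by (div + 1). DIV_TO_RATE() converts a parent
 * rate and such a raw field value back into an output rate.
 */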
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

#define PLL_DIVISORS(hz, _nr, _no) { \
	.nr = _nr, .nf = (u32)((u64)hz * _nr * _no / OSC_HZ), .no = _no}; \
	_Static_assert(((u64)hz * _nr * _no / OSC_HZ) * OSC_HZ /\
		       (_nr * _no) == hz, #hz "Hz cannot be hit with PLL " \
		       "divisors on line " __stringify(__LINE__));

#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
static const struct pll_div apll_l_init_cfg = PLL_DIVISORS(APLL_L_HZ, 12, 2);
static const struct pll_div apll_b_init_cfg = PLL_DIVISORS(APLL_B_HZ, 1, 2);
#if !defined(CONFIG_TPL_BUILD)
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 1, 2);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 6);
#endif
#endif

static ulong rk3368_clk_get_rate(struct clk *clk);

/* Get pll rate by id */
static uint32_t rkclk_pll_get_rate(struct rk3368_cru *cru,
				   enum rk3368_pll_id pll_id)
{
	uint32_t nr, no, nf;
	uint32_t con;
	struct rk3368_pll *pll = &cru->pll[pll_id];

	con = readl(&pll->con3);

	switch ((con & PLL_MODE_MASK) >> PLL_MODE_SHIFT) {
	case PLL_MODE_SLOW:
		return OSC_HZ;
	case PLL_MODE_NORMAL:
		con = readl(&pll->con0);
		no = ((con & PLL_OD_MASK) >> PLL_OD_SHIFT) + 1;
		nr = ((con & PLL_NR_MASK) >> PLL_NR_SHIFT) + 1;
		con = readl(&pll->con1);
		nf = ((con & PLL_NF_MASK) >> PLL_NF_SHIFT) + 1;

		return (24 * nf / (nr * no)) * 1000000;
	case PLL_MODE_DEEP_SLOW:
	default:
		return 32768;
	}
}

#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
static int rkclk_set_pll(struct rk3368_cru *cru, enum rk3368_pll_id pll_id,
			 const struct pll_div *div)
{
	struct rk3368_pll *pll = &cru->pll[pll_id];
	/* All PLLs have the same VCO and output frequency range restrictions */
	uint vco_hz = OSC_HZ / 1000 * div->nf / div->nr * 1000;
	uint output_hz = vco_hz / div->no;

	debug("PLL at %p: nf=%d, nr=%d, no=%d, vco=%u Hz, output=%u Hz\n",
	      pll, div->nf, div->nr, div->no, vco_hz, output_hz);

	/* enter slow mode and reset pll */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK | PLL_RESET_MASK,
		     PLL_RESET << PLL_RESET_SHIFT);

	rk_clrsetreg(&pll->con0, PLL_NR_MASK | PLL_OD_MASK,
		     ((div->nr - 1) << PLL_NR_SHIFT) |
		     ((div->no - 1) << PLL_OD_SHIFT));
	writel((div->nf - 1) << PLL_NF_SHIFT, &pll->con1);
	/*
	 * BWADJ should be set to NF / 2 to ensure the nominal bandwidth.
	 * See the RK3368 TRM, section "3.6.4 PLL Bandwidth Adjustment".
	 */
	clrsetbits_le32(&pll->con2, PLL_BWADJ_MASK, (div->nf >> 1) - 1);

	udelay(10);

	/* return from reset */
	rk_clrreg(&pll->con3, PLL_RESET_MASK);

	/* waiting for pll lock */
	while (!(readl(&pll->con1) & PLL_LOCK_STA))
		udelay(1);

	rk_clrsetreg(&pll->con3, PLL_MODE_MASK,
		     PLL_MODE_NORMAL << PLL_MODE_SHIFT);

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
static void rkclk_init(struct rk3368_cru *cru)
{
	u32 apllb, aplll, dpll, cpll, gpll;

	rkclk_set_pll(cru, APLLB, &apll_b_init_cfg);
	rkclk_set_pll(cru, APLLL, &apll_l_init_cfg);
#if !defined(CONFIG_TPL_BUILD)
	/*
	 * If we plan to return to the boot ROM, we can't increase the
	 * GPLL rate from the SPL stage.
	 */
	rkclk_set_pll(cru, GPLL, &gpll_init_cfg);
	rkclk_set_pll(cru, CPLL, &cpll_init_cfg);
#endif

	apllb = rkclk_pll_get_rate(cru, APLLB);
	aplll = rkclk_pll_get_rate(cru, APLLL);
	dpll = rkclk_pll_get_rate(cru, DPLL);
	cpll = rkclk_pll_get_rate(cru, CPLL);
	gpll = rkclk_pll_get_rate(cru, GPLL);

	debug("%s apllb(%d) apll(%d) dpll(%d) cpll(%d) gpll(%d)\n",
	      __func__, apllb, aplll, dpll, cpll, gpll);
}
#endif

#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC)
static ulong rk3368_mmc_get_clk(struct rk3368_cru *cru, uint clk_id)
{
	u32 div, con, con_id, rate;
	u32 pll_rate;

	switch (clk_id) {
	case HCLK_SDMMC:
		con_id = 50;
		break;
	case HCLK_EMMC:
		con_id = 51;
		break;
	case SCLK_SDIO0:
		con_id = 48;
		break;
	default:
		return -EINVAL;
	}

	con = readl(&cru->clksel_con[con_id]);
	switch (con & MMC_PLL_SEL_MASK) {
	case MMC_PLL_SEL_GPLL:
		pll_rate = rkclk_pll_get_rate(cru, GPLL);
		break;
	case MMC_PLL_SEL_24M:
		pll_rate = OSC_HZ;
		break;
	case MMC_PLL_SEL_CPLL:
		pll_rate = rkclk_pll_get_rate(cru, CPLL);
		break;
	case MMC_PLL_SEL_USBPHY_480M:
	default:
		return -EINVAL;
	}
	div = (con & MMC_CLK_DIV_MASK) >> MMC_CLK_DIV_SHIFT;
	rate = DIV_TO_RATE(pll_rate, div);

	debug("%s: raw rate %d (post-divide by 2)\n", __func__, rate);
	return rate >> 1;
}

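/*
 * Note that rk3368_mmc_get_clk() above reports half of the divider output
 * and rk3368_mmc_set_clk() below requests twice the target rate; the two
 * presumably model a fixed divide-by-2 stage between the CRU divider and
 * the MMC controller, and they must stay consistent with each other.
 */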
static ulong rk3368_mmc_find_best_rate_and_parent(struct clk *clk,
						  ulong rate,
						  u32 *best_mux,
						  u32 *best_div)
{
	int i;
	ulong best_rate = 0;
	const ulong MHz = 1000000;
	const struct {
		u32 mux;
		ulong rate;
	} parents[] = {
		{ .mux = MMC_PLL_SEL_CPLL, .rate = CPLL_HZ },
		{ .mux = MMC_PLL_SEL_GPLL, .rate = GPLL_HZ },
		{ .mux = MMC_PLL_SEL_24M, .rate = 24 * MHz }
	};

	debug("%s: target rate %ld\n", __func__, rate);
	for (i = 0; i < ARRAY_SIZE(parents); ++i) {
		/*
		 * Find the largest rate no larger than the target-rate for
		 * the current parent.
		 */
		ulong parent_rate = parents[i].rate;
		u32 div = DIV_ROUND_UP(parent_rate, rate);
		u32 adj_div = div;
		ulong new_rate = parent_rate / adj_div;

		debug("%s: rate %ld, parent-mux %d, parent-rate %ld, div %d\n",
		      __func__, rate, parents[i].mux, parents[i].rate, div);

		/* Skip, if not representable */
		if ((div - 1) > MMC_CLK_DIV_MASK)
			continue;

		/* Skip, if we already have a better (or equal) solution */
		if (new_rate <= best_rate)
			continue;

		/* This is our new best rate. */
		best_rate = new_rate;
		*best_mux = parents[i].mux;
		*best_div = div - 1;
	}

	debug("%s: best_mux = %x, best_div = %d, best_rate = %ld\n",
	      __func__, *best_mux, *best_div, best_rate);

	return best_rate;
}

static ulong rk3368_mmc_set_clk(struct clk *clk, ulong rate)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	struct rk3368_cru *cru = priv->cru;
	ulong clk_id = clk->id;
	u32 con_id, mux = 0, div = 0;

	/* Find the best parent and rate */
	rk3368_mmc_find_best_rate_and_parent(clk, rate << 1, &mux, &div);

	switch (clk_id) {
	case HCLK_SDMMC:
		con_id = 50;
		break;
	case HCLK_EMMC:
		con_id = 51;
		break;
	case SCLK_SDIO0:
		con_id = 48;
		break;
	default:
		return -EINVAL;
	}

	rk_clrsetreg(&cru->clksel_con[con_id],
		     MMC_PLL_SEL_MASK | MMC_CLK_DIV_MASK,
		     mux | div);

	return rk3368_mmc_get_clk(cru, clk_id);
}
#endif

#if IS_ENABLED(CONFIG_TPL_BUILD)
static ulong rk3368_ddr_set_clk(struct rk3368_cru *cru, ulong set_rate)
{
	const struct pll_div *dpll_cfg = NULL;
	const ulong MHz = 1000000;

	/* Fout = ((Fin / NR) * NF) / NO */
	static const struct pll_div dpll_1200 = PLL_DIVISORS(1200 * MHz, 1, 1);
	static const struct pll_div dpll_1332 = PLL_DIVISORS(1332 * MHz, 2, 1);
	static const struct pll_div dpll_1600 = PLL_DIVISORS(1600 * MHz, 3, 2);
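	/*
	 * E.g. for dpll_1600: NF = 1600M * 3 * 2 / 24M = 400, so
	 * Fout = ((24 MHz / 3) * 400) / 2 = 1600 MHz, matching the
	 * formula above.
	 */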

	switch (set_rate) {
	case 1200*MHz:
		dpll_cfg = &dpll_1200;
		break;
	case 1332*MHz:
		dpll_cfg = &dpll_1332;
		break;
	case 1600*MHz:
		dpll_cfg = &dpll_1600;
		break;
	default:
		pr_err("Unsupported SDRAM frequency %ld\n", set_rate);
		return -EINVAL;
	}
	rkclk_set_pll(cru, DPLL, dpll_cfg);

	return set_rate;
}
#endif

#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
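/*
 * Worked example (a sketch, assuming GPLL at 576 MHz is the selected
 * parent): a request for 125 MHz (RGMII) gives
 * div = DIV_ROUND_UP(576, 125) - 1 = 4, i.e. 576 MHz / 5 = 115.2 MHz.
 * The internal divider cannot hit standard Ethernet rates exactly, which
 * is why boards typically feed ext_gmac and select it via
 * rk3368_gmac_set_parent() below.
 */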
static ulong rk3368_gmac_set_clk(struct rk3368_cru *cru, ulong set_rate)
{
	ulong ret;

	/*
	 * The GMAC clock (SCLK_MAC) can either be taken from an external
	 * clock input or be generated internally by dividing down GPLL
	 * or CPLL.
	 */
	if (readl(&cru->clksel_con[43]) & GMAC_MUX_SEL_EXTCLK) {
		/* An external clock will always generate the right rate... */
		ret = set_rate;
	} else {
		u32 con = readl(&cru->clksel_con[43]);
		ulong pll_rate;
		u8 div;

		if (((con >> GMAC_PLL_SHIFT) & GMAC_PLL_MASK) ==
		    GMAC_PLL_SELECT_GENERAL)
			pll_rate = GPLL_HZ;
		else if (((con >> GMAC_PLL_SHIFT) & GMAC_PLL_MASK) ==
			 GMAC_PLL_SELECT_CODEC)
			pll_rate = CPLL_HZ;
		else
			/* neither GPLL nor CPLL is selected as parent */
			return -EPERM;

		div = DIV_ROUND_UP(pll_rate, set_rate) - 1;
		if (div <= 0x1f)
			rk_clrsetreg(&cru->clksel_con[43], GMAC_DIV_CON_MASK,
				     div << GMAC_DIV_CON_SHIFT);
		else
			debug("Unsupported div for gmac:%d\n", div);

		return DIV_TO_RATE(pll_rate, div);
	}

	return ret;
}
#endif

/*
 * RK3368 SPI clocks have a common divider-width (7 bits) and a single bit
 * to select either CPLL or GPLL as the clock-parent. The locations within
 * the enclosing CLKSEL_CON (i.e. div_shift and sel_shift) vary.
 */

struct spi_clkreg {
	uint8_t reg;	/* CLKSEL_CON[reg] register in CRU */
	uint8_t div_shift;
	uint8_t sel_shift;
};

/*
 * The entries are numbered relative to their offset from SCLK_SPI0.
 */
static const struct spi_clkreg spi_clkregs[] = {
	[0] = { .reg = 45, .div_shift = 0, .sel_shift = 7, },
	[1] = { .reg = 45, .div_shift = 8, .sel_shift = 15, },
	[2] = { .reg = 46, .div_shift = 8, .sel_shift = 15, },
};

static inline u32 extract_bits(u32 val, unsigned width, unsigned shift)
{
	return (val >> shift) & ((1 << width) - 1);
}

static ulong rk3368_spi_get_clk(struct rk3368_cru *cru, ulong clk_id)
{
	const struct spi_clkreg *spiclk = NULL;
	u32 div, val;

	switch (clk_id) {
	case SCLK_SPI0 ... SCLK_SPI2:
		spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
		break;

	default:
		pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
		return -EINVAL;
	}

	val = readl(&cru->clksel_con[spiclk->reg]);
	div = extract_bits(val, 7, spiclk->div_shift);

	debug("%s: div 0x%x\n", __func__, div);
	return DIV_TO_RATE(GPLL_HZ, div);
}

static ulong rk3368_spi_set_clk(struct rk3368_cru *cru, ulong clk_id, uint hz)
{
	const struct spi_clkreg *spiclk = NULL;
	int src_clk_div;

	src_clk_div = DIV_ROUND_UP(GPLL_HZ, hz);
	assert(src_clk_div < 127);

	switch (clk_id) {
	case SCLK_SPI0 ... SCLK_SPI2:
		spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
		break;

	default:
		pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
		return -EINVAL;
	}

	rk_clrsetreg(&cru->clksel_con[spiclk->reg],
		     ((0x7f << spiclk->div_shift) |
		      (0x1 << spiclk->sel_shift)),
		     ((src_clk_div << spiclk->div_shift) |
		      (1 << spiclk->sel_shift)));

	return rk3368_spi_get_clk(cru, clk_id);
}

static ulong rk3368_saradc_get_clk(struct rk3368_cru *cru)
{
	u32 div, val;

	val = readl(&cru->clksel_con[25]);
	div = bitfield_extract(val, CLK_SARADC_DIV_CON_SHIFT,
			       CLK_SARADC_DIV_CON_WIDTH);

	return DIV_TO_RATE(OSC_HZ, div);
}

static ulong rk3368_saradc_set_clk(struct rk3368_cru *cru, uint hz)
{
	int src_clk_div;

	src_clk_div = DIV_ROUND_UP(OSC_HZ, hz) - 1;
	assert(src_clk_div < 128);

	rk_clrsetreg(&cru->clksel_con[25],
		     CLK_SARADC_DIV_CON_MASK,
		     src_clk_div << CLK_SARADC_DIV_CON_SHIFT);

	return rk3368_saradc_get_clk(cru);
}

static ulong rk3368_clk_get_rate(struct clk *clk)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	debug("%s: id %ld\n", __func__, clk->id);
	switch (clk->id) {
	case PLL_CPLL:
		rate = rkclk_pll_get_rate(priv->cru, CPLL);
		break;
	case PLL_GPLL:
		rate = rkclk_pll_get_rate(priv->cru, GPLL);
		break;
	case SCLK_SPI0 ... SCLK_SPI2:
		rate = rk3368_spi_get_clk(priv->cru, clk->id);
		break;
#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		rate = rk3368_mmc_get_clk(priv->cru, clk->id);
		break;
#endif
	case SCLK_SARADC:
		rate = rk3368_saradc_get_clk(priv->cru);
		break;
	default:
		return -ENOENT;
	}

	return rate;
}

static ulong rk3368_clk_set_rate(struct clk *clk, ulong rate)
{
	__maybe_unused struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	debug("%s id:%ld rate:%ld\n", __func__, clk->id, rate);
	switch (clk->id) {
	case SCLK_SPI0 ... SCLK_SPI2:
		ret = rk3368_spi_set_clk(priv->cru, clk->id, rate);
		break;
#if IS_ENABLED(CONFIG_TPL_BUILD)
	case CLK_DDR:
		ret = rk3368_ddr_set_clk(priv->cru, rate);
		break;
#endif
#if !IS_ENABLED(CONFIG_SPL_BUILD) || CONFIG_IS_ENABLED(MMC)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		ret = rk3368_mmc_set_clk(clk, rate);
		break;
#endif
#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
	case SCLK_MAC:
		/* select the external clock */
		ret = rk3368_gmac_set_clk(priv->cru, rate);
		break;
#endif
	case SCLK_SARADC:
		ret = rk3368_saradc_set_clk(priv->cru, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}

static int __maybe_unused rk3368_gmac_set_parent(struct clk *clk, struct clk *parent)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	struct rk3368_cru *cru = priv->cru;
	const char *clock_output_name;
	int ret;

	/*
	 * If the requested parent is in the same clock-controller and
	 * the id is SCLK_MAC ("sclk_mac"), switch to the internal
	 * clock.
	 */
	if ((parent->dev == clk->dev) && (parent->id == SCLK_MAC)) {
		debug("%s: switching GMAC to SCLK_MAC\n", __func__);
		rk_clrreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
		return 0;
	}

	/*
	 * Otherwise, we need to check the clock-output-names of the
	 * requested parent to see if the requested id is "ext_gmac".
	 */
	ret = dev_read_string_index(parent->dev, "clock-output-names",
				    parent->id, &clock_output_name);
	if (ret < 0)
		return -ENODATA;

	/* If this is "ext_gmac", switch to the external clock input */
	if (!strcmp(clock_output_name, "ext_gmac")) {
		debug("%s: switching GMAC to external clock\n", __func__);
		rk_setreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
		return 0;
	}

	return -EINVAL;
}

static int __maybe_unused rk3368_clk_set_parent(struct clk *clk, struct clk *parent)
{
	switch (clk->id) {
	case SCLK_MAC:
		return rk3368_gmac_set_parent(clk, parent);
	}

	debug("%s: unsupported clk %ld\n", __func__, clk->id);
	return -ENOENT;
}

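/*
 * Minimal consumer-side sketch (assuming a peripheral node whose first
 * "clocks" entry references this CRU):
 *
 *	struct clk clk;
 *	int ret;
 *
 *	ret = clk_get_by_index(dev, 0, &clk);
 *	if (!ret)
 *		clk_set_rate(&clk, 24000000);
 *
 * The clk uclass dispatches such calls to the rk3368_clk_ops below.
 */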
static struct clk_ops rk3368_clk_ops = {
	.get_rate = rk3368_clk_get_rate,
	.set_rate = rk3368_clk_set_rate,
#if CONFIG_IS_ENABLED(OF_REAL)
	.set_parent = rk3368_clk_set_parent,
#endif
};

static int rk3368_clk_probe(struct udevice *dev)
{
	struct rk3368_clk_priv __maybe_unused *priv = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_plat *plat = dev_get_plat(dev);

	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif
#if IS_ENABLED(CONFIG_SPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
	rkclk_init(priv->cru);
#endif

	return 0;
}

static int rk3368_clk_of_to_plat(struct udevice *dev)
{
	if (CONFIG_IS_ENABLED(OF_REAL)) {
		struct rk3368_clk_priv *priv = dev_get_priv(dev);

		priv->cru = dev_read_addr_ptr(dev);
	}

	return 0;
}

static int rk3368_clk_bind(struct udevice *dev)
{
	int ret;
	struct udevice *sys_child;
	struct sysreset_reg *priv;

	/* The reset driver does not have a device node, so bind it here */
	ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
				 &sys_child);
	if (ret) {
		debug("Warning: No sysreset driver: ret=%d\n", ret);
	} else {
		priv = malloc(sizeof(struct sysreset_reg));
		priv->glb_srst_fst_value = offsetof(struct rk3368_cru,
						    glb_srst_fst_val);
		priv->glb_srst_snd_value = offsetof(struct rk3368_cru,
						    glb_srst_snd_val);
		dev_set_priv(sys_child, priv);
	}

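	/*
	 * Bind the Rockchip softreset driver at the CRU's softrst_con
	 * register block; the third argument (15) is the number of
	 * softreset registers handled by that driver on this SoC.
	 */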
#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
	ret = offsetof(struct rk3368_cru, softrst_con[0]);
	ret = rockchip_reset_bind(dev, ret, 15);
	if (ret)
		debug("Warning: software reset driver bind failed\n");
#endif

	return ret;
}

static const struct udevice_id rk3368_clk_ids[] = {
	{ .compatible = "rockchip,rk3368-cru" },
	{ }
};

U_BOOT_DRIVER(rockchip_rk3368_cru) = {
	.name = "rockchip_rk3368_cru",
	.id = UCLASS_CLK,
	.of_match = rk3368_clk_ids,
	.priv_auto = sizeof(struct rk3368_clk_priv),
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.plat_auto = sizeof(struct rk3368_clk_plat),
#endif
	.of_to_plat = rk3368_clk_of_to_plat,
	.ops = &rk3368_clk_ops,
	.bind = rk3368_clk_bind,
	.probe = rk3368_clk_probe,
};