blob: 630253fbb1df5f9e43e2b00b72fdf812eae8bbb7 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Andy Yanb9909aa2017-05-15 17:49:56 +08002/*
3 * (C) Copyright 2017 Rockchip Electronics Co., Ltd
4 * Author: Andy Yan <andy.yan@rock-chips.com>
Philipp Tomsich34b76132017-06-22 23:47:11 +02005 * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
Andy Yanb9909aa2017-05-15 17:49:56 +08006 */
7
Andy Yanb9909aa2017-05-15 17:49:56 +08008#include <clk-uclass.h>
9#include <dm.h>
Philipp Tomsich79aa1ab2017-06-22 23:51:37 +020010#include <dt-structs.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080011#include <errno.h>
Simon Glass0f2af882020-05-10 11:40:05 -060012#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070013#include <malloc.h>
Philipp Tomsich79aa1ab2017-06-22 23:51:37 +020014#include <mapmem.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080015#include <syscon.h>
David Wu4771ba62017-09-20 14:37:50 +080016#include <bitfield.h>
Kever Yang9fbe17c2019-03-28 11:01:23 +080017#include <asm/arch-rockchip/clock.h>
18#include <asm/arch-rockchip/cru_rk3368.h>
19#include <asm/arch-rockchip/hardware.h>
Simon Glass95588622020-12-22 19:30:28 -070020#include <dm/device-internal.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080021#include <dm/lists.h>
22#include <dt-bindings/clock/rk3368-cru.h>
Simon Glassdbd79542020-05-10 11:40:11 -060023#include <linux/delay.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060024#include <linux/printk.h>
Simon Glassfb64e362020-05-10 11:40:09 -060025#include <linux/stringify.h>
Andy Yanb9909aa2017-05-15 17:49:56 +080026
#if CONFIG_IS_ENABLED(OF_PLATDATA)
/* Platform data decoded from the devicetree at build time (OF_PLATDATA) */
struct rk3368_clk_plat {
	struct dtd_rockchip_rk3368_cru dtd;
};
#endif
32
/* PLL divisor set; per the DDR code's comment: Fout = ((Fin / nr) * nf) / no */
struct pll_div {
	u32 nr;		/* reference (input) divider */
	u32 nf;		/* feedback multiplier */
	u32 no;		/* output (post) divider */
};
38
/* Fixed input/target frequencies, in Hz */
#define OSC_HZ		(24 * 1000 * 1000)
#define APLL_L_HZ	(800 * 1000 * 1000)
#define APLL_B_HZ	(816 * 1000 * 1000)
#define GPLL_HZ		(576 * 1000 * 1000)
#define CPLL_HZ		(400 * 1000 * 1000)

/* Divider register fields store (div - 1) */
#define DIV_TO_RATE(input_rate, div)	((input_rate) / ((div) + 1))

/*
 * Build a pll_div initializer for a target rate from nr/no and statically
 * verify that the rate can be hit exactly with these divisors.
 */
#define PLL_DIVISORS(hz, _nr, _no) { \
	.nr = _nr, .nf = (u32)((u64)hz * _nr * _no / OSC_HZ), .no = _no}; \
	_Static_assert(((u64)hz * _nr * _no / OSC_HZ) * OSC_HZ /\
		       (_nr * _no) == hz, #hz "Hz cannot be hit with PLL " \
		       "divisors on line " __stringify(__LINE__));
52
#if IS_ENABLED(CONFIG_XPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/* Boot-time PLL settings, only needed while running from SPL/TPL */
static const struct pll_div apll_l_init_cfg = PLL_DIVISORS(APLL_L_HZ, 12, 2);
static const struct pll_div apll_b_init_cfg = PLL_DIVISORS(APLL_B_HZ, 1, 2);
#if !defined(CONFIG_TPL_BUILD)
/* GPLL/CPLL are left alone in TPL (see the comment in rkclk_init) */
static const struct pll_div gpll_init_cfg = PLL_DIVISORS(GPLL_HZ, 1, 2);
static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 6);
#endif
#endif
Andy Yanb9909aa2017-05-15 17:49:56 +080061
Philipp Tomsichfbf07a52017-07-04 14:49:38 +020062static ulong rk3368_clk_get_rate(struct clk *clk);
63
Andy Yanb9909aa2017-05-15 17:49:56 +080064/* Get pll rate by id */
65static uint32_t rkclk_pll_get_rate(struct rk3368_cru *cru,
66 enum rk3368_pll_id pll_id)
67{
68 uint32_t nr, no, nf;
69 uint32_t con;
70 struct rk3368_pll *pll = &cru->pll[pll_id];
71
72 con = readl(&pll->con3);
73
74 switch ((con & PLL_MODE_MASK) >> PLL_MODE_SHIFT) {
75 case PLL_MODE_SLOW:
76 return OSC_HZ;
77 case PLL_MODE_NORMAL:
78 con = readl(&pll->con0);
79 no = ((con & PLL_OD_MASK) >> PLL_OD_SHIFT) + 1;
80 nr = ((con & PLL_NR_MASK) >> PLL_NR_SHIFT) + 1;
81 con = readl(&pll->con1);
82 nf = ((con & PLL_NF_MASK) >> PLL_NF_SHIFT) + 1;
83
84 return (24 * nf / (nr * no)) * 1000000;
85 case PLL_MODE_DEEP_SLOW:
86 default:
87 return 32768;
88 }
89}
90
#if IS_ENABLED(CONFIG_XPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/*
 * Program a PLL with the given divisors and wait for lock.
 * Sequence: enter slow mode + assert reset, program dividers, release
 * reset, poll for lock, switch back to normal mode.  Do not reorder.
 */
static int rkclk_set_pll(struct rk3368_cru *cru, enum rk3368_pll_id pll_id,
			 const struct pll_div *div)
{
	struct rk3368_pll *pll = &cru->pll[pll_id];
	/* All PLLs have same VCO and output frequency range restrictions*/
	uint vco_hz = OSC_HZ / 1000 * div->nf / div->nr * 1000;
	uint output_hz = vco_hz / div->no;

	debug("PLL at %p: nf=%d, nr=%d, no=%d, vco=%u Hz, output=%u Hz\n",
	      pll, div->nf, div->nr, div->no, vco_hz, output_hz);

	/* enter slow mode and reset pll */
	rk_clrsetreg(&pll->con3, PLL_MODE_MASK | PLL_RESET_MASK,
		     PLL_RESET << PLL_RESET_SHIFT);

	/* divider fields store (value - 1) */
	rk_clrsetreg(&pll->con0, PLL_NR_MASK | PLL_OD_MASK,
		     ((div->nr - 1) << PLL_NR_SHIFT) |
		     ((div->no - 1) << PLL_OD_SHIFT));
	writel((div->nf - 1) << PLL_NF_SHIFT, &pll->con1);
	/*
	 * BWADJ should be set to NF / 2 to ensure the nominal bandwidth.
	 * Compare the RK3368 TRM, section "3.6.4 PLL Bandwidth Adjustment".
	 */
	clrsetbits_le32(&pll->con2, PLL_BWADJ_MASK, (div->nf >> 1) - 1);

	udelay(10);

	/* return from reset */
	rk_clrreg(&pll->con3, PLL_RESET_MASK);

	/* waiting for pll lock */
	while (!(readl(&pll->con1) & PLL_LOCK_STA))
		udelay(1);

	rk_clrsetreg(&pll->con3, PLL_MODE_MASK,
		     PLL_MODE_NORMAL << PLL_MODE_SHIFT);

	return 0;
}
#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800132
#if IS_ENABLED(CONFIG_XPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
/* One-time PLL bring-up, called from probe while running in SPL/TPL */
static void rkclk_init(struct rk3368_cru *cru)
{
	u32 apllb, aplll, dpll, cpll, gpll;

	rkclk_set_pll(cru, APLLB, &apll_b_init_cfg);
	rkclk_set_pll(cru, APLLL, &apll_l_init_cfg);
#if !defined(CONFIG_TPL_BUILD)
	/*
	 * If we plan to return to the boot ROM, we can't increase the
	 * GPLL rate from the SPL stage.
	 */
	rkclk_set_pll(cru, GPLL, &gpll_init_cfg);
	rkclk_set_pll(cru, CPLL, &cpll_init_cfg);
#endif

	/* Read back all rates for the debug log below */
	apllb = rkclk_pll_get_rate(cru, APLLB);
	aplll = rkclk_pll_get_rate(cru, APLLL);
	dpll = rkclk_pll_get_rate(cru, DPLL);
	cpll = rkclk_pll_get_rate(cru, CPLL);
	gpll = rkclk_pll_get_rate(cru, GPLL);

	debug("%s apllb(%d) apll(%d) dpll(%d) cpll(%d) gpll(%d)\n",
	      __func__, apllb, aplll, dpll, cpll, gpll);
}
#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800159
Simon Glass7ec24132024-09-29 19:49:48 -0600160#if !IS_ENABLED(CONFIG_XPL_BUILD) || CONFIG_IS_ENABLED(MMC)
Andy Yanb9909aa2017-05-15 17:49:56 +0800161static ulong rk3368_mmc_get_clk(struct rk3368_cru *cru, uint clk_id)
162{
163 u32 div, con, con_id, rate;
164 u32 pll_rate;
165
166 switch (clk_id) {
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200167 case HCLK_SDMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800168 con_id = 50;
169 break;
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200170 case HCLK_EMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800171 con_id = 51;
172 break;
173 case SCLK_SDIO0:
174 con_id = 48;
175 break;
176 default:
177 return -EINVAL;
178 }
179
180 con = readl(&cru->clksel_con[con_id]);
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200181 switch (con & MMC_PLL_SEL_MASK) {
Andy Yanb9909aa2017-05-15 17:49:56 +0800182 case MMC_PLL_SEL_GPLL:
183 pll_rate = rkclk_pll_get_rate(cru, GPLL);
184 break;
185 case MMC_PLL_SEL_24M:
186 pll_rate = OSC_HZ;
187 break;
188 case MMC_PLL_SEL_CPLL:
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200189 pll_rate = rkclk_pll_get_rate(cru, CPLL);
190 break;
Andy Yanb9909aa2017-05-15 17:49:56 +0800191 case MMC_PLL_SEL_USBPHY_480M:
192 default:
193 return -EINVAL;
194 }
195 div = (con & MMC_CLK_DIV_MASK) >> MMC_CLK_DIV_SHIFT;
196 rate = DIV_TO_RATE(pll_rate, div);
197
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200198 debug("%s: raw rate %d (post-divide by 2)\n", __func__, rate);
Andy Yanb9909aa2017-05-15 17:49:56 +0800199 return rate >> 1;
200}
201
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200202static ulong rk3368_mmc_find_best_rate_and_parent(struct clk *clk,
203 ulong rate,
204 u32 *best_mux,
205 u32 *best_div)
Andy Yanb9909aa2017-05-15 17:49:56 +0800206{
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200207 int i;
208 ulong best_rate = 0;
209 const ulong MHz = 1000000;
210 const struct {
211 u32 mux;
212 ulong rate;
213 } parents[] = {
214 { .mux = MMC_PLL_SEL_CPLL, .rate = CPLL_HZ },
215 { .mux = MMC_PLL_SEL_GPLL, .rate = GPLL_HZ },
216 { .mux = MMC_PLL_SEL_24M, .rate = 24 * MHz }
217 };
218
219 debug("%s: target rate %ld\n", __func__, rate);
220 for (i = 0; i < ARRAY_SIZE(parents); ++i) {
221 /*
222 * Find the largest rate no larger than the target-rate for
223 * the current parent.
224 */
225 ulong parent_rate = parents[i].rate;
226 u32 div = DIV_ROUND_UP(parent_rate, rate);
227 u32 adj_div = div;
228 ulong new_rate = parent_rate / adj_div;
229
230 debug("%s: rate %ld, parent-mux %d, parent-rate %ld, div %d\n",
231 __func__, rate, parents[i].mux, parents[i].rate, div);
232
233 /* Skip, if not representable */
234 if ((div - 1) > MMC_CLK_DIV_MASK)
235 continue;
Andy Yanb9909aa2017-05-15 17:49:56 +0800236
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200237 /* Skip, if we already have a better (or equal) solution */
238 if (new_rate <= best_rate)
239 continue;
240
241 /* This is our new best rate. */
242 best_rate = new_rate;
243 *best_mux = parents[i].mux;
244 *best_div = div - 1;
245 }
246
247 debug("%s: best_mux = %x, best_div = %d, best_rate = %ld\n",
248 __func__, *best_mux, *best_div, best_rate);
249
250 return best_rate;
251}
252
253static ulong rk3368_mmc_set_clk(struct clk *clk, ulong rate)
254{
255 struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
256 struct rk3368_cru *cru = priv->cru;
257 ulong clk_id = clk->id;
258 u32 con_id, mux = 0, div = 0;
259
260 /* Find the best parent and rate */
261 rk3368_mmc_find_best_rate_and_parent(clk, rate << 1, &mux, &div);
Andy Yanb9909aa2017-05-15 17:49:56 +0800262
263 switch (clk_id) {
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200264 case HCLK_SDMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800265 con_id = 50;
266 break;
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200267 case HCLK_EMMC:
Andy Yanb9909aa2017-05-15 17:49:56 +0800268 con_id = 51;
269 break;
270 case SCLK_SDIO0:
271 con_id = 48;
272 break;
273 default:
274 return -EINVAL;
275 }
276
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200277 rk_clrsetreg(&cru->clksel_con[con_id],
278 MMC_PLL_SEL_MASK | MMC_CLK_DIV_MASK,
279 mux | div);
Andy Yanb9909aa2017-05-15 17:49:56 +0800280
281 return rk3368_mmc_get_clk(cru, clk_id);
282}
Philipp Tomsichfbf07a52017-07-04 14:49:38 +0200283#endif
Andy Yanb9909aa2017-05-15 17:49:56 +0800284
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200285#if IS_ENABLED(CONFIG_TPL_BUILD)
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200286static ulong rk3368_ddr_set_clk(struct rk3368_cru *cru, ulong set_rate)
287{
288 const struct pll_div *dpll_cfg = NULL;
289 const ulong MHz = 1000000;
290
291 /* Fout = ((Fin /NR) * NF )/ NO */
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200292 static const struct pll_div dpll_1200 = PLL_DIVISORS(1200 * MHz, 1, 1);
293 static const struct pll_div dpll_1332 = PLL_DIVISORS(1332 * MHz, 2, 1);
294 static const struct pll_div dpll_1600 = PLL_DIVISORS(1600 * MHz, 3, 2);
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200295
296 switch (set_rate) {
297 case 1200*MHz:
298 dpll_cfg = &dpll_1200;
299 break;
300 case 1332*MHz:
301 dpll_cfg = &dpll_1332;
302 break;
303 case 1600*MHz:
304 dpll_cfg = &dpll_1600;
305 break;
306 default:
Masahiro Yamada81e10422017-09-16 14:10:41 +0900307 pr_err("Unsupported SDRAM frequency!,%ld\n", set_rate);
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200308 }
309 rkclk_set_pll(cru, DPLL, dpll_cfg);
310
311 return set_rate;
312}
Philipp Tomsichc23a9932017-07-05 11:55:23 +0200313#endif
Philipp Tomsich313b2da2017-06-23 00:01:10 +0200314
Philipp Tomsicha249f102017-07-14 19:57:39 +0200315#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
David Wue72793d2018-01-13 14:07:04 +0800316static ulong rk3368_gmac_set_clk(struct rk3368_cru *cru, ulong set_rate)
Philipp Tomsicha249f102017-07-14 19:57:39 +0200317{
David Wue72793d2018-01-13 14:07:04 +0800318 ulong ret;
319
Philipp Tomsicha249f102017-07-14 19:57:39 +0200320 /*
David Wue72793d2018-01-13 14:07:04 +0800321 * The gmac clock can be derived either from an external clock
322 * or can be generated from internally by a divider from SCLK_MAC.
Philipp Tomsicha249f102017-07-14 19:57:39 +0200323 */
David Wue72793d2018-01-13 14:07:04 +0800324 if (readl(&cru->clksel_con[43]) & GMAC_MUX_SEL_EXTCLK) {
325 /* An external clock will always generate the right rate... */
326 ret = set_rate;
327 } else {
328 u32 con = readl(&cru->clksel_con[43]);
329 ulong pll_rate;
330 u8 div;
331
332 if (((con >> GMAC_PLL_SHIFT) & GMAC_PLL_MASK) ==
333 GMAC_PLL_SELECT_GENERAL)
334 pll_rate = GPLL_HZ;
335 else if (((con >> GMAC_PLL_SHIFT) & GMAC_PLL_MASK) ==
336 GMAC_PLL_SELECT_CODEC)
337 pll_rate = CPLL_HZ;
338 else
339 /* CPLL is not set */
340 return -EPERM;
341
342 div = DIV_ROUND_UP(pll_rate, set_rate) - 1;
343 if (div <= 0x1f)
344 rk_clrsetreg(&cru->clksel_con[43], GMAC_DIV_CON_MASK,
345 div << GMAC_DIV_CON_SHIFT);
346 else
347 debug("Unsupported div for gmac:%d\n", div);
348
349 return DIV_TO_RATE(pll_rate, div);
350 }
351
352 return ret;
Philipp Tomsicha249f102017-07-14 19:57:39 +0200353}
354#endif
355
/*
 * RK3368 SPI clocks have a common divider-width (7 bits) and a single bit
 * to select either CPLL or GPLL as the clock-parent. The location within
 * the enclosing CLKSEL_CON (i.e. div_shift and sel_shift) are variable.
 */

struct spi_clkreg {
	uint8_t reg;		/* CLKSEL_CON[reg] register in CRU */
	uint8_t div_shift;	/* bit offset of the 7-bit divider field */
	uint8_t sel_shift;	/* bit offset of the parent-select bit */
};
367
/*
 * The entries are numbered relative to their offset from SCLK_SPI0.
 */
static const struct spi_clkreg spi_clkregs[] = {
	[0] = { .reg = 45, .div_shift = 0, .sel_shift = 7, },
	[1] = { .reg = 45, .div_shift = 8, .sel_shift = 15, },
	[2] = { .reg = 46, .div_shift = 8, .sel_shift = 15, },
};
376
377static inline u32 extract_bits(u32 val, unsigned width, unsigned shift)
378{
379 return (val >> shift) & ((1 << width) - 1);
380}
381
382static ulong rk3368_spi_get_clk(struct rk3368_cru *cru, ulong clk_id)
383{
384 const struct spi_clkreg *spiclk = NULL;
385 u32 div, val;
386
387 switch (clk_id) {
388 case SCLK_SPI0 ... SCLK_SPI2:
389 spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
390 break;
391
392 default:
Masahiro Yamada81e10422017-09-16 14:10:41 +0900393 pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200394 return -EINVAL;
395 }
396
397 val = readl(&cru->clksel_con[spiclk->reg]);
398 div = extract_bits(val, 7, spiclk->div_shift);
399
400 debug("%s: div 0x%x\n", __func__, div);
401 return DIV_TO_RATE(GPLL_HZ, div);
402}
403
404static ulong rk3368_spi_set_clk(struct rk3368_cru *cru, ulong clk_id, uint hz)
405{
406 const struct spi_clkreg *spiclk = NULL;
407 int src_clk_div;
408
409 src_clk_div = DIV_ROUND_UP(GPLL_HZ, hz);
410 assert(src_clk_div < 127);
411
412 switch (clk_id) {
413 case SCLK_SPI0 ... SCLK_SPI2:
414 spiclk = &spi_clkregs[clk_id - SCLK_SPI0];
415 break;
416
417 default:
Masahiro Yamada81e10422017-09-16 14:10:41 +0900418 pr_err("%s: SPI clk-id %ld not supported\n", __func__, clk_id);
Philipp Tomsichb4fb55f2017-07-25 16:48:16 +0200419 return -EINVAL;
420 }
421
422 rk_clrsetreg(&cru->clksel_con[spiclk->reg],
423 ((0x7f << spiclk->div_shift) |
424 (0x1 << spiclk->sel_shift)),
425 ((src_clk_div << spiclk->div_shift) |
426 (1 << spiclk->sel_shift)));
427
428 return rk3368_spi_get_clk(cru, clk_id);
429}
430
David Wu4771ba62017-09-20 14:37:50 +0800431static ulong rk3368_saradc_get_clk(struct rk3368_cru *cru)
432{
433 u32 div, val;
434
435 val = readl(&cru->clksel_con[25]);
436 div = bitfield_extract(val, CLK_SARADC_DIV_CON_SHIFT,
437 CLK_SARADC_DIV_CON_WIDTH);
438
439 return DIV_TO_RATE(OSC_HZ, div);
440}
441
442static ulong rk3368_saradc_set_clk(struct rk3368_cru *cru, uint hz)
443{
444 int src_clk_div;
445
446 src_clk_div = DIV_ROUND_UP(OSC_HZ, hz) - 1;
447 assert(src_clk_div < 128);
448
449 rk_clrsetreg(&cru->clksel_con[25],
450 CLK_SARADC_DIV_CON_MASK,
451 src_clk_div << CLK_SARADC_DIV_CON_SHIFT);
452
453 return rk3368_saradc_get_clk(cru);
454}
455
/*
 * clk_ops .get_rate callback: report the current rate of the clock
 * identified by clk->id, or -ENOENT for unhandled ids.
 */
static ulong rk3368_clk_get_rate(struct clk *clk)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong rate = 0;

	debug("%s: id %ld\n", __func__, clk->id);
	switch (clk->id) {
	case PLL_CPLL:
		rate = rkclk_pll_get_rate(priv->cru, CPLL);
		break;
	case PLL_GPLL:
		rate = rkclk_pll_get_rate(priv->cru, GPLL);
		break;
	case SCLK_SPI0 ... SCLK_SPI2:
		rate = rk3368_spi_get_clk(priv->cru, clk->id);
		break;
#if !IS_ENABLED(CONFIG_XPL_BUILD) || CONFIG_IS_ENABLED(MMC)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		rate = rk3368_mmc_get_clk(priv->cru, clk->id);
		break;
#endif
	case SCLK_SARADC:
		rate = rk3368_saradc_get_clk(priv->cru);
		break;
	default:
		return -ENOENT;
	}

	return rate;
}
487
/*
 * clk_ops .set_rate callback: dispatch to the per-peripheral setter for
 * clk->id.  Returns the achieved rate, or -ENOENT for unhandled ids.
 */
static ulong rk3368_clk_set_rate(struct clk *clk, ulong rate)
{
	__maybe_unused struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	ulong ret = 0;

	debug("%s id:%ld rate:%ld\n", __func__, clk->id, rate);
	switch (clk->id) {
	case SCLK_SPI0 ... SCLK_SPI2:
		ret = rk3368_spi_set_clk(priv->cru, clk->id, rate);
		break;
#if IS_ENABLED(CONFIG_TPL_BUILD)
	case CLK_DDR:
		ret = rk3368_ddr_set_clk(priv->cru, rate);
		break;
#endif
#if !IS_ENABLED(CONFIG_XPL_BUILD) || CONFIG_IS_ENABLED(MMC)
	case HCLK_SDMMC:
	case HCLK_EMMC:
		ret = rk3368_mmc_set_clk(clk, rate);
		break;
#endif
#if CONFIG_IS_ENABLED(GMAC_ROCKCHIP)
	case SCLK_MAC:
		/* select the external clock */
		ret = rk3368_gmac_set_clk(priv->cru, rate);
		break;
#endif
	case SCLK_SARADC:
		ret = rk3368_saradc_set_clk(priv->cru, rate);
		break;
	default:
		return -ENOENT;
	}

	return ret;
}
524
/*
 * Reparent the GMAC (SCLK_MAC) between the internally generated clock
 * and the external "ext_gmac" input.
 *
 * Returns 0 on success, -ENODATA if the parent's clock-output-names
 * property cannot be read, -EINVAL if the requested parent is not
 * recognised.
 */
static int __maybe_unused rk3368_gmac_set_parent(struct clk *clk, struct clk *parent)
{
	struct rk3368_clk_priv *priv = dev_get_priv(clk->dev);
	struct rk3368_cru *cru = priv->cru;
	const char *clock_output_name;
	int ret;

	/*
	 * If the requested parent is in the same clock-controller and
	 * the id is SCLK_MAC ("sclk_mac"), switch to the internal
	 * clock.
	 */
	if ((parent->dev == clk->dev) && (parent->id == SCLK_MAC)) {
		debug("%s: switching GAMC to SCLK_MAC\n", __func__);
		rk_clrreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
		return 0;
	}

	/*
	 * Otherwise, we need to check the clock-output-names of the
	 * requested parent to see if the requested id is "ext_gmac".
	 */
	ret = dev_read_string_index(parent->dev, "clock-output-names",
				    parent->id, &clock_output_name);
	if (ret < 0)
		return -ENODATA;

	/* If this is "ext_gmac", switch to the external clock input */
	if (!strcmp(clock_output_name, "ext_gmac")) {
		debug("%s: switching GMAC to external clock\n", __func__);
		rk_setreg(&cru->clksel_con[43], GMAC_MUX_SEL_EXTCLK);
		return 0;
	}

	return -EINVAL;
}
561
Philipp Tomsich6dd2fb42018-01-25 15:27:10 +0100562static int __maybe_unused rk3368_clk_set_parent(struct clk *clk, struct clk *parent)
David Wue72793d2018-01-13 14:07:04 +0800563{
564 switch (clk->id) {
565 case SCLK_MAC:
566 return rk3368_gmac_set_parent(clk, parent);
567 }
568
569 debug("%s: unsupported clk %ld\n", __func__, clk->id);
570 return -ENOENT;
571}
572
/* Clock uclass operations for the RK3368 CRU */
static struct clk_ops rk3368_clk_ops = {
	.get_rate = rk3368_clk_get_rate,
	.set_rate = rk3368_clk_set_rate,
#if CONFIG_IS_ENABLED(OF_REAL)
	/* set_parent reads devicetree properties, so needs a real DT */
	.set_parent = rk3368_clk_set_parent,
#endif
};
580
/* Probe: resolve the CRU base (OF_PLATDATA case) and, in SPL/TPL, set up PLLs */
static int rk3368_clk_probe(struct udevice *dev)
{
	struct rk3368_clk_priv __maybe_unused *priv = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_clk_plat *plat = dev_get_plat(dev);

	/* With OF_PLATDATA the CRU address comes from the dtd 'reg' data */
	priv->cru = map_sysmem(plat->dtd.reg[0], plat->dtd.reg[1]);
#endif
#if IS_ENABLED(CONFIG_XPL_BUILD) || IS_ENABLED(CONFIG_TPL_BUILD)
	rkclk_init(priv->cru);
#endif

	return 0;
}
595
/* of_to_plat: with a real devicetree, read the CRU base from 'reg' */
static int rk3368_clk_of_to_plat(struct udevice *dev)
{
	if (CONFIG_IS_ENABLED(OF_REAL)) {
		struct rk3368_clk_priv *priv = dev_get_priv(dev);

		priv->cru = dev_read_addr_ptr(dev);
	}

	return 0;
}
606
607static int rk3368_clk_bind(struct udevice *dev)
608{
609 int ret;
Kever Yang4fbb6c22017-11-03 15:16:13 +0800610 struct udevice *sys_child;
611 struct sysreset_reg *priv;
Andy Yanb9909aa2017-05-15 17:49:56 +0800612
613 /* The reset driver does not have a device node, so bind it here */
Kever Yang4fbb6c22017-11-03 15:16:13 +0800614 ret = device_bind_driver(dev, "rockchip_sysreset", "sysreset",
615 &sys_child);
616 if (ret) {
617 debug("Warning: No sysreset driver: ret=%d\n", ret);
618 } else {
619 priv = malloc(sizeof(struct sysreset_reg));
620 priv->glb_srst_fst_value = offsetof(struct rk3368_cru,
621 glb_srst_fst_val);
622 priv->glb_srst_snd_value = offsetof(struct rk3368_cru,
623 glb_srst_snd_val);
Simon Glass95588622020-12-22 19:30:28 -0700624 dev_set_priv(sys_child, priv);
Kever Yang4fbb6c22017-11-03 15:16:13 +0800625 }
Andy Yanb9909aa2017-05-15 17:49:56 +0800626
Heiko Stuebner416f8d32019-11-09 00:06:30 +0100627#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
Elaine Zhang432976f2017-12-19 18:22:38 +0800628 ret = offsetof(struct rk3368_cru, softrst_con[0]);
629 ret = rockchip_reset_bind(dev, ret, 15);
630 if (ret)
Eugen Hristevf1798262023-04-11 10:17:56 +0300631 debug("Warning: software reset driver bind failed\n");
Elaine Zhang432976f2017-12-19 18:22:38 +0800632#endif
633
Andy Yanb9909aa2017-05-15 17:49:56 +0800634 return ret;
635}
636
/* Devicetree compatible strings handled by this driver */
static const struct udevice_id rk3368_clk_ids[] = {
	{ .compatible = "rockchip,rk3368-cru" },
	{ }
};
641
/* Driver-model registration of the RK3368 clock (CRU) driver */
U_BOOT_DRIVER(rockchip_rk3368_cru) = {
	.name = "rockchip_rk3368_cru",
	.id = UCLASS_CLK,
	.of_match = rk3368_clk_ids,
	.priv_auto = sizeof(struct rk3368_clk_priv),
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	.plat_auto = sizeof(struct rk3368_clk_plat),
#endif
	.of_to_plat = rk3368_clk_of_to_plat,
	.ops = &rk3368_clk_ops,
	.bind = rk3368_clk_bind,
	.probe = rk3368_clk_probe,
};