// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek common clock driver
 *
 * Copyright (C) 2018 MediaTek Inc.
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 */

#include <common.h>
#include <clk-uclass.h>
#include <div64.h>
#include <dm.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>

#include "clk-mtk.h"

#define REG_CON0		0
#define REG_CON1		4

#define CON0_BASE_EN		BIT(0)
#define CON0_PWR_ON		BIT(0)
#define CON0_ISO_EN		BIT(1)
#define CON1_PCW_CHG		BIT(31)

#define POSTDIV_MASK		0x7
#define INTEGER_BITS		7

/* scpsys clock off control */
#define CLK_SCP_CFG0		0x200
#define CLK_SCP_CFG1		0x204
#define SCP_ARMCK_OFF_EN	GENMASK(9, 0)
#define SCP_AXICK_DCM_DIS_EN	BIT(0)
#define SCP_AXICK_26M_SEL_EN	BIT(4)

/* shared functions */

/*
 * In case rate change propagation to parent clocks is undesirable, this
 * function is called recursively to find the parent whose rate is used to
 * calculate the accurate frequency.
 */
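/*
 * Example (taken from this file): mtk_topckgen_get_factor_rate() resolves a
 * CLK_PARENT_APMIXED parent by passing DM_DRIVER_GET(mtk_clk_apmixedsys), so
 * the rate is looked up through the apmixedsys driver rather than the
 * caller's own device.
 */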
static ulong mtk_clk_find_parent_rate(struct clk *clk, int id,
				      const struct driver *drv)
{
	struct clk parent = { .id = id, };

	if (drv) {
		struct udevice *dev;

		if (uclass_get_device_by_driver(UCLASS_CLK, drv, &dev))
			return -ENODEV;

		parent.dev = dev;
	} else {
		parent.dev = clk->dev;
	}

	return clk_get_rate(&parent);
}

static int mtk_clk_mux_set_parent(void __iomem *base, u32 parent,
				  const struct mtk_composite *mux)
{
	u32 val, index = 0;

	while (mux->parent[index] != parent)
		if (++index == mux->num_parents)
			return -EINVAL;

	if (mux->flags & CLK_MUX_SETCLR_UPD) {
		val = (mux->mux_mask << mux->mux_shift);
		writel(val, base + mux->mux_clr_reg);

		val = (index << mux->mux_shift);
		writel(val, base + mux->mux_set_reg);

		if (mux->upd_shift >= 0)
			writel(BIT(mux->upd_shift), base + mux->upd_reg);
	} else {
		/* switch mux to the selected parent */
		val = readl(base + mux->mux_reg);
		val &= ~(mux->mux_mask << mux->mux_shift);

		val |= index << mux->mux_shift;
		writel(val, base + mux->mux_reg);
	}

	return 0;
}

/* apmixedsys functions */

static unsigned long __mtk_pll_recalc_rate(const struct mtk_pll_data *pll,
					   u32 fin, u32 pcw, int postdiv)
{
	int pcwbits = pll->pcwbits;
	int pcwfbits;
	int ibits;
	u64 vco;
	u8 c = 0;

	/* The fractional part of the PLL divider. */
	ibits = pll->pcwibits ? pll->pcwibits : INTEGER_BITS;
	pcwfbits = pcwbits > ibits ? pcwbits - ibits : 0;

	vco = (u64)fin * pcw;

	if (pcwfbits && (vco & GENMASK(pcwfbits - 1, 0)))
		c = 1;

	vco >>= pcwfbits;

	if (c)
		vco++;

	return ((unsigned long)vco + postdiv - 1) / postdiv;
}
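
/*
 * Illustrative numbers (hypothetical, assuming a 26 MHz input): with
 * pcwbits = 22 and pcwibits = 7 there are 15 fractional bits, so
 * pcw = 0x1C0000 encodes a feedback divider of 0x1C0000 / 2^15 = 56.
 * The VCO then runs at 26 MHz * 56 = 1456 MHz and, with postdiv = 2,
 * __mtk_pll_recalc_rate() returns 728 MHz.
 */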

/**
 * MediaTek PLLs are configured through their pcw value. The pcw value
 * describes a divider in the PLL feedback loop which consists of 7 bits
 * for the integer part and the remaining bits (if present) for the
 * fractional part. They also have a 3-bit power-of-two post divider.
 */
static void mtk_pll_set_rate_regs(struct clk *clk, u32 pcw, int postdiv)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 val, chg;

	/* set postdiv */
	val = readl(priv->base + pll->pd_reg);
	val &= ~(POSTDIV_MASK << pll->pd_shift);
	val |= (ffs(postdiv) - 1) << pll->pd_shift;

	/* postdiv and pcw need to be set at the same time if on same register */
	if (pll->pd_reg != pll->pcw_reg) {
		writel(val, priv->base + pll->pd_reg);
		val = readl(priv->base + pll->pcw_reg);
	}

	/* set pcw */
	val &= ~GENMASK(pll->pcw_shift + pll->pcwbits - 1, pll->pcw_shift);
	val |= pcw << pll->pcw_shift;

	if (pll->pcw_chg_reg) {
		chg = readl(priv->base + pll->pcw_chg_reg);
		chg |= CON1_PCW_CHG;
		writel(val, priv->base + pll->pcw_reg);
		writel(chg, priv->base + pll->pcw_chg_reg);
	} else {
		val |= CON1_PCW_CHG;
		writel(val, priv->base + pll->pcw_reg);
	}

	udelay(20);
}

/**
 * mtk_pll_calc_values - calculate good values for a given input frequency.
 * @clk:	The clk
 * @pcw:	The pcw value (output)
 * @postdiv:	The post divider (output)
 * @freq:	The desired target frequency
 */
static void mtk_pll_calc_values(struct clk *clk, u32 *pcw, u32 *postdiv,
				u32 freq)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	unsigned long fmin = pll->fmin ? pll->fmin : 1000 * MHZ;
	u64 _pcw;
	int ibits;
	u32 val;

	if (freq > pll->fmax)
		freq = pll->fmax;

	for (val = 0; val < 5; val++) {
		*postdiv = 1 << val;
		if ((u64)freq * *postdiv >= fmin)
			break;
	}

	/* _pcw = freq * postdiv / xtal_rate * 2^pcwfbits */
	ibits = pll->pcwibits ? pll->pcwibits : INTEGER_BITS;
	_pcw = ((u64)freq << val) << (pll->pcwbits - ibits);
	do_div(_pcw, priv->tree->xtal2_rate);

	*pcw = (u32)_pcw;
}
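
/*
 * Worked example (hypothetical values, assuming xtal2_rate = 26 MHz,
 * pcwbits = 22, pcwibits = 7, fmin = 1000 MHz and fmax above the target):
 * a request for 1300 MHz already satisfies fmin, so postdiv = 1 (val = 0)
 * and _pcw = ((1300 MHz << 0) << 15) / 26 MHz = 50 * 2^15 = 0x190000.
 * Programming these values and reading them back through
 * __mtk_pll_recalc_rate() yields 1300 MHz again.
 */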

static ulong mtk_apmixedsys_set_rate(struct clk *clk, ulong rate)
{
	u32 pcw = 0;
	u32 postdiv;

	mtk_pll_calc_values(clk, &pcw, &postdiv, rate);
	mtk_pll_set_rate_regs(clk, pcw, postdiv);

	return 0;
}

static ulong mtk_apmixedsys_get_rate(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 postdiv;
	u32 pcw;

	postdiv = (readl(priv->base + pll->pd_reg) >> pll->pd_shift) &
		   POSTDIV_MASK;
	postdiv = 1 << postdiv;

	pcw = readl(priv->base + pll->pcw_reg) >> pll->pcw_shift;
	pcw &= GENMASK(pll->pcwbits - 1, 0);

	return __mtk_pll_recalc_rate(pll, priv->tree->xtal2_rate,
				     pcw, postdiv);
}

static int mtk_apmixedsys_enable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 r;

	r = readl(priv->base + pll->pwr_reg) | CON0_PWR_ON;
	writel(r, priv->base + pll->pwr_reg);
	udelay(1);

	r = readl(priv->base + pll->pwr_reg) & ~CON0_ISO_EN;
	writel(r, priv->base + pll->pwr_reg);
	udelay(1);

	r = readl(priv->base + pll->reg + REG_CON0);
	r |= pll->en_mask;
	writel(r, priv->base + pll->reg + REG_CON0);

	udelay(20);

	if (pll->flags & HAVE_RST_BAR) {
		r = readl(priv->base + pll->reg + REG_CON0);
		r |= pll->rst_bar_mask;
		writel(r, priv->base + pll->reg + REG_CON0);
	}

	return 0;
}

static int mtk_apmixedsys_disable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_pll_data *pll = &priv->tree->plls[clk->id];
	u32 r;

	if (pll->flags & HAVE_RST_BAR) {
		r = readl(priv->base + pll->reg + REG_CON0);
		r &= ~pll->rst_bar_mask;
		writel(r, priv->base + pll->reg + REG_CON0);
	}

	r = readl(priv->base + pll->reg + REG_CON0);
	r &= ~CON0_BASE_EN;
	writel(r, priv->base + pll->reg + REG_CON0);

	r = readl(priv->base + pll->pwr_reg) | CON0_ISO_EN;
	writel(r, priv->base + pll->pwr_reg);

	r = readl(priv->base + pll->pwr_reg) & ~CON0_PWR_ON;
	writel(r, priv->base + pll->pwr_reg);

	return 0;
}

/* topckgen functions */

static ulong mtk_factor_recalc_rate(const struct mtk_fixed_factor *fdiv,
				    ulong parent_rate)
{
	u64 rate = parent_rate * fdiv->mult;

	do_div(rate, fdiv->div);

	return rate;
}
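
/*
 * For example, a fixed-factor entry with mult = 1 and div = 2 simply
 * reports half of its parent's rate.
 */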

static ulong mtk_topckgen_get_factor_rate(struct clk *clk, u32 off)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_fixed_factor *fdiv = &priv->tree->fdivs[off];
	ulong rate;

	switch (fdiv->flags & CLK_PARENT_MASK) {
	case CLK_PARENT_APMIXED:
		rate = mtk_clk_find_parent_rate(clk, fdiv->parent,
				DM_DRIVER_GET(mtk_clk_apmixedsys));
		break;
	case CLK_PARENT_TOPCKGEN:
		rate = mtk_clk_find_parent_rate(clk, fdiv->parent, NULL);
		break;

	default:
		rate = priv->tree->xtal_rate;
	}

	return mtk_factor_recalc_rate(fdiv, rate);
}

static ulong mtk_topckgen_get_mux_rate(struct clk *clk, u32 off)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux = &priv->tree->muxes[off];
	u32 index;

	index = readl(priv->base + mux->mux_reg);
	index &= mux->mux_mask << mux->mux_shift;
	index = index >> mux->mux_shift;

	if (mux->parent[index])
		return mtk_clk_find_parent_rate(clk, mux->parent[index],
						NULL);

	return priv->tree->xtal_rate;
}

static ulong mtk_topckgen_get_rate(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);

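	/*
	 * Clock ids are laid out as: fixed-rate clocks below fdivs_offs,
	 * fixed-factor dividers from fdivs_offs up to muxes_offs, and muxes
	 * from muxes_offs onwards.
	 */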
	if (clk->id < priv->tree->fdivs_offs)
		return priv->tree->fclks[clk->id].rate;
	else if (clk->id < priv->tree->muxes_offs)
		return mtk_topckgen_get_factor_rate(clk, clk->id -
						    priv->tree->fdivs_offs);
	else
		return mtk_topckgen_get_mux_rate(clk, clk->id -
						 priv->tree->muxes_offs);
}

static int mtk_topckgen_enable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux;
	u32 val;

	if (clk->id < priv->tree->muxes_offs)
		return 0;

	mux = &priv->tree->muxes[clk->id - priv->tree->muxes_offs];
	if (mux->gate_shift < 0)
		return 0;

	/* enable clock gate */
	if (mux->flags & CLK_MUX_SETCLR_UPD) {
		val = BIT(mux->gate_shift);
		writel(val, priv->base + mux->mux_clr_reg);
	} else {
		val = readl(priv->base + mux->gate_reg);
		val &= ~BIT(mux->gate_shift);
		writel(val, priv->base + mux->gate_reg);
	}

	if (mux->flags & CLK_DOMAIN_SCPSYS) {
		/* enable scpsys clock off control */
		writel(SCP_ARMCK_OFF_EN, priv->base + CLK_SCP_CFG0);
		writel(SCP_AXICK_DCM_DIS_EN | SCP_AXICK_26M_SEL_EN,
		       priv->base + CLK_SCP_CFG1);
	}

	return 0;
}

static int mtk_topckgen_disable(struct clk *clk)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_composite *mux;
	u32 val;

	if (clk->id < priv->tree->muxes_offs)
		return 0;

	mux = &priv->tree->muxes[clk->id - priv->tree->muxes_offs];
	if (mux->gate_shift < 0)
		return 0;

	/* disable clock gate */
	if (mux->flags & CLK_MUX_SETCLR_UPD) {
		val = BIT(mux->gate_shift);
		writel(val, priv->base + mux->mux_set_reg);
	} else {
		val = readl(priv->base + mux->gate_reg);
		val |= BIT(mux->gate_shift);
		writel(val, priv->base + mux->gate_reg);
	}

	return 0;
}

static int mtk_topckgen_set_parent(struct clk *clk, struct clk *parent)
{
	struct mtk_clk_priv *priv = dev_get_priv(clk->dev);

	if (clk->id < priv->tree->muxes_offs)
		return 0;

	return mtk_clk_mux_set_parent(priv->base, parent->id,
			&priv->tree->muxes[clk->id - priv->tree->muxes_offs]);
}

/* CG functions */

static int mtk_clk_gate_enable(struct clk *clk)
{
	struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_gate *gate = &priv->gates[clk->id];
	u32 bit = BIT(gate->shift);

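	/*
	 * SETCLR gates have dedicated write-1-to-set/clear registers, so no
	 * read-modify-write is needed; NO_SETCLR gates toggle a bit in the
	 * status register instead. The _INV variants invert the sense of the
	 * bit: it is set to ungate the clock rather than to gate it.
	 */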
	switch (gate->flags & CLK_GATE_MASK) {
	case CLK_GATE_SETCLR:
		writel(bit, priv->base + gate->regs->clr_ofs);
		break;
	case CLK_GATE_SETCLR_INV:
		writel(bit, priv->base + gate->regs->set_ofs);
		break;
	case CLK_GATE_NO_SETCLR:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, 0);
		break;
	case CLK_GATE_NO_SETCLR_INV:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, bit);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mtk_clk_gate_disable(struct clk *clk)
{
	struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_gate *gate = &priv->gates[clk->id];
	u32 bit = BIT(gate->shift);

	switch (gate->flags & CLK_GATE_MASK) {
	case CLK_GATE_SETCLR:
		writel(bit, priv->base + gate->regs->set_ofs);
		break;
	case CLK_GATE_SETCLR_INV:
		writel(bit, priv->base + gate->regs->clr_ofs);
		break;
	case CLK_GATE_NO_SETCLR:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, bit);
		break;
	case CLK_GATE_NO_SETCLR_INV:
		clrsetbits_le32(priv->base + gate->regs->sta_ofs, bit, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static ulong mtk_clk_gate_get_rate(struct clk *clk)
{
	struct mtk_cg_priv *priv = dev_get_priv(clk->dev);
	const struct mtk_gate *gate = &priv->gates[clk->id];

	switch (gate->flags & CLK_PARENT_MASK) {
	case CLK_PARENT_APMIXED:
		return mtk_clk_find_parent_rate(clk, gate->parent,
				DM_DRIVER_GET(mtk_clk_apmixedsys));
	case CLK_PARENT_TOPCKGEN:
		return mtk_clk_find_parent_rate(clk, gate->parent,
				DM_DRIVER_GET(mtk_clk_topckgen));

	default:
		return priv->tree->xtal_rate;
	}
}

const struct clk_ops mtk_clk_apmixedsys_ops = {
	.enable = mtk_apmixedsys_enable,
	.disable = mtk_apmixedsys_disable,
	.set_rate = mtk_apmixedsys_set_rate,
	.get_rate = mtk_apmixedsys_get_rate,
};

const struct clk_ops mtk_clk_topckgen_ops = {
	.enable = mtk_topckgen_enable,
	.disable = mtk_topckgen_disable,
	.get_rate = mtk_topckgen_get_rate,
	.set_parent = mtk_topckgen_set_parent,
};

const struct clk_ops mtk_clk_gate_ops = {
	.enable = mtk_clk_gate_enable,
	.disable = mtk_clk_gate_disable,
	.get_rate = mtk_clk_gate_get_rate,
};

int mtk_common_clk_init(struct udevice *dev,
			const struct mtk_clk_tree *tree)
{
	struct mtk_clk_priv *priv = dev_get_priv(dev);

	priv->base = dev_read_addr_ptr(dev);
	if (!priv->base)
		return -ENOENT;

	priv->tree = tree;

	return 0;
}

int mtk_common_clk_gate_init(struct udevice *dev,
			     const struct mtk_clk_tree *tree,
			     const struct mtk_gate *gates)
{
	struct mtk_cg_priv *priv = dev_get_priv(dev);

	priv->base = dev_read_addr_ptr(dev);
	if (!priv->base)
		return -ENOENT;

	priv->tree = tree;
	priv->gates = gates;

	return 0;
}