// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2021 SiFive, Inc.
 * Wesley Terpstra
 * Paul Walmsley
 * Zong Li
 * Pragnesh Patel
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * The PRCI implements clock and reset control for the SiFive chip.
 * This driver assumes that it has sole control over all PRCI resources.
 *
 * This driver is based on the PRCI driver written by Wesley Terpstra:
 * https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
 */

#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <reset.h>
#include <asm/io.h>
#include <asm/arch/reset.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <dt-bindings/clock/sifive-fu740-prci.h>

#include "fu540-prci.h"
#include "fu740-prci.h"

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl(pd->va + offs);
}

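/**
 * __prci_writel() - write to a PRCI register
 * @v: value to write
 * @offs: register offset to write to (in bytes, from PRCI base address)
 * @pd: PRCI context
 *
 * Write @v to the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd.
 *
 * Context: Any context.
 */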
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU540 PRCI PLL configuration register,
 * split it into fields and populate the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
		     WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context. Caller must ensure that the contents of the
 *          record pointed to by @c do not change during the execution
 *          of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 *          register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context. Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd. Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context. Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write Clock enable/disable configuration
 * into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: Clock enable or disable value
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

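/**
 * sifive_prci_wrpll_recalc_rate() - calculate the current WRPLL output rate
 * @pc: PRCI clock to calculate the rate of
 * @parent_rate: rate of the parent (reference) clock, in Hz
 *
 * Compute the PLL output rate from the cached WRPLL configuration and
 * @parent_rate.
 *
 * Return: the WRPLL output rate, in Hz.
 */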
unsigned long sifive_prci_wrpll_recalc_rate(struct __prci_clock *pc,
					    unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}

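/**
 * sifive_prci_wrpll_round_rate() - determine the closest supported WRPLL rate
 * @pc: PRCI clock to round the rate for
 * @rate: requested output rate, in Hz
 * @parent_rate: pointer to the parent (reference) clock rate, in Hz
 *
 * Configure a scratch copy of the cached WRPLL settings for @rate and
 * return the output rate that configuration would produce, without
 * touching the hardware.
 *
 * Return: the rounded WRPLL output rate, in Hz.
 */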
unsigned long sifive_prci_wrpll_round_rate(struct __prci_clock *pc,
					   unsigned long rate,
					   unsigned long *parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, rate, *parent_rate);

	return wrpll_calc_output_rate(&c, *parent_rate);
}

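/**
 * sifive_prci_wrpll_set_rate() - program a WRPLL for a new output rate
 * @pc: PRCI clock to set the rate of
 * @rate: requested output rate, in Hz
 * @parent_rate: rate of the parent (reference) clock, in Hz
 *
 * Compute a new WRPLL configuration for @rate, bypass the PLL if a bypass
 * callback is provided, write the new configuration to the hardware, and
 * delay for the worst-case PLL lock time.
 *
 * Return: 0 upon success, or a negative error code returned by
 *         wrpll_configure_for_rate().
 */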
int sifive_prci_wrpll_set_rate(struct __prci_clock *pc,
			       unsigned long rate,
			       unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	return 0;
}

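/**
 * sifive_prci_clock_enable() - enable or disable a PLL-derived clock
 * @pc: PRCI clock to enable or disable
 * @enable: true to enable the clock, false to disable it
 *
 * When enabling, set the clock-enable (CKE) bit, then disable the PLL
 * bypass and release any downstream reset if the corresponding callbacks
 * are provided. When disabling, enable the PLL bypass first and then
 * clear the CKE bit.
 *
 * Return: 0.
 */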
int sifive_prci_clock_enable(struct __prci_clock *pc, bool enable)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (enable) {
		__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

		if (pwd->disable_bypass)
			pwd->disable_bypass(pd);

		if (pwd->release_reset)
			pwd->release_reset(pd);
	} else {
		u32 r;

		if (pwd->enable_bypass)
			pwd->enable_bypass(pd);

		r = __prci_readl(pd, pwd->cfg1_offs);
		r &= ~PRCI_COREPLLCFG1_CKE_MASK;

		__prci_wrpll_write_cfg1(pd, pwd, r);
	}

	return 0;
}

/* TLCLKSEL clock integration */

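/**
 * sifive_prci_tlclksel_recalc_rate() - calculate the current TLCLK rate
 * @pc: PRCI clock to calculate the rate of
 * @parent_rate: rate of the parent clock, in Hz
 *
 * Read the TLCLKSEL status bit and return @parent_rate either undivided
 * or divided by two accordingly.
 *
 * Return: the TLCLK rate, in Hz.
 */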
unsigned long sifive_prci_tlclksel_recalc_rate(struct __prci_clock *pc,
					       unsigned long parent_rate)
{
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}

/* HFPCLK clock integration */

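/**
 * sifive_prci_hfpclkplldiv_recalc_rate() - calculate the current HFPCLK rate
 * @pc: PRCI clock to calculate the rate of
 * @parent_rate: rate of the parent clock, in Hz
 *
 * Read the HFPCLK PLL output divider register and return @parent_rate
 * divided by the programmed divider value plus two.
 *
 * Return: the divided HFPCLK rate, in Hz.
 */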
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct __prci_clock *pc,
						   unsigned long parent_rate)
{
	struct __prci_data *pd = pc->pd;
	u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);

	return div_u64(parent_rate, div + 2);
}

/**
 * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
 * FINAL_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the final COREPLL output clock; return once
 * complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
 * output DVFS_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
 * output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
 * output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
 * output HFPCLKPLL
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

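/**
 * __prci_consumer_reset() - assert or deassert a named PRCI consumer reset
 * @rst_name: name of the reset signal to look up
 * @trigger: true to deassert (release) the reset, false to assert it
 *
 * Look up the SiFive reset driver, fetch the reset signal named @rst_name,
 * and assert or deassert it as requested.
 *
 * Return: 0 upon success, or a negative error code upon failure.
 */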
static int __prci_consumer_reset(const char *rst_name, bool trigger)
{
	struct udevice *dev;
	struct reset_ctl rst_sig;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_RESET,
					  DM_DRIVER_GET(sifive_reset),
					  &dev);
	if (ret) {
		dev_err(dev, "Reset driver not found: %d\n", ret);
		return ret;
	}

	ret = reset_get_by_name(dev, rst_name, &rst_sig);
	if (ret) {
		dev_err(dev, "failed to get %s reset\n", rst_name);
		return ret;
	}

	if (reset_valid(&rst_sig)) {
		if (trigger)
			ret = reset_deassert(&rst_sig);
		else
			ret = reset_assert(&rst_sig);
		if (ret) {
			dev_err(dev, "failed to trigger reset id = %ld\n",
				rst_sig.id);
			return ret;
		}
	}

	return ret;
}

/**
 * sifive_prci_ddr_release_reset() - Release DDR resets
 * @pd: struct __prci_data * for the PRCI containing the DDRCLK mux reg
 */
void sifive_prci_ddr_release_reset(struct __prci_data *pd)
{
	/* Release DDR ctrl reset */
	__prci_consumer_reset("ddr_ctrl", true);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/* Release DDR AXI reset */
	__prci_consumer_reset("ddr_axi", true);

	/* Release DDR AHB reset */
	__prci_consumer_reset("ddr_ahb", true);

	/* Release DDR PHY reset */
	__prci_consumer_reset("ddr_phy", true);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/*
	 * The resets take roughly 16 cycles to propagate, and no transactions
	 * may be issued to the DDR subsystem until it is out of reset, so
	 * wait here.
	 */
	for (int i = 0; i < 256; i++)
		asm volatile ("nop");
}

/**
 * sifive_prci_ethernet_release_reset() - Release Ethernet reset
 * @pd: struct __prci_data * for the PRCI containing the Ethernet CLK mux reg
 */
void sifive_prci_ethernet_release_reset(struct __prci_data *pd)
{
	/* Release GEMGXL reset */
	__prci_consumer_reset("gemgxl_reset", true);

	/* Procmon => core clock */
	__prci_writel(PRCI_PROCMONCFG_CORE_CLOCK_MASK, PRCI_PROCMONCFG_OFFSET,
		      pd);

	/* Release Chiplink reset */
	__prci_consumer_reset("cltx_reset", true);
}

/**
 * sifive_prci_cltx_release_reset() - Release CLTX reset
 * @pd: struct __prci_data * for the PRCI instance
 */
void sifive_prci_cltx_release_reset(struct __prci_data *pd)
{
	/* Release CLTX reset */
	__prci_consumer_reset("cltx_reset", true);
}

/* Core clock mux control */

/**
 * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the PLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

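/**
 * sifive_prci_parent_rate() - determine the rate of a PRCI clock's parent
 * @pc: PRCI clock whose parent rate is wanted
 * @data: PRCI clock descriptor table for this SoC
 *
 * If the parent is "corepll" or "hfpclkpll", recurse to recalculate the
 * parent PLL's output rate; otherwise return the rate of the external
 * hfclk or rtcclk input.
 *
 * Return: the parent clock rate in Hz, or -ENXIO if the parent cannot be
 *         resolved.
 */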
static ulong sifive_prci_parent_rate(struct __prci_clock *pc, struct prci_clk_desc *data)
{
	ulong parent_rate;
	ulong i;
	struct __prci_clock *p;

	if (strcmp(pc->parent_name, "corepll") == 0 ||
	    strcmp(pc->parent_name, "hfpclkpll") == 0) {
		for (i = 0; i < data->num_clks; i++) {
			if (strcmp(pc->parent_name, data->clks[i].name) == 0)
				break;
		}

		if (i >= data->num_clks)
			return -ENXIO;

		p = &data->clks[i];
		if (!p->pd || !p->ops->recalc_rate)
			return -ENXIO;

		return p->ops->recalc_rate(p, sifive_prci_parent_rate(p, data));
	}

	if (strcmp(pc->parent_name, "rtcclk") == 0)
		parent_rate = clk_get_rate(&pc->pd->parent_rtcclk);
	else
		parent_rate = clk_get_rate(&pc->pd->parent_hfclk);

	return parent_rate;
}

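/**
 * sifive_prci_get_rate() - .get_rate operation for the clk uclass
 * @clk: clock to query
 *
 * Return: the current rate of @clk in Hz, or -ENXIO if the clock ID is out
 *         of range or the clock cannot report a rate.
 */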
static ulong sifive_prci_get_rate(struct clk *clk)
{
	struct __prci_clock *pc;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd || !pc->ops->recalc_rate)
		return -ENXIO;

	return pc->ops->recalc_rate(pc, sifive_prci_parent_rate(pc, data));
}

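/**
 * sifive_prci_set_rate() - .set_rate operation for the clk uclass
 * @clk: clock to configure
 * @rate: requested rate, in Hz
 *
 * Return: @rate upon success, -ENXIO if the clock ID is out of range or
 *         the clock has no set_rate operation, or a negative error code
 *         from the underlying set_rate operation.
 */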
static ulong sifive_prci_set_rate(struct clk *clk, ulong rate)
{
	int err;
	struct __prci_clock *pc;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd || !pc->ops->set_rate)
		return -ENXIO;

	err = pc->ops->set_rate(pc, rate, sifive_prci_parent_rate(pc, data));
	if (err)
		return err;

	return rate;
}

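/**
 * sifive_prci_enable() - .enable operation for the clk uclass
 * @clk: clock to enable
 *
 * Return: 0 upon success (or if the clock has no enable operation), or
 *         -ENXIO if the clock ID is out of range or has no PRCI context.
 */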
static int sifive_prci_enable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 1);

	return ret;
}

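/**
 * sifive_prci_disable() - .disable operation for the clk uclass
 * @clk: clock to disable
 *
 * Return: 0 upon success (or if the clock has no enable operation), or
 *         -ENXIO if the clock ID is out of range or has no PRCI context.
 */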
static int sifive_prci_disable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;
	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(clk->dev);

	if (data->num_clks <= clk->id)
		return -ENXIO;

	pc = &data->clks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 0);

	return ret;
}

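/**
 * sifive_prci_probe() - initialize a PRCI device instance
 * @dev: PRCI device
 *
 * Obtain the PRCI register base address from the device tree, look up the
 * hfclk and rtcclk parent clocks, and cache the current WRPLL
 * configurations. In SPL on the FU740, also configure the HFPCLK or CLTX
 * PLL to 260 MHz and enable it, depending on which PLL the PRCIPLL
 * register reports.
 *
 * Return: 0 upon success, or a negative error code upon failure.
 */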
static int sifive_prci_probe(struct udevice *dev)
{
	int i, err;
	struct __prci_clock *pc;
	struct __prci_data *pd = dev_get_priv(dev);

	struct prci_clk_desc *data =
		(struct prci_clk_desc *)dev_get_driver_data(dev);

	pd->va = dev_read_addr_ptr(dev);
	if (!pd->va)
		return -EINVAL;

	err = clk_get_by_index(dev, 0, &pd->parent_hfclk);
	if (err)
		return err;

	err = clk_get_by_index(dev, 1, &pd->parent_rtcclk);
	if (err)
		return err;

	for (i = 0; i < data->num_clks; ++i) {
		pc = &data->clks[i];
		pc->pd = pd;
		if (pc->pwd)
			__prci_wrpll_read_cfg0(pd, pc->pwd);
	}

	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
		if (device_is_compatible(dev, "sifive,fu740-c000-prci")) {
			u32 prci_pll_reg;
			unsigned long parent_rate;

			prci_pll_reg = readl(pd->va + PRCI_PRCIPLL_OFFSET);

			if (prci_pll_reg & PRCI_PRCIPLL_HFPCLKPLL) {
				/*
				 * Only initialize the HFPCLK PLL. In this
				 * case the design uses hfpclk to drive
				 * Chiplink
				 */
				pc = &data->clks[FU740_PRCI_CLK_HFPCLKPLL];
				parent_rate = sifive_prci_parent_rate(pc, data);
				sifive_prci_wrpll_set_rate(pc, 260000000,
							   parent_rate);
				pc->ops->enable_clk(pc, 1);
			} else if (prci_pll_reg & PRCI_PRCIPLL_CLTXPLL) {
				/* CLTX pll init */
				pc = &data->clks[FU740_PRCI_CLK_CLTXPLL];
				parent_rate = sifive_prci_parent_rate(pc, data);
				sifive_prci_wrpll_set_rate(pc, 260000000,
							   parent_rate);
				pc->ops->enable_clk(pc, 1);
			}
		}
	}

	return 0;
}

static struct clk_ops sifive_prci_ops = {
	.set_rate = sifive_prci_set_rate,
	.get_rate = sifive_prci_get_rate,
	.enable = sifive_prci_enable,
	.disable = sifive_prci_disable,
};

static int sifive_clk_bind(struct udevice *dev)
{
	return sifive_reset_bind(dev, PRCI_DEVICERESETCNT);
}

static const struct udevice_id sifive_prci_ids[] = {
	{ .compatible = "sifive,fu540-c000-prci", .data = (ulong)&prci_clk_fu540 },
	{ .compatible = "sifive,fu740-c000-prci", .data = (ulong)&prci_clk_fu740 },
	{ }
};

U_BOOT_DRIVER(sifive_prci) = {
	.name = "sifive-prci",
	.id = UCLASS_CLK,
	.of_match = sifive_prci_ids,
	.probe = sifive_prci_probe,
	.ops = &sifive_prci_ops,
	.priv_auto = sizeof(struct __prci_data),
	.bind = sifive_clk_bind,
};