// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Copyright (C) 2018 SiFive, Inc.
 * Wesley Terpstra
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * The FU540 PRCI implements clock and reset control for the SiFive
 * FU540-C000 chip.  This driver assumes that it has sole control
 * over all PRCI resources.
 *
 * This driver is based on the PRCI driver written by Wesley Terpstra.
 *
 * See commit 999529edf517ed75b56659d456d221b2ee56bb60 of:
 * https://github.com/riscv/riscv-linux
 *
 * References:
 * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
 */

#include <common.h>
#include <asm/io.h>
#include <clk-uclass.h>
#include <clk.h>
#include <div64.h>
#include <dm.h>
#include <errno.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <linux/math64.h>
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <dt-bindings/clock/sifive-fu540-prci.h>
#include <dt-bindings/reset/sifive-fu540-prci.h>

/*
 * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
 *     hfclk and rtcclk
 */
#define EXPECTED_CLK_PARENT_COUNT	2

/*
 * Register offsets and bitmasks
 */

/* COREPLLCFG0 */
#define PRCI_COREPLLCFG0_OFFSET		0x4
#define PRCI_COREPLLCFG0_DIVR_SHIFT	0
#define PRCI_COREPLLCFG0_DIVR_MASK	(0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
#define PRCI_COREPLLCFG0_DIVF_SHIFT	6
#define PRCI_COREPLLCFG0_DIVF_MASK	(0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
#define PRCI_COREPLLCFG0_DIVQ_SHIFT	15
#define PRCI_COREPLLCFG0_DIVQ_MASK	(0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
#define PRCI_COREPLLCFG0_RANGE_SHIFT	18
#define PRCI_COREPLLCFG0_RANGE_MASK	(0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
#define PRCI_COREPLLCFG0_BYPASS_SHIFT	24
#define PRCI_COREPLLCFG0_BYPASS_MASK	(0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
#define PRCI_COREPLLCFG0_FSE_SHIFT	25
#define PRCI_COREPLLCFG0_FSE_MASK	(0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
#define PRCI_COREPLLCFG0_LOCK_SHIFT	31
#define PRCI_COREPLLCFG0_LOCK_MASK	(0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)

/* COREPLLCFG1 */
#define PRCI_COREPLLCFG1_OFFSET		0x8
#define PRCI_COREPLLCFG1_CKE_SHIFT	31
#define PRCI_COREPLLCFG1_CKE_MASK	(0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)

/* DDRPLLCFG0 */
#define PRCI_DDRPLLCFG0_OFFSET		0xc
#define PRCI_DDRPLLCFG0_DIVR_SHIFT	0
#define PRCI_DDRPLLCFG0_DIVR_MASK	(0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
#define PRCI_DDRPLLCFG0_DIVF_SHIFT	6
#define PRCI_DDRPLLCFG0_DIVF_MASK	(0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
#define PRCI_DDRPLLCFG0_DIVQ_SHIFT	15
#define PRCI_DDRPLLCFG0_DIVQ_MASK	(0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
#define PRCI_DDRPLLCFG0_RANGE_SHIFT	18
#define PRCI_DDRPLLCFG0_RANGE_MASK	(0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
#define PRCI_DDRPLLCFG0_BYPASS_SHIFT	24
#define PRCI_DDRPLLCFG0_BYPASS_MASK	(0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
#define PRCI_DDRPLLCFG0_FSE_SHIFT	25
#define PRCI_DDRPLLCFG0_FSE_MASK	(0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
#define PRCI_DDRPLLCFG0_LOCK_SHIFT	31
#define PRCI_DDRPLLCFG0_LOCK_MASK	(0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)

/* DDRPLLCFG1 */
#define PRCI_DDRPLLCFG1_OFFSET		0x10
#define PRCI_DDRPLLCFG1_CKE_SHIFT	31
#define PRCI_DDRPLLCFG1_CKE_MASK	(0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)

/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET	0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT	0
#define PRCI_GEMGXLPLLCFG0_DIVR_MASK \
		(0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT	6
#define PRCI_GEMGXLPLLCFG0_DIVF_MASK \
		(0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT	15
#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK	(0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT	18
#define PRCI_GEMGXLPLLCFG0_RANGE_MASK \
		(0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT	24
#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK \
		(0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT	25
#define PRCI_GEMGXLPLLCFG0_FSE_MASK \
		(0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT	31
#define PRCI_GEMGXLPLLCFG0_LOCK_MASK	(0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)

/* GEMGXLPLLCFG1 */
#define PRCI_GEMGXLPLLCFG1_OFFSET	0x20
#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT	31
#define PRCI_GEMGXLPLLCFG1_CKE_MASK	(0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)

/* CORECLKSEL */
#define PRCI_CORECLKSEL_OFFSET			0x24
#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT	0
#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
		(0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)

/* DEVICESRESETREG */
#define PRCI_DEVICESRESETREG_OFFSET	0x28

#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
		(0x1 << PRCI_RST_DDR_CTRL_N)
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
		(0x1 << PRCI_RST_DDR_AXI_N)
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
		(0x1 << PRCI_RST_DDR_AHB_N)
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
		(0x1 << PRCI_RST_DDR_PHY_N)
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
		(0x1 << PRCI_RST_GEMGXL_N)

/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET			0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT	1
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
		(0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)

/* PROCMONCFG */
#define PRCI_PROCMONCFG_OFFSET			0xF0
#define PRCI_PROCMONCFG_CORE_CLOCK_SHIFT	24
#define PRCI_PROCMONCFG_CORE_CLOCK_MASK \
		(0x1 << PRCI_PROCMONCFG_CORE_CLOCK_SHIFT)

/*
 * Private structures
 */

/**
 * struct __prci_data - per-device-instance data
 * @va: base virtual address of the PRCI IP block
 * @parent_hfclk: the hfclk parent clock
 * @parent_rtcclk: the rtcclk parent clock
 *
 * PRCI per-device instance data
 */
struct __prci_data {
	void *va;
	struct clk parent_hfclk;
	struct clk parent_rtcclk;
};

/**
 * struct __prci_wrpll_data - WRPLL configuration and integration data
 * @c: WRPLL current configuration record
 * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
 * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
 * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
 * @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
 * @release_reset: fn ptr to code to release clock reset
 *
 * @enable_bypass and @disable_bypass are used for WRPLL instances
 * that contain a separate external glitchless clock mux downstream
 * from the PLL.  The WRPLL internal bypass mux is not glitchless.
 */
struct __prci_wrpll_data {
	struct wrpll_cfg c;
	void (*enable_bypass)(struct __prci_data *pd);
	void (*disable_bypass)(struct __prci_data *pd);
	u8 cfg0_offs;
	u8 cfg1_offs;
	void (*release_reset)(struct __prci_data *pd);
};

struct __prci_clock;

/* struct __prci_clock_ops - clock operations */
struct __prci_clock_ops {
	int (*set_rate)(struct __prci_clock *pc,
			unsigned long rate,
			unsigned long parent_rate);
	unsigned long (*round_rate)(struct __prci_clock *pc,
				    unsigned long rate,
				    unsigned long *parent_rate);
	unsigned long (*recalc_rate)(struct __prci_clock *pc,
				     unsigned long parent_rate);
	int (*enable_clk)(struct __prci_clock *pc, bool enable);
};

/**
 * struct __prci_clock - describes a clock device managed by PRCI
 * @name: user-readable clock name string - should match the manual
 * @parent_name: parent name for this clock
 * @ops: struct __prci_clock_ops for control
 * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
 * @pd: PRCI-specific data associated with this clock (if not NULL)
 *
 * PRCI clock data.  Used by the PRCI driver to register PRCI-provided
 * clocks with the U-Boot clock framework.
 */
struct __prci_clock {
	const char *name;
	const char *parent_name;
	const struct __prci_clock_ops *ops;
	struct __prci_wrpll_data *pwd;
	struct __prci_data *pd;
};

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl(pd->va + offs);
}

static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU540 PRCI PLL configuration register,
 * split it into fields and populate it into the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
		     WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context.  Caller must ensure that the contents of the
 *	    record pointed to by @c do not change during the execution
 *	    of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 *	    register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *	    @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd.  Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *	    @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write clock enable/disable configuration
 * into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: clock enable or disable value
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

/* Core clock mux control */

/**
 * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

/**
 * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the PLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

static unsigned long sifive_fu540_prci_wrpll_recalc_rate(
						struct __prci_clock *pc,
						unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}

static unsigned long sifive_fu540_prci_wrpll_round_rate(
						struct __prci_clock *pc,
						unsigned long rate,
						unsigned long *parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, rate, *parent_rate);

	return wrpll_calc_output_rate(&c, *parent_rate);
}

static int sifive_fu540_prci_wrpll_set_rate(struct __prci_clock *pc,
					    unsigned long rate,
					    unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

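	/*
	 * For PLLs with a downstream glitchless mux (see struct
	 * __prci_wrpll_data), park consumers on the mux's non-PLL input
	 * while the PLL is reprogrammed and relocks.
	 */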
	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);

	return 0;
}

static int sifive_fu540_prci_clock_enable(struct __prci_clock *pc, bool enable)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (enable) {
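		/*
		 * CKE sits at bit 31 in every *PLLCFG1 register (see the
		 * masks above), so the COREPLL mask is reused here for the
		 * DDR and GEMGXL PLLs as well.
		 */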
		__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

		if (pwd->release_reset)
			pwd->release_reset(pd);
	} else {
		u32 r;

		r = __prci_readl(pd, pwd->cfg1_offs);
		r &= ~PRCI_COREPLLCFG1_CKE_MASK;

		__prci_wrpll_write_cfg1(pd, pwd, r);
	}

	return 0;
}

static const struct __prci_clock_ops sifive_fu540_prci_wrpll_clk_ops = {
	.set_rate = sifive_fu540_prci_wrpll_set_rate,
	.round_rate = sifive_fu540_prci_wrpll_round_rate,
	.recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
	.enable_clk = sifive_fu540_prci_clock_enable,
};

/* TLCLKSEL clock integration */

static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(
						struct __prci_clock *pc,
						unsigned long parent_rate)
{
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}

static const struct __prci_clock_ops sifive_fu540_prci_tlclksel_clk_ops = {
	.recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
};

/**
 * __prci_ddr_release_reset() - Release DDR reset
 * @pd: struct __prci_data * for the PRCI containing the DEVICESRESETREG
 */
static void __prci_ddr_release_reset(struct __prci_data *pd)
{
	u32 v;

	v = __prci_readl(pd, PRCI_DEVICESRESETREG_OFFSET);
	v |= PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK;
	__prci_writel(v, PRCI_DEVICESRESETREG_OFFSET, pd);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");
	v = __prci_readl(pd, PRCI_DEVICESRESETREG_OFFSET);
	v |= (PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK |
	      PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK |
	      PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK);
	__prci_writel(v, PRCI_DEVICESRESETREG_OFFSET, pd);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/*
	 * These take about 16 cycles to actually propagate.  We can't go
	 * sending stuff before they come out of reset, so wait.
	 */
	for (int i = 0; i < 256; i++)
		asm volatile ("nop");
}

/**
 * __prci_ethernet_release_reset() - Release ethernet (GEMGXL) reset
 * @pd: struct __prci_data * for the PRCI containing the DEVICESRESETREG
 */
static void __prci_ethernet_release_reset(struct __prci_data *pd)
{
	u32 v;

	/* Release GEMGXL reset */
	v = __prci_readl(pd, PRCI_DEVICESRESETREG_OFFSET);
	v |= PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK;
	__prci_writel(v, PRCI_DEVICESRESETREG_OFFSET, pd);

	/* Procmon => core clock */
	__prci_writel(PRCI_PROCMONCFG_CORE_CLOCK_MASK, PRCI_PROCMONCFG_OFFSET,
		      pd);
}

/*
 * PRCI integration data for each WRPLL instance
 */

static struct __prci_wrpll_data __prci_corepll_data = {
	.cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
	.enable_bypass = __prci_coreclksel_use_hfclk,
	.disable_bypass = __prci_coreclksel_use_corepll,
};

static struct __prci_wrpll_data __prci_ddrpll_data = {
	.cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
	.release_reset = __prci_ddr_release_reset,
};

static struct __prci_wrpll_data __prci_gemgxlpll_data = {
	.cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
	.release_reset = __prci_ethernet_release_reset,
};

/*
 * List of clock controls provided by the PRCI
 */

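/*
 * Indexed by the PRCI_CLK_* constants from
 * <dt-bindings/clock/sifive-fu540-prci.h>, so a consumer's clock specifier
 * (clk->id) maps directly onto this table.
 */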
static struct __prci_clock __prci_init_clocks[] = {
	[PRCI_CLK_COREPLL] = {
		.name = "corepll",
		.parent_name = "hfclk",
		.ops = &sifive_fu540_prci_wrpll_clk_ops,
		.pwd = &__prci_corepll_data,
	},
	[PRCI_CLK_DDRPLL] = {
		.name = "ddrpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu540_prci_wrpll_clk_ops,
		.pwd = &__prci_ddrpll_data,
	},
	[PRCI_CLK_GEMGXLPLL] = {
		.name = "gemgxlpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu540_prci_wrpll_clk_ops,
		.pwd = &__prci_gemgxlpll_data,
	},
	[PRCI_CLK_TLCLK] = {
		.name = "tlclk",
		.parent_name = "corepll",
		.ops = &sifive_fu540_prci_tlclksel_clk_ops,
	},
};

static ulong sifive_fu540_prci_parent_rate(struct __prci_clock *pc)
{
	ulong parent_rate;
	struct __prci_clock *p;

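	/*
	 * tlclk is parented by corepll (see __prci_init_clocks), so resolve
	 * the PLL output rate recursively before falling back to the
	 * external hfclk/rtcclk inputs.
	 */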
	if (strcmp(pc->parent_name, "corepll") == 0) {
		p = &__prci_init_clocks[PRCI_CLK_COREPLL];
		if (!p->pd || !p->ops->recalc_rate)
			return -ENXIO;

		return p->ops->recalc_rate(p, sifive_fu540_prci_parent_rate(p));
	}

	if (strcmp(pc->parent_name, "rtcclk") == 0)
		parent_rate = clk_get_rate(&pc->pd->parent_rtcclk);
	else
		parent_rate = clk_get_rate(&pc->pd->parent_hfclk);

	return parent_rate;
}

static ulong sifive_fu540_prci_get_rate(struct clk *clk)
{
	struct __prci_clock *pc;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd || !pc->ops->recalc_rate)
		return -ENXIO;

	return pc->ops->recalc_rate(pc, sifive_fu540_prci_parent_rate(pc));
}

static ulong sifive_fu540_prci_set_rate(struct clk *clk, ulong rate)
{
	int err;
	struct __prci_clock *pc;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd || !pc->ops->set_rate)
		return -ENXIO;

	err = pc->ops->set_rate(pc, rate, sifive_fu540_prci_parent_rate(pc));
	if (err)
		return err;

	return rate;
}

static int sifive_fu540_prci_enable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 1);

	return ret;
}

static int sifive_fu540_prci_disable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 0);

	return ret;
}

static int sifive_fu540_prci_probe(struct udevice *dev)
{
	int i, err;
	struct __prci_clock *pc;
	struct __prci_data *pd = dev_get_priv(dev);

	pd->va = (void *)dev_read_addr(dev);
	if (IS_ERR(pd->va))
		return PTR_ERR(pd->va);

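	/*
	 * The two expected parents (see EXPECTED_CLK_PARENT_COUNT):
	 * index 0 is hfclk, index 1 is rtcclk.
	 */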
	err = clk_get_by_index(dev, 0, &pd->parent_hfclk);
	if (err)
		return err;

	err = clk_get_by_index(dev, 1, &pd->parent_rtcclk);
	if (err)
		return err;

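	/*
	 * Point each clock at this device instance and cache the current
	 * CFG0 settings of each PLL so recalc_rate() works before any
	 * set_rate() call.
	 */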
	for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
		pc = &__prci_init_clocks[i];
		pc->pd = pd;
		if (pc->pwd)
			__prci_wrpll_read_cfg0(pd, pc->pwd);
	}

	return 0;
}

static struct clk_ops sifive_fu540_prci_ops = {
	.set_rate = sifive_fu540_prci_set_rate,
	.get_rate = sifive_fu540_prci_get_rate,
	.enable = sifive_fu540_prci_enable,
	.disable = sifive_fu540_prci_disable,
};

static const struct udevice_id sifive_fu540_prci_ids[] = {
	{ .compatible = "sifive,fu540-c000-prci" },
	{ }
};

U_BOOT_DRIVER(sifive_fu540_prci) = {
	.name = "sifive-fu540-prci",
	.id = UCLASS_CLK,
	.of_match = sifive_fu540_prci_ids,
	.probe = sifive_fu540_prci_probe,
	.ops = &sifive_fu540_prci_ops,
	.priv_auto_alloc_size = sizeof(struct __prci_data),
769};