// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Copyright (C) 2018 SiFive, Inc.
 * Wesley Terpstra
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * The FU540 PRCI implements clock and reset control for the SiFive
 * FU540-C000 chip.  This driver assumes that it has sole control
 * over all PRCI resources.
 *
 * This driver is based on the PRCI driver written by Wesley Terpstra.
 *
 * Refer to commit 999529edf517ed75b56659d456d221b2ee56bb60 of:
 * https://github.com/riscv/riscv-linux
 *
 * References:
 * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
 */

#include <common.h>
#include <clk-uclass.h>
#include <clk.h>
#include <div64.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/uclass.h>
#include <dt-bindings/clock/sifive-fu540-prci.h>
#include <dt-bindings/reset/sifive-fu540-prci.h>
#include <errno.h>
#include <reset-uclass.h>
#include <asm/io.h>
#include <asm/arch/reset.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/clk/analogbits-wrpll-cln28hpc.h>

/*
 * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
 *     hfclk and rtcclk
 */
#define EXPECTED_CLK_PARENT_COUNT	2

/*
 * Register offsets and bitmasks
 */

/* COREPLLCFG0 */
#define PRCI_COREPLLCFG0_OFFSET		0x4
#define PRCI_COREPLLCFG0_DIVR_SHIFT	0
#define PRCI_COREPLLCFG0_DIVR_MASK	(0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
#define PRCI_COREPLLCFG0_DIVF_SHIFT	6
#define PRCI_COREPLLCFG0_DIVF_MASK	(0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
#define PRCI_COREPLLCFG0_DIVQ_SHIFT	15
#define PRCI_COREPLLCFG0_DIVQ_MASK	(0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
#define PRCI_COREPLLCFG0_RANGE_SHIFT	18
#define PRCI_COREPLLCFG0_RANGE_MASK	(0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
#define PRCI_COREPLLCFG0_BYPASS_SHIFT	24
#define PRCI_COREPLLCFG0_BYPASS_MASK	(0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
#define PRCI_COREPLLCFG0_FSE_SHIFT	25
#define PRCI_COREPLLCFG0_FSE_MASK	(0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
#define PRCI_COREPLLCFG0_LOCK_SHIFT	31
#define PRCI_COREPLLCFG0_LOCK_MASK	(0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)

/* COREPLLCFG1 */
#define PRCI_COREPLLCFG1_OFFSET		0x8
#define PRCI_COREPLLCFG1_CKE_SHIFT	31
#define PRCI_COREPLLCFG1_CKE_MASK	(0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)

/* DDRPLLCFG0 */
#define PRCI_DDRPLLCFG0_OFFSET		0xc
#define PRCI_DDRPLLCFG0_DIVR_SHIFT	0
#define PRCI_DDRPLLCFG0_DIVR_MASK	(0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
#define PRCI_DDRPLLCFG0_DIVF_SHIFT	6
#define PRCI_DDRPLLCFG0_DIVF_MASK	(0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
#define PRCI_DDRPLLCFG0_DIVQ_SHIFT	15
#define PRCI_DDRPLLCFG0_DIVQ_MASK	(0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
#define PRCI_DDRPLLCFG0_RANGE_SHIFT	18
#define PRCI_DDRPLLCFG0_RANGE_MASK	(0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
#define PRCI_DDRPLLCFG0_BYPASS_SHIFT	24
#define PRCI_DDRPLLCFG0_BYPASS_MASK	(0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
#define PRCI_DDRPLLCFG0_FSE_SHIFT	25
#define PRCI_DDRPLLCFG0_FSE_MASK	(0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
#define PRCI_DDRPLLCFG0_LOCK_SHIFT	31
#define PRCI_DDRPLLCFG0_LOCK_MASK	(0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)

/* DDRPLLCFG1 */
#define PRCI_DDRPLLCFG1_OFFSET		0x10
#define PRCI_DDRPLLCFG1_CKE_SHIFT	31
#define PRCI_DDRPLLCFG1_CKE_MASK	(0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)

/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET	0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT	0
#define PRCI_GEMGXLPLLCFG0_DIVR_MASK \
			(0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT	6
#define PRCI_GEMGXLPLLCFG0_DIVF_MASK \
			(0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT	15
#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK	(0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT	18
#define PRCI_GEMGXLPLLCFG0_RANGE_MASK \
			(0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT	24
#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK \
			(0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT	25
#define PRCI_GEMGXLPLLCFG0_FSE_MASK \
			(0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT	31
#define PRCI_GEMGXLPLLCFG0_LOCK_MASK	(0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)

/* GEMGXLPLLCFG1 */
#define PRCI_GEMGXLPLLCFG1_OFFSET	0x20
#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT	31
#define PRCI_GEMGXLPLLCFG1_CKE_MASK	(0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)

/* CORECLKSEL */
#define PRCI_CORECLKSEL_OFFSET		0x24
#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT	0
#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
			(0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)

/* DEVICESRESETREG */
#define PRCI_DEVICESRESETREG_OFFSET	0x28
#define PRCI_DEVICERESETCNT		5

#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
			(0x1 << PRCI_RST_DDR_CTRL_N)
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
			(0x1 << PRCI_RST_DDR_AXI_N)
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
			(0x1 << PRCI_RST_DDR_AHB_N)
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
			(0x1 << PRCI_RST_DDR_PHY_N)
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
			(0x1 << PRCI_RST_GEMGXL_N)

/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET	0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT	1
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
			(0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)

/* PROCMONCFG */
#define PRCI_PROCMONCFG_OFFSET		0xF0
#define PRCI_PROCMONCFG_CORE_CLOCK_SHIFT	24
#define PRCI_PROCMONCFG_CORE_CLOCK_MASK \
			(0x1 << PRCI_PROCMONCFG_CORE_CLOCK_SHIFT)

/*
 * Private structures
 */

/**
 * struct __prci_data - per-device-instance data
 * @va: base virtual address of the PRCI IP block
 * @parent_hfclk: high-frequency parent clock (hfclk)
 * @parent_rtcclk: real-time-clock parent clock (rtcclk)
 *
 * PRCI per-device instance data
 */
struct __prci_data {
	void *va;
	struct clk parent_hfclk;
	struct clk parent_rtcclk;
};

/**
 * struct __prci_wrpll_data - WRPLL configuration and integration data
 * @c: WRPLL current configuration record
 * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
 * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
 * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
 * @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
 * @release_reset: fn ptr to code to release clock reset
 *
 * @enable_bypass and @disable_bypass are used for WRPLL instances
 * that contain a separate external glitchless clock mux downstream
 * from the PLL.  The WRPLL internal bypass mux is not glitchless.
 */
struct __prci_wrpll_data {
	struct wrpll_cfg c;
	void (*enable_bypass)(struct __prci_data *pd);
	void (*disable_bypass)(struct __prci_data *pd);
	u8 cfg0_offs;
	u8 cfg1_offs;
	void (*release_reset)(struct __prci_data *pd);
};

struct __prci_clock;

/* struct __prci_clock_ops - clock operations */
struct __prci_clock_ops {
	int (*set_rate)(struct __prci_clock *pc,
			unsigned long rate,
			unsigned long parent_rate);
	unsigned long (*round_rate)(struct __prci_clock *pc,
				    unsigned long rate,
				    unsigned long *parent_rate);
	unsigned long (*recalc_rate)(struct __prci_clock *pc,
				     unsigned long parent_rate);
	int (*enable_clk)(struct __prci_clock *pc, bool enable);
};

/**
 * struct __prci_clock - describes a clock device managed by PRCI
 * @name: user-readable clock name string - should match the manual
 * @parent_name: parent name for this clock
 * @ops: struct __prci_clock_ops for control
 * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
 * @pd: PRCI-specific data associated with this clock (if not NULL)
 *
 * PRCI clock data.  Used by the PRCI driver to register PRCI-provided
 * clocks with the clock framework.
 */
struct __prci_clock {
	const char *name;
	const char *parent_name;
	const struct __prci_clock_ops *ops;
	struct __prci_wrpll_data *pwd;
	struct __prci_data *pd;
};

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl(pd->va + offs);
}

static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU540 PRCI PLL configuration register,
 * split it into fields and populate it into the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
		     WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}
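
/*
 * Illustrative example (not taken from the manual): with the COREPLLCFG0
 * field layout above, a register value of r = 0x10ec0 would unpack to
 * divr = 0 (bits [5:0]), divf = 59 (bits [14:6]) and divq = 2 (bits [17:15]).
 */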

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context.  Caller must ensure that the contents of the
 *	    record pointed to by @c do not change during the execution
 *	    of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 *	    register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *	    @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd.  Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *	    @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write Clock enable/disable configuration
 * into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: Clock enable or disable value
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

/* Core clock mux control */

/**
 * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

/**
 * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the PLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *	    PRCI_CORECLKSEL_OFFSET register.
 */
static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

static unsigned long sifive_fu540_prci_wrpll_recalc_rate(
						struct __prci_clock *pc,
						unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
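
/*
 * Note: wrpll_calc_output_rate() comes from the analogbits WRPLL-CLN28HPC
 * helper library.  As a rough sketch of the relationship it implements
 * (assuming internal feedback mode), the output rate is approximately:
 *
 *	rate = parent_rate * 2 * (divf + 1) / (divr + 1) / 2^divq
 *
 * e.g. the illustrative configuration above (divr = 0, divf = 59, divq = 2)
 * with a 33.33 MHz hfclk comes out to roughly 1 GHz.
 */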

static unsigned long sifive_fu540_prci_wrpll_round_rate(
						struct __prci_clock *pc,
						unsigned long rate,
						unsigned long *parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, rate, *parent_rate);

	return wrpll_calc_output_rate(&c, *parent_rate);
}

static int sifive_fu540_prci_wrpll_set_rate(struct __prci_clock *pc,
					    unsigned long rate,
					    unsigned long parent_rate)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);

	return 0;
}
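
/*
 * The set_rate sequence above is: switch to the glitchless bypass source
 * (if the PLL has one), program CFG0 with the new divider settings, wait
 * for the worst-case PLL lock time, then switch back to the PLL output.
 */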

static int sifive_fu540_prci_clock_enable(struct __prci_clock *pc, bool enable)
{
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (enable) {
		__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

		if (pwd->release_reset)
			pwd->release_reset(pd);
	} else {
		u32 r;

		r = __prci_readl(pd, pwd->cfg1_offs);
		r &= ~PRCI_COREPLLCFG1_CKE_MASK;

		__prci_wrpll_write_cfg1(pd, pwd, r);
	}

	return 0;
}

static const struct __prci_clock_ops sifive_fu540_prci_wrpll_clk_ops = {
	.set_rate = sifive_fu540_prci_wrpll_set_rate,
	.round_rate = sifive_fu540_prci_wrpll_round_rate,
	.recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
	.enable_clk = sifive_fu540_prci_clock_enable,
};

/* TLCLKSEL clock integration */

static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(
						struct __prci_clock *pc,
						unsigned long parent_rate)
{
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}
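
/*
 * TLCLK is the parent rate divided by 1 or 2: when the TLCLKSEL status bit
 * read back from CLKMUXSTATUSREG above is set the divider is 1, otherwise
 * it is 2.
 */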

static const struct __prci_clock_ops sifive_fu540_prci_tlclksel_clk_ops = {
	.recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
};

static int __prci_consumer_reset(const char *rst_name, bool trigger)
{
	struct udevice *dev;
	struct reset_ctl rst_sig;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_RESET,
					  DM_GET_DRIVER(sifive_reset),
					  &dev);
	if (ret) {
		dev_err(dev, "Reset driver not found: %d\n", ret);
		return ret;
	}

	ret = reset_get_by_name(dev, rst_name, &rst_sig);
	if (ret) {
		dev_err(dev, "failed to get %s reset\n", rst_name);
		return ret;
	}

	if (reset_valid(&rst_sig)) {
		if (trigger)
			ret = reset_deassert(&rst_sig);
		else
			ret = reset_assert(&rst_sig);
		if (ret) {
			dev_err(dev, "failed to trigger reset id = %ld\n",
				rst_sig.id);
			return ret;
		}
	}

	return ret;
}
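
/*
 * __prci_consumer_reset() drives the PRCI reset lines through the
 * sifive_reset driver that this clock driver binds as a child device (see
 * sifive_fu540_clk_bind() below): @trigger true deasserts the named reset,
 * false asserts it.
 */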

/**
 * __prci_ddr_release_reset() - Release DDR reset
 * @pd: struct __prci_data * for the PRCI containing the DDRCLK mux reg
 *
 */
static void __prci_ddr_release_reset(struct __prci_data *pd)
{
	/* Release DDR ctrl reset */
	__prci_consumer_reset("ddr_ctrl", true);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/* Release DDR AXI reset */
	__prci_consumer_reset("ddr_axi", true);

	/* Release DDR AHB reset */
	__prci_consumer_reset("ddr_ahb", true);

	/* Release DDR PHY reset */
	__prci_consumer_reset("ddr_phy", true);

	/* HACK to get the '1 full controller clock cycle'. */
	asm volatile ("fence");

	/*
	 * These take like 16 cycles to actually propagate. We can't go sending
	 * stuff before they come out of reset. So wait.
	 */
	for (int i = 0; i < 256; i++)
		asm volatile ("nop");
}

/**
 * __prci_ethernet_release_reset() - Release ethernet reset
 * @pd: struct __prci_data * for the PRCI containing the Ethernet CLK mux reg
 *
 */
static void __prci_ethernet_release_reset(struct __prci_data *pd)
{
	/* Release GEMGXL reset */
	__prci_consumer_reset("gemgxl_reset", true);

	/* Procmon => core clock */
	__prci_writel(PRCI_PROCMONCFG_CORE_CLOCK_MASK, PRCI_PROCMONCFG_OFFSET,
		      pd);
}

/*
 * PRCI integration data for each WRPLL instance
 */

static struct __prci_wrpll_data __prci_corepll_data = {
	.cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
	.enable_bypass = __prci_coreclksel_use_hfclk,
	.disable_bypass = __prci_coreclksel_use_corepll,
};

static struct __prci_wrpll_data __prci_ddrpll_data = {
	.cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
	.release_reset = __prci_ddr_release_reset,
};

static struct __prci_wrpll_data __prci_gemgxlpll_data = {
	.cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
	.release_reset = __prci_ethernet_release_reset,
};

/*
 * List of clock controls provided by the PRCI
 */

static struct __prci_clock __prci_init_clocks[] = {
	[PRCI_CLK_COREPLL] = {
		.name = "corepll",
		.parent_name = "hfclk",
		.ops = &sifive_fu540_prci_wrpll_clk_ops,
		.pwd = &__prci_corepll_data,
	},
	[PRCI_CLK_DDRPLL] = {
		.name = "ddrpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu540_prci_wrpll_clk_ops,
		.pwd = &__prci_ddrpll_data,
	},
	[PRCI_CLK_GEMGXLPLL] = {
		.name = "gemgxlpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu540_prci_wrpll_clk_ops,
		.pwd = &__prci_gemgxlpll_data,
	},
	[PRCI_CLK_TLCLK] = {
		.name = "tlclk",
		.parent_name = "corepll",
		.ops = &sifive_fu540_prci_tlclksel_clk_ops,
	},
};
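
/*
 * Consumers select these clocks by index using the constants from
 * <dt-bindings/clock/sifive-fu540-prci.h>.  As a minimal sketch (node name
 * assumed for illustration, not taken from this file), an FU540 device tree
 * ethernet node might reference the GEMGXL PLL like this:
 *
 *	ethernet {
 *		clocks = <&prci PRCI_CLK_GEMGXLPLL>;
 *	};
 */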

static ulong sifive_fu540_prci_parent_rate(struct __prci_clock *pc)
{
	ulong parent_rate;
	struct __prci_clock *p;

	if (strcmp(pc->parent_name, "corepll") == 0) {
		p = &__prci_init_clocks[PRCI_CLK_COREPLL];
		if (!p->pd || !p->ops->recalc_rate)
			return -ENXIO;

		return p->ops->recalc_rate(p, sifive_fu540_prci_parent_rate(p));
	}

	if (strcmp(pc->parent_name, "rtcclk") == 0)
		parent_rate = clk_get_rate(&pc->pd->parent_rtcclk);
	else
		parent_rate = clk_get_rate(&pc->pd->parent_hfclk);

	return parent_rate;
}

static ulong sifive_fu540_prci_get_rate(struct clk *clk)
{
	struct __prci_clock *pc;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd || !pc->ops->recalc_rate)
		return -ENXIO;

	return pc->ops->recalc_rate(pc, sifive_fu540_prci_parent_rate(pc));
}

static ulong sifive_fu540_prci_set_rate(struct clk *clk, ulong rate)
{
	int err;
	struct __prci_clock *pc;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd || !pc->ops->set_rate)
		return -ENXIO;

	err = pc->ops->set_rate(pc, rate, sifive_fu540_prci_parent_rate(pc));
	if (err)
		return err;

	return rate;
}

static int sifive_fu540_prci_enable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 1);

	return ret;
}

static int sifive_fu540_prci_disable(struct clk *clk)
{
	struct __prci_clock *pc;
	int ret = 0;

	if (ARRAY_SIZE(__prci_init_clocks) <= clk->id)
		return -ENXIO;

	pc = &__prci_init_clocks[clk->id];
	if (!pc->pd)
		return -ENXIO;

	if (pc->ops->enable_clk)
		ret = pc->ops->enable_clk(pc, 0);

	return ret;
}

static int sifive_fu540_prci_probe(struct udevice *dev)
{
	int i, err;
	struct __prci_clock *pc;
	struct __prci_data *pd = dev_get_priv(dev);

	pd->va = (void *)dev_read_addr(dev);
	if (IS_ERR(pd->va))
		return PTR_ERR(pd->va);

	err = clk_get_by_index(dev, 0, &pd->parent_hfclk);
	if (err)
		return err;

	err = clk_get_by_index(dev, 1, &pd->parent_rtcclk);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
		pc = &__prci_init_clocks[i];
		pc->pd = pd;
		if (pc->pwd)
			__prci_wrpll_read_cfg0(pd, pc->pwd);
	}

	return 0;
}
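
/*
 * Note: probe() above takes the two parent clocks by fixed index, so the
 * PRCI node's "clocks" property is expected to list hfclk first and rtcclk
 * second (see EXPECTED_CLK_PARENT_COUNT above).
 */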

static struct clk_ops sifive_fu540_prci_ops = {
	.set_rate = sifive_fu540_prci_set_rate,
	.get_rate = sifive_fu540_prci_get_rate,
	.enable = sifive_fu540_prci_enable,
	.disable = sifive_fu540_prci_disable,
};

static int sifive_fu540_clk_bind(struct udevice *dev)
{
	return sifive_reset_bind(dev, PRCI_DEVICERESETCNT);
}

static const struct udevice_id sifive_fu540_prci_ids[] = {
	{ .compatible = "sifive,fu540-c000-prci" },
	{ }
};

U_BOOT_DRIVER(sifive_fu540_prci) = {
	.name = "sifive-fu540-prci",
	.id = UCLASS_CLK,
	.of_match = sifive_fu540_prci_ids,
	.probe = sifive_fu540_prci_probe,
	.ops = &sifive_fu540_prci_ops,
	.priv_auto_alloc_size = sizeof(struct __prci_data),
	.bind = sifive_fu540_clk_bind,
};