// SPDX-License-Identifier: GPL-2.0+
/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

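/*
 * System clock frequencies in Hz, indexed by (CM_SYS_CLKSEL - 1);
 * see get_sys_clk_index() below.
 */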
const u32 sys_clk_array[8] = {
	12000000,	/* 12 MHz */
	20000000,	/* 20 MHz */
	16800000,	/* 16.8 MHz */
	19200000,	/* 19.2 MHz */
	26000000,	/* 26 MHz */
	27000000,	/* 27 MHz */
	38400000,	/* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
	s8 ind;
	/*
	 * For ES1 the ROM code calibration of the sys clock is not reliable
	 * due to a hw issue, so use a hard-coded value. If this value is not
	 * correct for a given board, override this function in the board
	 * file. From ES2.0 onwards this information is read from
	 * CM_SYS_CLKSEL.
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
			CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}

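/*
 * Program only the post-dividers that exist for this DPLL: divider fields
 * left negative (typically -1) in struct dpll_params are skipped.
 */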
void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers */
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
			   LDELAY)) {
		printf("Bypassing DPLL failed %x\n", base);
	}
}

static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
			   &dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The DPLL has already been locked by ROM code using CH
		 * (configuration header). Check whether M and N match the
		 * ideal nominal OPP values; if they do, skip the rest,
		 * otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s Dpll locked, but not for ideal M = %d, "
			      "N = %d values, current values are M = %d, "
			      "N = %d", dpll, params->m, params->n,
			      M, N);
		} else {
			/* Dpll locked with ideal values for nominal OPPs. */
			debug("\n %s Dpll already locked with ideal "
			      "nominal opp values", dpll);

			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n ", sys_clk_khz * 1000);

	/* Find Core DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
			(core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}

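	/*
	 * Worked example (illustrative values only, not taken from any
	 * board's dpll_params): with sys_clk = 38.4 MHz, M = 125, N = 11,
	 * M2 = 1 the locked frequency above is 2 * 125 * 38400 / 12 kHz
	 * = 800 MHz; on pre-OMAP5 silicon (divider = 4) the division below
	 * then yields ddr_clk = 800 / 4 / 1 = 200 MHz.
	 */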
	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;	/* convert to Hz */
	debug("ddr_clk %d\n ", ddr_clk);

	return ddr_clk;
}

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0	: 600 MHz
 * 4430 ES2.x	: 792 MHz (OPP Turbo)
 * 4460		: 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;
	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required if more than a certain frequency is needed:
	 * for 4460, above 1 GHz;
	 * for 5430, above 1.4 GHz.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
	 * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)]* CLKINP / 250)
	 *      - where CLKINP is sys_clk in MHz
	 * Use CLKINP in KHz and adjust the denominator accordingly so
	 * that we have enough accuracy and at the same time no overflow
	 */
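	/*
	 * Worked example (illustrative values only): with CLKINP = 38.4 MHz
	 * (sys_clk_khz = 38400), M = 400 and N = 15, the formula gives
	 * ceil((400 / 16) * 38.4 / 250) = ceil(3.84) = 4, which is exactly
	 * what the integer round-up below, (num + den - 1) / den, computes.
	 */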
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the dpll with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the core DPLL now, just set it up.
	 * The core DPLL will be locked after setting up EMIF
	 * using the FREQ_UPDATE method (freq_update_core()).
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");
	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	    (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	    (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* lock PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

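	/*
	 * Round up to the next PMIC voltage step so that the requested
	 * voltage is never undershot.
	 */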
	offset_code = (volt_offset + pmic->step - 1) / pmic->step;

	/*
	 * Offset codes 1-6 all give the base voltage in Palmas
	 * Offset code 0 switches OFF the SMPS
	 */
	return offset_code + pmic->start_code;
}

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
	/* See if we can first get the GPIO if needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
	/*
	 * By default return OPP_NOM for all voltage rails.
	 */
	return OPP_NOM;
}

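/*
 * Return the voltage to program for the given OPP: the fused, SoC-specific
 * value read from the efuse register when one is defined and reads non-zero,
 * otherwise the nominal value from the volts struct.
 */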
static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
	u32 val;

	if (!v->value[opp])
		return 0;
	if (!v->efuse.reg[opp])
		return v->value[opp];

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg[opp]);
		break;
	case 32:
		val = readl(v->efuse.reg[opp]);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg[opp], v->efuse.reg_bits);
		return v->value[opp];
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
		return v->value[opp];
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
	      val);
	return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Set up the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts)); i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan the already-handled non-empty members
				 * to see if we have a group and find the max
				 * voltage, which is set on the first
				 * occurrence of the particular SMPS; the
				 * other group voltages are zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
		(idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {

		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
					u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
	u32 bound = LDELAY;

	while ((idlest != MODULE_CLKCTRL_IDLEST_DISABLED)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock disable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void disable_clock_module(u32 const clkctrl_addr,
					u32 wait_for_disable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Disable clock module - %x\n", clkctrl_addr);
	if (wait_for_disable)
		wait_for_clk_disable(clkctrl_addr);
}

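/*
 * Apply the CORE DPLL M2/enable settings through the PRCM shadow register
 * (CM_SHADOW_FREQ_CONFIG1): the EMIF clock domain is forced awake, the new
 * configuration is written together with FREQ_UPDATE and DLL_RESET, and the
 * hardware clears FREQ_UPDATE once the change has been applied.
 */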
void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put EMIF clock domain in sw wakeup mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
	    SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
			SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
			SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!");
		hang();
	}

	/*
	 * Putting EMIF in HW_AUTO has been seen to cause issues with
	 * the EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
	 * on OMAP5430 ES1.0 silicon.
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put EMIF clock domain back in hw auto mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

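/*
 * Walk the zero-terminated register lists (at most 100 entries each):
 * wake up the clock domains, enable the modules in HW_AUTO or explicit
 * mode, then hand the domains back to HW_AUTO.
 */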
void do_enable_clocks(u32 const *clk_domains,
		      u32 const *clk_modules_hw_auto,
		      u32 const *clk_modules_explicit_en,
		      u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
		    clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
		    clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}

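/*
 * Mirror of do_enable_clocks(): disable the listed clock modules first,
 * then put their clock domains into SW_SLEEP.
 */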
void do_disable_clocks(u32 const *clk_domains,
		       u32 const *clk_modules_disable,
		       u8 wait_for_disable)
{
	u32 i, max = 100;

	/* Clock modules that need to be put in SW_DISABLE */
	for (i = 0; (i < max) && clk_modules_disable[i]; i++)
		disable_clock_module(clk_modules_disable[i],
				     wait_for_disable);

	/* Put the clock domains in SW_SLEEP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++)
		disable_clock_domain(clk_domains[i]);
}

/**
 * setup_early_clocks() - Set up early clocks needed for the SoC
 *
 * Set up the console clocks and the basic SPL initialization clocks, and
 * initialize the timer. This is invoked prior to prcm_init.
 */
void setup_early_clocks(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		setup_clocks_for_console();
		enable_basic_clocks();
		timer_init();
		/* Fall through */
	}
}

void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
		enable_basic_uboot_clocks();
}

#if !defined(CONFIG_DM_I2C)
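/*
 * One-time legacy (non-DM) I2C initialization, typically used to reach the
 * PMIC before the driver-model I2C stack is available.
 */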
void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
			 CONFIG_SYS_OMAP24_I2C_SLAVE);
		gpi2c = 0;
	}
}
#endif