// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * NVIDIA Corporation <www.nvidia.com>
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/ahb.h>
#include <asm/arch/clock.h>
#include <asm/arch/flow.h>
#include <asm/arch/pinmux.h>
#include <asm/arch/tegra.h>
#include <asm/arch-tegra/clk_rst.h>
#include <asm/arch-tegra/pmc.h>
#include <asm/arch-tegra/ap.h>
#include "../cpu.h"

/* Tegra124-specific CPU init code */

static void enable_cpu_power_rail(void)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry\n", __func__);

	/* un-tristate PWR_I2C SCL/SDA, rest of the defaults are correct */
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SCL_PZ6);
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SDA_PZ7);

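	/* Ask the PMIC (reached over PWR_I2C) to turn on the CPU VDD rail */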
	pmic_enable_cpu_vdd();

	/*
	 * Set CPUPWRGOOD_TIMER - APB clock is 1/2 of SCLK (102MHz),
	 * set it for 5ms as per SysEng (102MHz * 5ms = 510000 = 0x7C830).
	 */
	writel(0x7C830, &pmc->pmc_cpupwrgood_timer);

	/* Set polarity to 0 (normal) and enable CPUPWRREQ_OE */
	clrbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_POL);
	setbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_OE);
}

static void enable_cpu_clocks(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	struct clk_pll_info *pllinfo = &tegra_pll_info_table[CLOCK_ID_XCPU];
	u32 reg;

	debug("%s entry\n", __func__);

	/* Wait for PLL-X to lock */
	do {
		reg = readl(&clkrst->crc_pll_simple[SIMPLE_PLLX].pll_base);
		debug("%s: PLLX base = 0x%08X\n", __func__, reg);
	} while ((reg & (1 << pllinfo->lock_det)) == 0);

	debug("%s: PLLX locked, delay for stable clocks\n", __func__);
	/* Wait until all clocks are stable */
	udelay(PLL_STABILIZATION_DELAY);

	debug("%s: Setting CCLK_BURST and DIVIDER\n", __func__);
	writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
	writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);

	debug("%s: Enabling clock to all CPUs\n", __func__);
	/* Enable the clock to all CPUs */
	reg = CLR_CPU3_CLK_STP | CLR_CPU2_CLK_STP | CLR_CPU1_CLK_STP |
		CLR_CPU0_CLK_STP;
	writel(reg, &clkrst->crc_clk_cpu_cmplx_clr);

	debug("%s: Enabling main CPU complex clocks\n", __func__);
	/* Always enable the main CPU complex clocks */
	clock_enable(PERIPH_ID_CPU);
	clock_enable(PERIPH_ID_CPULP);
	clock_enable(PERIPH_ID_CPUG);

	debug("%s: Done\n", __func__);
}

static void remove_cpu_resets(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("%s entry\n", __func__);

	/* Take the slow and fast partitions out of reset */
	reg = CLR_NONCPURESET;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);

	/* Clear the SW-controlled reset of the slow cluster */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
		CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);

	/* Clear the SW-controlled reset of the fast cluster */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
		CLR_CPURESET1 | CLR_DBGRESET1 | CLR_CORERESET1 | CLR_CXRESET1 |
		CLR_CPURESET2 | CLR_DBGRESET2 | CLR_CORERESET2 | CLR_CXRESET2 |
		CLR_CPURESET3 | CLR_DBGRESET3 | CLR_CORERESET3 | CLR_CXRESET3 |
		CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
}

static void tegra124_ram_repair(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	u32 ram_repair_timeout; /* usec */
	u32 val;

	/*
	 * Request the Flow Controller perform RAM repair whenever it turns on
	 * a power rail that requires RAM repair.
	 */
	clrbits_le32(&flow->ram_repair, RAM_REPAIR_BYPASS_EN);

	/* Request SW-triggered RAM repair by setting the req bit */
	/* cluster 0 */
	setbits_le32(&flow->ram_repair, RAM_REPAIR_REQ);
	/* Wait for completion (STS bit set) */
	ram_repair_timeout = 500;
	do {
		udelay(1);
		val = readl(&flow->ram_repair);
	} while (!(val & RAM_REPAIR_STS) && --ram_repair_timeout);
	if (!ram_repair_timeout)
		debug("RAM repair cluster0 failed\n");

	/* cluster 1 */
	setbits_le32(&flow->ram_repair_cluster1, RAM_REPAIR_REQ);
	/* Wait for completion (STS bit set) */
	ram_repair_timeout = 500;
	do {
		udelay(1);
		val = readl(&flow->ram_repair_cluster1);
	} while (!(val & RAM_REPAIR_STS) && --ram_repair_timeout);

	if (!ram_repair_timeout)
		debug("RAM repair cluster1 failed\n");
}

/**
 * Tegra124 requires some special clock initialization, including setting up
 * the DVC I2C, turning on MSELECT and selecting the G CPU cluster
 */
void tegra124_init_clocks(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	struct clk_rst_ctlr *clkrst =
		(struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 val;

	debug("%s entry\n", __func__);

	/* Set active CPU cluster to G */
	clrbits_le32(&flow->cluster_control, 1);

	/* Change the oscillator drive strength */
	val = readl(&clkrst->crc_osc_ctrl);
	val &= ~OSC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << OSC_XOFS_SHIFT);
	writel(val, &clkrst->crc_osc_ctrl);

	/* Update same value in PMC_OSC_EDPD_OVER XOFS field for warmboot */
	val = readl(&pmc->pmc_osc_edpd_over);
	val &= ~PMC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << PMC_XOFS_SHIFT);
	writel(val, &pmc->pmc_osc_edpd_over);

	/* Set HOLD_CKE_LOW_EN to 1 */
	setbits_le32(&pmc->pmc_cntrl2, HOLD_CKE_LOW_EN);

	debug("Setting up PLLX\n");
	init_pllx();

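	/* AHB_RATE = 1: run HCLK (the AHB bus clock) at SCLK/2 */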
	val = (1 << CLK_SYS_RATE_AHB_RATE_SHIFT);
	writel(val, &clkrst->crc_clk_sys_rate);

	/* Enable clocks to required peripherals. TBD - minimize this list */
	debug("Enabling clocks\n");

	clock_set_enable(PERIPH_ID_CACHE2, 1);
	clock_set_enable(PERIPH_ID_GPIO, 1);
	clock_set_enable(PERIPH_ID_TMR, 1);
	clock_set_enable(PERIPH_ID_CPU, 1);
	clock_set_enable(PERIPH_ID_EMC, 1);
	clock_set_enable(PERIPH_ID_I2C5, 1);
	clock_set_enable(PERIPH_ID_APBDMA, 1);
	clock_set_enable(PERIPH_ID_MEM, 1);
	clock_set_enable(PERIPH_ID_CORESIGHT, 1);
	clock_set_enable(PERIPH_ID_MSELECT, 1);
	clock_set_enable(PERIPH_ID_DVFS, 1);

	/*
	 * Set MSELECT clock source as PLLP (00), and ask for a clock
	 * divider that would set the MSELECT clock at 102MHz for a
	 * PLLP base of 408MHz.
	 */
	clock_ll_set_source_divisor(PERIPH_ID_MSELECT, 0,
				    CLK_DIVIDER(NVBL_PLLP_KHZ, 102000));

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* I2C5 (DVC) gets CLK_M and a divisor of 17 */
	clock_ll_set_source_divisor(PERIPH_ID_I2C5, 3, 16);

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* Take required peripherals out of reset */
	debug("Taking periphs out of reset\n");
	reset_set_enable(PERIPH_ID_CACHE2, 0);
	reset_set_enable(PERIPH_ID_GPIO, 0);
	reset_set_enable(PERIPH_ID_TMR, 0);
	reset_set_enable(PERIPH_ID_COP, 0);
	reset_set_enable(PERIPH_ID_EMC, 0);
	reset_set_enable(PERIPH_ID_I2C5, 0);
	reset_set_enable(PERIPH_ID_APBDMA, 0);
	reset_set_enable(PERIPH_ID_MEM, 0);
	reset_set_enable(PERIPH_ID_CORESIGHT, 0);
	reset_set_enable(PERIPH_ID_MSELECT, 0);
	reset_set_enable(PERIPH_ID_DVFS, 0);

	debug("%s exit\n", __func__);
}

static bool is_partition_powered(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	u32 reg;

	/* Get power gate status */
	reg = readl(&pmc->pmc_pwrgate_status);
	return !!(reg & (1 << partid));
}

static void unpower_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);
	/* Is the partition on? */
	if (is_partition_powered(partid)) {
		/* Yes, toggle the partition power state (ON -> OFF) */
		debug("unpower_partition, toggling state\n");
		writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

		/* Wait for the power to come down */
		while (is_partition_powered(partid))
			;

		/* Give I/O signals time to stabilize */
		udelay(IO_STABILIZATION_DELAY);
	}
}

void unpower_cpus(void)
{
	debug("%s entry: G cluster\n", __func__);

	/* Power down the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	unpower_partition(CRAIL);

	/* Power down the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	unpower_partition(C0NC);

	/* Power down the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	unpower_partition(CE0);

	debug("%s: done\n", __func__);
}

static void power_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);
	/* Is the partition already on? */
	if (!is_partition_powered(partid)) {
		/* No, toggle the partition power state (OFF -> ON) */
		debug("power_partition, toggling state\n");
		writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

		/* Wait for the power to come up */
		while (!is_partition_powered(partid))
			;

		/* Give I/O signals time to stabilize */
		udelay(IO_STABILIZATION_DELAY);
	}
}

void powerup_cpus(void)
{
	/* We boot to the fast cluster */
	debug("%s entry: G cluster\n", __func__);

	/* Power up the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	power_partition(CRAIL);

	/* Power up the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	power_partition(C0NC);

	/* Power up the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	power_partition(CE0);

	debug("%s: done\n", __func__);
}

void start_cpu(u32 reset_vector)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry, reset_vector = %x\n", __func__, reset_vector);

	/*
	 * The high-power cluster partitions are left on after a software
	 * reset and can interfere with tegra124_ram_repair(), so power
	 * them down first.
	 */
	unpower_cpus();
	tegra124_init_clocks();

	/* Set power-gating timer multiplier */
	writel((MULT_8 << TIMER_MULT_SHIFT) | (MULT_8 << TIMER_MULT_CPU_SHIFT),
	       &pmc->pmc_pwrgate_timer_mult);

	enable_cpu_power_rail();
	powerup_cpus();
	tegra124_ram_repair();
	enable_cpu_clocks();
	clock_enable_coresight(1);
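	/* Program the CPU reset exception vector so the CPU starts at reset_vector */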
	writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);
	remove_cpu_resets();
	debug("%s exit, should continue @ reset_vector\n", __func__);
}