/*
 * (C) Copyright 2010-2011
 * NVIDIA Corporation <www.nvidia.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include "ap20.h"
#include <asm/io.h>
#include <asm/arch/tegra2.h>
#include <asm/arch/clk_rst.h>
#include <asm/arch/pmc.h>
#include <asm/arch/pinmux.h>
#include <asm/arch/scu.h>
#include <common.h>

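/* Non-zero on first boot; cleared in cpu_start() just before cold_boot() runs */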
u32 s_first_boot = 1;

void init_pllx(void)
{
        struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
        u32 reg;

        /* If PLLX is already enabled, just return */
        reg = readl(&clkrst->crc_pllx_base);
        if (reg & PLL_ENABLE)
                return;

        /* Set PLLX_MISC */
        reg = CPCON;    /* CPCON[11:8] = 0001 */
        writel(reg, &clkrst->crc_pllx_misc);

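        /*
         * Note: DIVN = 1000 with the 12 MHz oscillator presumably targets
         * PLLX at 1 GHz (ref * DIVN / DIVM), assuming PLL_DIVM encodes a
         * divide-by-12 input divider; see ap20.h for the field encodings.
         */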
        /* Use 12MHz clock here */
        reg = (PLL_BYPASS | PLL_DIVM);
        reg |= (1000 << 8);     /* DIVN = 0x3E8 */
        writel(reg, &clkrst->crc_pllx_base);

        reg |= PLL_ENABLE;
        writel(reg, &clkrst->crc_pllx_base);

        reg &= ~PLL_BYPASS;
        writel(reg, &clkrst->crc_pllx_base);
}

static void enable_cpu_clock(int enable)
{
        struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
        u32 reg, clk;

        /*
         * NOTE:
         * Regardless of whether the request is to enable or disable the CPU
         * clock, every processor in the CPU complex except the master (CPU 0)
         * will have its clock stopped because the AVP only talks to the
         * master. The AVP does not know (nor does it need to know) that there
         * are multiple processors in the CPU complex.
         */

        if (enable) {
                /* Initialize PLLX */
                init_pllx();

                /* Wait until all clocks are stable */
                udelay(PLL_STABILIZATION_DELAY);

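                /*
                 * CCLK_BURST_POLICY and SUPER_CCLK_DIVIDER come from ap20.h;
                 * the burst policy value presumably selects PLLX as the CPU
                 * clock source once the PLL is stable.
                 */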
                writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
                writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);
        }

        /* Fetch the register containing the main CPU complex clock enable */
        reg = readl(&clkrst->crc_clk_out_enb_l);
        reg |= CLK_ENB_CPU;

        /*
         * Read the register containing the individual CPU clock enables and
         * always stop the clock to CPU 1.
         */
        clk = readl(&clkrst->crc_clk_cpu_cmplx);
        clk |= CPU1_CLK_STP;

        if (enable) {
                /* Unstop the CPU clock */
                clk &= ~CPU0_CLK_STP;
        } else {
                /* Stop the CPU clock */
                clk |= CPU0_CLK_STP;
        }

        writel(clk, &clkrst->crc_clk_cpu_cmplx);
        writel(reg, &clkrst->crc_clk_out_enb_l);
}

static int is_cpu_powered(void)
{
        struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

        return (readl(&pmc->pmc_pwrgate_status) & CPU_PWRED) ? 1 : 0;
}

static void remove_cpu_io_clamps(void)
{
        struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
        u32 reg;

        /* Remove the clamps on the CPU I/O signals */
        reg = readl(&pmc->pmc_remove_clamping);
        reg |= CPU_CLMP;
        writel(reg, &pmc->pmc_remove_clamping);

        /* Give I/O signals time to stabilize */
        udelay(IO_STABILIZATION_DELAY);
}

static void powerup_cpu(void)
{
        struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
        u32 reg;
        int timeout = IO_STABILIZATION_DELAY;

        if (!is_cpu_powered()) {
                /* Toggle the CPU power state (OFF -> ON) */
                reg = readl(&pmc->pmc_pwrgate_toggle);
                reg &= PARTID_CP;
                reg |= START_CP;
                writel(reg, &pmc->pmc_pwrgate_toggle);

                /* Wait for the power to come up */
                while (!is_cpu_powered()) {
                        if (timeout-- == 0)
                                printf("CPU failed to power up!\n");
                        else
                                udelay(10);
                }

                /*
                 * Remove the I/O clamps from CPU power partition.
                 * Recommended only on a Warm boot, if the CPU partition gets
                 * power gated. Shouldn't cause any harm when called after a
                 * cold boot according to HW, probably just redundant.
                 */
                remove_cpu_io_clamps();
        }
}

static void enable_cpu_power_rail(void)
{
        struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
        u32 reg;

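        /*
         * Setting CPUPWRREQ_OE presumably enables the PMC's CPUPWRREQ output
         * so the external PMU turns on the CPU rail (VDD_CPU).
         */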
        reg = readl(&pmc->pmc_cntrl);
        reg |= CPUPWRREQ_OE;
        writel(reg, &pmc->pmc_cntrl);

        /*
         * The TI PMU65861C needs a 3.75ms delay between enabling
         * the power rail and enabling the CPU clock. This delay
         * between SM1EN and SM1 is for switching time + the ramp
         * up of the voltage to the CPU (VDD_CPU from PMU).
         */
        udelay(3750);
}

static void reset_A9_cpu(int reset)
{
        struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
        u32 reg, cpu;

        /*
         * NOTE: Regardless of whether the request is to hold the CPU in reset
         * or take it out of reset, every processor in the CPU complex
         * except the master (CPU 0) will be held in reset because the
         * AVP only talks to the master. The AVP does not know that there
         * are multiple processors in the CPU complex.
         */

        /* Hold CPU 1 in reset */
        cpu = SET_DBGRESET1 | SET_DERESET1 | SET_CPURESET1;
        writel(cpu, &clkrst->crc_cpu_cmplx_set);

        reg = readl(&clkrst->crc_rst_dev_l);
        if (reset) {
                /* Now place CPU0 into reset */
                cpu |= SET_DBGRESET0 | SET_DERESET0 | SET_CPURESET0;
                writel(cpu, &clkrst->crc_cpu_cmplx_set);

                /* Enable master CPU reset */
                reg |= SWR_CPU_RST;
        } else {
                /* Take CPU0 out of reset */
                cpu = CLR_DBGRESET0 | CLR_DERESET0 | CLR_CPURESET0;
                writel(cpu, &clkrst->crc_cpu_cmplx_clr);

                /* Disable master CPU reset */
                reg &= ~SWR_CPU_RST;
        }

        writel(reg, &clkrst->crc_rst_dev_l);
}

static void clock_enable_coresight(int enable)
{
        struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
        u32 rst, clk, src;

        rst = readl(&clkrst->crc_rst_dev_u);
        clk = readl(&clkrst->crc_clk_out_enb_u);

        if (enable) {
                rst &= ~SWR_CSITE_RST;
                clk |= CLK_ENB_CSITE;
        } else {
                rst |= SWR_CSITE_RST;
                clk &= ~CLK_ENB_CSITE;
        }

        writel(clk, &clkrst->crc_clk_out_enb_u);
        writel(rst, &clkrst->crc_rst_dev_u);

        if (enable) {
                /*
                 * Put CoreSight on PLLP_OUT0 (216 MHz) and divide it down by
                 * 1.5, giving an effective frequency of 144 MHz.
                 * Set PLLP_OUT0 [bits 31:30 = 00], and use a 7.1 format
                 * divisor (bits 7:0), so 00000001b == 1.5 (divide ratio is
                 * n/2 + 1).
                 */
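                /*
                 * With NVBL_PLLP_KHZ = 216000 and a 144000 kHz target, the
                 * CLK_DIVIDER() macro in ap20.h presumably evaluates to 1,
                 * i.e. the divide-by-1.5 setting described above.
                 */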
                src = CLK_DIVIDER(NVBL_PLLP_KHZ, 144000);
                writel(src, &clkrst->crc_clk_src_csite);

                /* Unlock the CPU CoreSight interfaces */
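                /* 0xC5ACCE55 is the architected CoreSight lock access (LAR) key */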
                rst = 0xC5ACCE55;
                writel(rst, CSITE_CPU_DBG0_LAR);
                writel(rst, CSITE_CPU_DBG1_LAR);
        }
}

void start_cpu(u32 reset_vector)
{
        /* Enable VDD_CPU */
        enable_cpu_power_rail();

        /* Hold the CPUs in reset */
        reset_A9_cpu(1);

        /* Disable the CPU clock */
        enable_cpu_clock(0);

        /* Enable CoreSight */
        clock_enable_coresight(1);

        /*
         * Set the entry point for CPU execution from reset,
         * if it's a non-zero value.
         */
        if (reset_vector)
                writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);

        /* Enable the CPU clock */
        enable_cpu_clock(1);

        /* If the CPU doesn't already have power, power it up */
        powerup_cpu();

        /* Take the CPU out of reset */
        reset_A9_cpu(0);
}

void halt_avp(void)
{
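        /*
         * Program the flow controller to stop the AVP (COP). The loop is
         * presumably a safeguard: if one of the listed events (JTAG, IRQ,
         * FIQ) wakes the AVP, it immediately puts itself back to sleep.
         */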
        for (;;) {
                writel((HALT_COP_EVENT_JTAG | HALT_COP_EVENT_IRQ_1
                        | HALT_COP_EVENT_FIQ_1 | (FLOW_MODE_STOP << 29)),
                        FLOW_CTLR_HALT_COP_EVENTS);
        }
}

void enable_scu(void)
{
        struct scu_ctlr *scu = (struct scu_ctlr *)NV_PA_ARM_PERIPHBASE;
        u32 reg;

        /* If SCU already setup/enabled, return */
        if (readl(&scu->scu_ctrl) & SCU_CTRL_ENABLE)
                return;

        /* Invalidate all ways for all processors */
        writel(0xFFFF, &scu->scu_inv_all);

        /* Enable SCU - bit 0 */
        reg = readl(&scu->scu_ctrl);
        reg |= SCU_CTRL_ENABLE;
        writel(reg, &scu->scu_ctrl);
}

void init_pmc_scratch(void)
{
        struct pmc_ctlr *const pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
        int i;

        /* SCRATCH0 is initialized by the boot ROM and shouldn't be cleared */
        for (i = 0; i < 23; i++)
                writel(0, &pmc->pmc_scratch1 + i);

        /* ODMDATA is for kernel use to determine RAM size, LP config, etc. */
        writel(CONFIG_SYS_BOARD_ODMDATA, &pmc->pmc_scratch20);
}

void cpu_start(void)
{
        struct pmux_tri_ctlr *pmt = (struct pmux_tri_ctlr *)NV_PA_APB_MISC_BASE;

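        /*
         * 0xC0 presumably sets the JTAG enable bits in the pinmux config
         * control register so a debugger can attach this early.
         */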
        /* enable JTAG */
        writel(0xC0, &pmt->pmt_cfg_ctl);

        if (s_first_boot) {
                /*
                 * Need to set this before cold-booting,
                 * otherwise we'll end up in an infinite loop.
                 */
                s_first_boot = 0;
                cold_boot();
        }
}

void tegra2_start(void)
{
        if (s_first_boot) {
                /* Init Debug UART Port (115200 8n1) */
                uart_init();

                /* Init PMC scratch memory */
                init_pmc_scratch();
        }

#ifdef CONFIG_ENABLE_CORTEXA9
        /* Take the MPCore out of reset */
        cpu_start();

        /* Configure cache */
        cache_configure();
#endif
}