Tony Xie | f6118cc | 2016-01-15 17:17:32 +0800 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. |
| 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions are met: |
| 6 | * |
| 7 | * Redistributions of source code must retain the above copyright notice, this |
| 8 | * list of conditions and the following disclaimer. |
| 9 | * |
| 10 | * Redistributions in binary form must reproduce the above copyright notice, |
| 11 | * this list of conditions and the following disclaimer in the documentation |
| 12 | * and/or other materials provided with the distribution. |
| 13 | * |
| 14 | * Neither the name of ARM nor the names of its contributors may be used |
| 15 | * to endorse or promote products derived from this software without specific |
| 16 | * prior written permission. |
| 17 | * |
| 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
| 22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 28 | * POSSIBILITY OF SUCH DAMAGE. |
| 29 | */ |
| 30 | |
| 31 | #include <arch_helpers.h> |
| 32 | #include <assert.h> |
| 33 | #include <bakery_lock.h> |
| 34 | #include <debug.h> |
| 35 | #include <delay_timer.h> |
| 36 | #include <errno.h> |
| 37 | #include <mmio.h> |
| 38 | #include <platform.h> |
| 39 | #include <platform_def.h> |
| 40 | #include <plat_private.h> |
| 41 | #include <rk3399_def.h> |
| 42 | #include <pmu_sram.h> |
| 43 | #include <soc.h> |
| 44 | #include <pmu.h> |
| 45 | #include <pmu_com.h> |
| 46 | |
/*
 * Shared suspend/resume parameter block living in PMU SRAM; written here
 * and consumed by the pmu_cpuson_entrypoint code copied into PMUSRAM.
 */
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;
| 49 | |
| 50 | /* |
| 51 | * There are two ways to powering on or off on core. |
| 52 | * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, |
| 53 | * it is core_pwr_pd mode |
| 54 | * 2) Enable the core power manage in PMU_CORE_PM_CON reg, |
| 55 | * then, if the core enter into wfi, it power domain will be |
| 56 | * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode |
| 57 | * so we need core_pm_cfg_info to distinguish which method be used now. |
| 58 | */ |
| 59 | |
| 60 | static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] |
| 61 | #if USE_COHERENT_MEM |
| 62 | __attribute__ ((section("tzfw_coherent_mem"))) |
| 63 | #endif |
| 64 | ;/* coheront */ |
| 65 | |
| 66 | void plat_rockchip_pmusram_prepare(void) |
| 67 | { |
| 68 | uint32_t *sram_dst, *sram_src; |
| 69 | size_t sram_size = 2; |
| 70 | |
| 71 | /* |
| 72 | * pmu sram code and data prepare |
| 73 | */ |
| 74 | sram_dst = (uint32_t *)PMUSRAM_BASE; |
| 75 | sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start; |
| 76 | sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end - |
| 77 | (uint32_t *)sram_src; |
| 78 | |
| 79 | u32_align_cpy(sram_dst, sram_src, sram_size); |
| 80 | |
| 81 | psram_sleep_cfg->sp = PSRAM_DT_BASE; |
| 82 | } |
| 83 | |
/* Return the recorded power-down method (core_pwr_*) for this core. */
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	return core_pm_cfg_info[cpu_id];
}
| 88 | |
| 89 | static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) |
| 90 | { |
| 91 | core_pm_cfg_info[cpu_id] = value; |
| 92 | #if !USE_COHERENT_MEM |
| 93 | flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], |
| 94 | sizeof(uint32_t)); |
| 95 | #endif |
| 96 | } |
| 97 | |
| 98 | static int cpus_power_domain_on(uint32_t cpu_id) |
| 99 | { |
| 100 | uint32_t cfg_info; |
| 101 | uint32_t cpu_pd = PD_CPUL0 + cpu_id; |
| 102 | /* |
| 103 | * There are two ways to powering on or off on core. |
| 104 | * 1) Control it power domain into on or off in PMU_PWRDN_CON reg |
| 105 | * 2) Enable the core power manage in PMU_CORE_PM_CON reg, |
| 106 | * then, if the core enter into wfi, it power domain will be |
| 107 | * powered off automatically. |
| 108 | */ |
| 109 | |
| 110 | cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); |
| 111 | |
| 112 | if (cfg_info == core_pwr_pd) { |
| 113 | /* disable core_pm cfg */ |
| 114 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), |
| 115 | CORES_PM_DISABLE); |
| 116 | /* if the cores have be on, power off it firstly */ |
| 117 | if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { |
| 118 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); |
| 119 | pmu_power_domain_ctr(cpu_pd, pmu_pd_off); |
| 120 | } |
| 121 | |
| 122 | pmu_power_domain_ctr(cpu_pd, pmu_pd_on); |
| 123 | } else { |
| 124 | if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { |
| 125 | WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); |
| 126 | return -EINVAL; |
| 127 | } |
| 128 | |
| 129 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), |
| 130 | BIT(core_pm_sft_wakeup_en)); |
| 131 | } |
| 132 | |
| 133 | return 0; |
| 134 | } |
| 135 | |
| 136 | static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) |
| 137 | { |
| 138 | uint32_t cpu_pd; |
| 139 | uint32_t core_pm_value; |
| 140 | |
| 141 | cpu_pd = PD_CPUL0 + cpu_id; |
| 142 | if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) |
| 143 | return 0; |
| 144 | |
| 145 | if (pd_cfg == core_pwr_pd) { |
| 146 | if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) |
| 147 | return -EINVAL; |
| 148 | |
| 149 | /* disable core_pm cfg */ |
| 150 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), |
| 151 | CORES_PM_DISABLE); |
| 152 | |
| 153 | set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); |
| 154 | pmu_power_domain_ctr(cpu_pd, pmu_pd_off); |
| 155 | } else { |
| 156 | set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); |
| 157 | |
| 158 | core_pm_value = BIT(core_pm_en); |
| 159 | if (pd_cfg == core_pwr_wfi_int) |
| 160 | core_pm_value |= BIT(core_pm_int_wakeup_en); |
| 161 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), |
| 162 | core_pm_value); |
| 163 | } |
| 164 | |
| 165 | return 0; |
| 166 | } |
| 167 | |
| 168 | static void nonboot_cpus_off(void) |
| 169 | { |
| 170 | uint32_t boot_cpu, cpu; |
| 171 | |
| 172 | boot_cpu = plat_my_core_pos(); |
| 173 | |
| 174 | /* turn off noboot cpus */ |
| 175 | for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { |
| 176 | if (cpu == boot_cpu) |
| 177 | continue; |
| 178 | cpus_power_domain_off(cpu, core_pwr_pd); |
| 179 | } |
| 180 | } |
| 181 | |
| 182 | static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint) |
| 183 | { |
| 184 | uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); |
| 185 | |
| 186 | assert(cpuson_flags[cpu_id] == 0); |
| 187 | cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; |
| 188 | cpuson_entry_point[cpu_id] = entrypoint; |
| 189 | dsb(); |
| 190 | |
| 191 | cpus_power_domain_on(cpu_id); |
| 192 | |
| 193 | return 0; |
| 194 | } |
| 195 | |
| 196 | static int cores_pwr_domain_off(void) |
| 197 | { |
| 198 | uint32_t cpu_id = plat_my_core_pos(); |
| 199 | |
| 200 | cpus_power_domain_off(cpu_id, core_pwr_wfi); |
| 201 | |
| 202 | return 0; |
| 203 | } |
| 204 | |
| 205 | static int cores_pwr_domain_suspend(void) |
| 206 | { |
| 207 | uint32_t cpu_id = plat_my_core_pos(); |
| 208 | |
| 209 | assert(cpuson_flags[cpu_id] == 0); |
| 210 | cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; |
| 211 | cpuson_entry_point[cpu_id] = (uintptr_t)psci_entrypoint; |
| 212 | dsb(); |
| 213 | |
| 214 | cpus_power_domain_off(cpu_id, core_pwr_wfi_int); |
| 215 | |
| 216 | return 0; |
| 217 | } |
| 218 | |
| 219 | static int cores_pwr_domain_on_finish(void) |
| 220 | { |
| 221 | uint32_t cpu_id = plat_my_core_pos(); |
| 222 | |
| 223 | cpuson_flags[cpu_id] = 0; |
| 224 | cpuson_entry_point[cpu_id] = 0; |
| 225 | |
| 226 | /* Disable core_pm */ |
| 227 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); |
| 228 | |
| 229 | return 0; |
| 230 | } |
| 231 | |
| 232 | static int cores_pwr_domain_resume(void) |
| 233 | { |
| 234 | uint32_t cpu_id = plat_my_core_pos(); |
| 235 | |
| 236 | cpuson_flags[cpu_id] = 0; |
| 237 | cpuson_entry_point[cpu_id] = 0; |
| 238 | |
| 239 | /* Disable core_pm */ |
| 240 | mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); |
| 241 | |
| 242 | return 0; |
| 243 | } |
| 244 | |
| 245 | static void sys_slp_config(void) |
| 246 | { |
| 247 | uint32_t slp_mode_cfg = 0; |
| 248 | |
| 249 | slp_mode_cfg = PMU_PWR_MODE_EN | |
| 250 | PMU_CPU0_PD_EN | |
| 251 | PMU_L2_FLUSH_EN | |
| 252 | PMU_L2_IDLE_EN | |
| 253 | PMU_SCU_PD_EN | |
| 254 | PMU_CLK_CORE_SRC_GATE_EN; |
| 255 | mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_L_WKUP_EN); |
| 256 | mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_B_WKUP_EN); |
| 257 | mmio_clrbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_GPIO_WKUP_EN); |
| 258 | mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg); |
| 259 | } |
| 260 | |
| 261 | static int sys_pwr_domain_suspend(void) |
| 262 | { |
| 263 | sys_slp_config(); |
| 264 | plls_suspend(); |
| 265 | psram_sleep_cfg->sys_mode = PMU_SYS_SLP_MODE; |
| 266 | pmu_sgrf_rst_hld(); |
| 267 | return 0; |
| 268 | } |
| 269 | |
| 270 | static int sys_pwr_domain_resume(void) |
| 271 | { |
| 272 | pmu_sgrf_rst_hld_release(); |
| 273 | psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE; |
| 274 | plls_resume(); |
| 275 | |
| 276 | return 0; |
| 277 | } |
| 278 | |
/*
 * Power-management callbacks handed to the common Rockchip PSCI glue
 * via plat_setup_rockchip_pm_ops().
 */
static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_off = cores_pwr_domain_off,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
	.cores_pwr_dm_resume = cores_pwr_domain_resume,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_global_soft_reset,
};
| 289 | |
| 290 | void plat_rockchip_pmu_init(void) |
| 291 | { |
| 292 | uint32_t cpu; |
| 293 | |
| 294 | rockchip_pd_lock_init(); |
| 295 | plat_setup_rockchip_pm_ops(&pm_ops); |
| 296 | |
| 297 | for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) |
| 298 | cpuson_flags[cpu] = 0; |
| 299 | |
| 300 | psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE; |
| 301 | |
| 302 | psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff; |
| 303 | |
| 304 | /* cpu boot from pmusram */ |
| 305 | mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1), |
| 306 | (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) | |
| 307 | CPU_BOOT_ADDR_WMASK); |
| 308 | |
| 309 | nonboot_cpus_off(); |
| 310 | INFO("%s(%d): pd status %x\n", __func__, __LINE__, |
| 311 | mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); |
| 312 | } |