/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
#include <psci.h>

	.globl	psci_do_pwrdown_cache_maintenance
	.globl	psci_do_pwrup_cache_maintenance

/* -----------------------------------------------------------------------
 * void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
 *
 * This function performs cache maintenance if the specified affinity
 * level is equal to the level of the highest affinity instance which
 * will be/is physically powered off. The levels of cache affected are
 * determined by the affinity level which is passed as the argument, i.e.
 * level 0 results in a flush of the L1 cache. Both the L1 and L2 caches
 * are flushed for a higher affinity level.
 *
 * Additionally, this function also ensures that stack memory is correctly
 * flushed out to avoid coherency issues due to a change in its memory
 * attributes after the data cache is disabled.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrdown_cache_maintenance
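	/*
	 * Save the frame pointer and link register along with x19/x20.
	 * x19 is used below to preserve values across calls; x20 is only
	 * saved to keep the stack 16-byte aligned.
	 */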
	stp	x29, x30, [sp,#-16]!
	stp	x19, x20, [sp,#-16]!

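	/*
	 * Preserve the affinity level argument in the callee-saved x19 and
	 * compare it against the highest affinity level which will be (or
	 * already is) physically powered off. If they differ, no cache or
	 * stack maintenance is needed here, so branch to label 1.
	 */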
	mov	x19, x0
	bl	psci_get_max_phys_off_afflvl
#if ASM_ASSERTION
	cmp	x0, #PSCI_INVALID_DATA
	ASM_ASSERT(ne)
#endif
	cmp	x0, x19
	b.ne	1f

	/* ---------------------------------------------
	 * Determine how many levels of cache will be
	 * subject to cache maintenance. Affinity level
	 * 0 implies that only the cpu is being powered
	 * down. Only the L1 data cache needs to be
	 * flushed to the PoU in this case. For a higher
	 * affinity level we are assuming that a flush
	 * of L1 data and L2 unified cache is enough.
	 * This information should be provided by the
	 * platform.
	 * ---------------------------------------------
	 */
	cmp	x0, #MPIDR_AFFLVL0
	b.eq	do_core_pwr_dwn
	bl	prepare_cluster_pwr_dwn
	b	do_stack_maintenance

do_core_pwr_dwn:
	bl	prepare_core_pwr_dwn

	/* ---------------------------------------------
	 * Do stack maintenance by flushing the used
	 * stack to the main memory and invalidating the
	 * remainder.
	 * ---------------------------------------------
	 */
do_stack_maintenance:
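	/*
	 * Retrieve the top of this cpu's stack from the platform, using the
	 * cpu's mpidr as the lookup key. The value returned in x0 is the
	 * initial stack pointer from which sp grows downwards.
	 */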
	mrs	x0, mpidr_el1
	bl	platform_get_stack

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1. Preserve the stack top
	 * returned above in x19 and pass the current
	 * stack pointer in x0 as the start of the
	 * range to be flushed.
	 * ---------------------------------------------
	 */
	mov	x19, x0
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	flush_dcache_range

	/* ---------------------------------------------
	 * Calculate and store the size of the unused
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	sub	x0, x19, #PLATFORM_STACK_SIZE
	sub	x1, sp, x0
	bl	inv_dcache_range

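	/*
	 * Label 1 is the branch target for the case where this affinity
	 * level requires no cache maintenance. Restore the callee-saved
	 * registers and return.
	 */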
1:
	ldp	x19, x20, [sp], #16
	ldp	x29, x30, [sp], #16
	ret


/* -----------------------------------------------------------------------
 * void psci_do_pwrup_cache_maintenance(void);
 *
 * This function performs cache maintenance after this cpu is powered up.
 * Currently, this involves managing the used stack memory before turning
 * on the data cache.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrup_cache_maintenance
	stp	x29, x30, [sp,#-16]!

	/* ---------------------------------------------
	 * Ensure any inflight stack writes have made it
	 * to main memory.
	 * ---------------------------------------------
	 */
	dmb	st

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1. Store the start address
	 * of that region, i.e. the current stack
	 * pointer, in x0.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_get_stack
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Enable the data cache.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb

	ldp	x29, x30, [sp], #16
	ret