/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>

	.globl	psci_do_pwrdown_cache_maintenance
	.globl	psci_do_pwrup_cache_maintenance

/* -----------------------------------------------------------------------
 * void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
 *
 * ABI:   AAPCS64, executing at EL3 (reads/writes sctlr_el3).
 * In:    x0 = affinity level being powered down; MPIDR_AFFLVL0 means only
 *        this cpu is going down, any higher level implies the cluster too.
 * Out:   none.
 * Saves: x19/x20 and x29/x30 (FP/LR) are preserved on the stack; other
 *        registers may be clobbered here or by the helpers called below
 *        (dcsw_op_louis, dcsw_op_all, platform_get_stack,
 *        flush_dcache_range, inv_dcache_range — all defined elsewhere).
 *
 * This function performs cache maintenance before this cpu is powered
 * off. The levels of cache affected are determined by the affinity level
 * which is passed as the argument. Additionally, this function also
 * ensures that stack memory is correctly flushed out to avoid coherency
 * issues due to a change in its memory attributes after the data cache
 * is disabled.
 *
 * NOTE(review): all stack accesses after the D-cache is disabled below go
 * straight to memory; the used stack is explicitly cleaned and the unused
 * remainder invalidated so no stale dirty lines survive the power cycle.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrdown_cache_maintenance
	stp	x29, x30, [sp,#-16]!	/* save FP/LR: non-leaf function */
	stp	x19, x20, [sp,#-16]!	/* x19 is kept live across calls below */

	/* ---------------------------------------------
	 * Disable the Data Cache. Subsequent loads and
	 * stores from this cpu bypass the cache; the isb
	 * ensures the sctlr_el3 write has taken effect
	 * before any cache maintenance starts.
	 * ---------------------------------------------
	 */
	mrs	x1, sctlr_el3
	bic	x1, x1, #SCTLR_C_BIT
	msr	sctlr_el3, x1
	isb

	/* ---------------------------------------------
	 * Determine to how many levels of cache will be
	 * subject to cache maintenance. Affinity level
	 * 0 implies that only the cpu is being powered
	 * down. Only the L1 data cache needs to be
	 * flushed to the PoU in this case. For a higher
	 * affinity level we are assuming that a flush
	 * of L1 data and L2 unified cache is enough.
	 * This information should be provided by the
	 * platform.
	 *
	 * x0 = DCCISW (clean+invalidate by set/way) is
	 * loaded BEFORE the branch so that it is the
	 * operation argument for whichever dcsw_op_*
	 * helper is taken. The cmp flags survive the
	 * mov (mov does not alter flags).
	 * ---------------------------------------------
	 */
	cmp	x0, #MPIDR_AFFLVL0
	mov	x0, #DCCISW
	b.ne	flush_caches_to_poc

	/* ---------------------------------------------
	 * Flush L1 cache to PoU (cpu-only power down).
	 * dcsw_op_louis operates up to the Level Of
	 * Unification Inner Shareable — TODO confirm it
	 * matches the "PoU" wording here.
	 * ---------------------------------------------
	 */
	bl	dcsw_op_louis
	b	do_stack_maintenance

	/* ---------------------------------------------
	 * Flush L1 and L2 caches to PoC (cluster is
	 * also going down).
	 * ---------------------------------------------
	 */
flush_caches_to_poc:
	bl	dcsw_op_all

	/* ---------------------------------------------
	 * TODO: Intra-cluster coherency should be
	 * turned off here once cpu-specific
	 * abstractions are in place.
	 * ---------------------------------------------
	 */

	/* ---------------------------------------------
	 * Do stack maintenance by flushing the used
	 * stack to the main memory and invalidating the
	 * remainder.
	 * ---------------------------------------------
	 */
do_stack_maintenance:
	mrs	x0, mpidr_el1
	bl	platform_get_stack	/* presumably returns this cpu's stack
					 * top in x0 — consistent with the
					 * arithmetic below; confirm in the
					 * platform port */

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1:  used = top - sp.
	 * x19 keeps the stack top for the invalidate
	 * step after flush_dcache_range clobbers x0/x1.
	 * ---------------------------------------------
	 */
	mov	x19, x0
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	flush_dcache_range	/* (addr=sp, size=used): clean to memory */

	/* ---------------------------------------------
	 * Calculate and store the size of the unused
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0:
	 *   base   = top - PLATFORM_STACK_SIZE
	 *   unused = sp - base
	 * The used portion (sp and above) was cleaned,
	 * not invalidated, so the ldp's below still
	 * read the saved registers from memory.
	 * ---------------------------------------------
	 */
	sub	x0, x19, #PLATFORM_STACK_SIZE
	sub	x1, sp, x0
	bl	inv_dcache_range	/* (addr=base, size=unused): drop stale lines */

	ldp	x19, x20, [sp], #16	/* restore in reverse push order */
	ldp	x29, x30, [sp], #16
	ret


/* -----------------------------------------------------------------------
 * void psci_do_pwrup_cache_maintenance(void);
 *
 * ABI:   AAPCS64, executing at EL3 (reads/writes sctlr_el3).
 * In:    none.
 * Out:   none.
 * Saves: x29/x30 (FP/LR); other registers may be clobbered here or by
 *        platform_get_stack / inv_dcache_range (defined elsewhere).
 *
 * This function performs cache maintenance after this cpu is powered up.
 * Currently, this involves managing the used stack memory before turning
 * on the data cache: the stack was written with the cache off, so any
 * stale cached lines covering it must be invalidated before the cache is
 * re-enabled, or they could shadow the up-to-date data in memory.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrup_cache_maintenance
	stp	x29, x30, [sp,#-16]!	/* save FP/LR: non-leaf function */

	/* ---------------------------------------------
	 * Ensure any inflight stack writes have made it
	 * to main memory before the invalidation below.
	 * ---------------------------------------------
	 */
	dmb	st

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1 (used = top - sp) and the
	 * start address in x0 (= sp), then invalidate
	 * that range. platform_get_stack presumably
	 * returns this cpu's stack top in x0 — confirm
	 * in the platform port.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_get_stack
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Enable the data cache; the isb ensures the
	 * sctlr_el3 update takes effect before the
	 * cached epilogue accesses below.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb

	ldp	x29, x30, [sp], #16	/* restore FP/LR */
	ret