Merge pull request #788 from jeenu-arm/cpuops-framework
Add provision to extend CPU operations at more levels
diff --git a/docs/firmware-design.md b/docs/firmware-design.md
index c37f9c5..0acb1fa 100644
--- a/docs/firmware-design.md
+++ b/docs/firmware-design.md
@@ -1127,7 +1127,8 @@
The CPU specific operations framework depends on the `cpu_ops` structure which
needs to be exported for each type of CPU in the platform. It is defined in
`include/lib/cpus/aarch64/cpu_macros.S` and has the following fields: `midr`,
-`reset_func()`, `core_pwr_dwn()`, `cluster_pwr_dwn()` and `cpu_reg_dump()`.
+`reset_func()`, `cpu_pwr_down_ops` (array of power down functions) and
+`cpu_reg_dump()`.
The CPU specific files in `lib/cpus` export a `cpu_ops` data structure with
suitable handlers for that CPU. For example, `lib/cpus/aarch64/cortex_a53.S`
@@ -1161,15 +1162,15 @@
entry is stored in per-CPU data by `init_cpu_ops()` so that it can be quickly
retrieved during power down sequences.
-The PSCI service, upon receiving a power down request, determines the highest
-power level at which to execute power down sequence for a particular CPU and
-invokes the corresponding 'prepare' power down handler in the CPU specific
-operations framework. For example, when a CPU executes a power down for power
-level 0, the `prepare_core_pwr_dwn()` retrieves the `cpu_ops` pointer from the
-per-CPU data and the corresponding `core_pwr_dwn()` is invoked. Similarly when
-a CPU executes power down at power level 1, the `prepare_cluster_pwr_dwn()`
-retrieves the `cpu_ops` pointer and the corresponding `cluster_pwr_dwn()` is
-invoked.
+Individual CPU drivers register handlers to perform power down at certain
+power levels for that specific CPU. The PSCI service, upon receiving a power
+down request, determines the highest power level at which to execute the
+power down sequence for a particular CPU, and uses the
+`prepare_cpu_pwr_dwn()` function to pick the right power down handler for
+the requested level. The function retrieves the `cpu_ops` pointer from
+per-CPU data, then the `cpu_pwr_down_ops` array from that structure, and
+indexes into the array at the requested level. If the requested power level
+is higher than what a CPU driver supports, the handler registered for the
+highest level is invoked.
At runtime the platform hooks for power down are invoked by the PSCI service to
perform platform specific operations during a power down sequence, for example
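
To make the dispatch concrete, the clamping and table lookup described
above can be sketched in C. This is illustrative only: `get_cpu_ops()`
is a hypothetical stand-in for the per-CPU data access, and the handler
signatures are simplified.

    #define CPU_MAX_PWR_DWN_OPS 2   /* must match cpu_macros.S */

    typedef void (*pwr_dwn_handler_t)(void);

    struct cpu_ops {
            unsigned long midr;
            void (*reset_func)(void);
            pwr_dwn_handler_t cpu_pwr_down_ops[CPU_MAX_PWR_DWN_OPS];
            /* register dump hook for crash reporting elided */
    };

    extern struct cpu_ops *get_cpu_ops(void);   /* hypothetical helper */

    void prepare_cpu_pwr_dwn(unsigned int power_level)
    {
            struct cpu_ops *ops = get_cpu_ops();

            /* Levels beyond the highest implemented handler fall back
             * to the handler registered for the highest level */
            if (power_level >= CPU_MAX_PWR_DWN_OPS)
                    power_level = CPU_MAX_PWR_DWN_OPS - 1;

            ops->cpu_pwr_down_ops[power_level]();
    }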
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index 2b9947e..17dd258 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -35,6 +35,15 @@
#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
(MIDR_PN_MASK << MIDR_PN_SHIFT)
+/* The number of CPU power down operations allowed */
+#define CPU_MAX_PWR_DWN_OPS 2
+
+/* Special constant to specify that CPU has no reset function */
+#define CPU_NO_RESET_FUNC 0
+
+/* Word size for 32-bit CPUs */
+#define CPU_WORD_SIZE 4
+
/*
* Define the offsets to the fields in cpu_ops structure.
*/
@@ -47,33 +56,86 @@
.space 4
#endif
#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
-CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
- .space 4
-CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
- .space 4
+CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
+ .space (4 * CPU_MAX_PWR_DWN_OPS)
#endif
CPU_OPS_SIZE = .
/*
- * Convenience macro to declare cpu_ops structure.
- * Make sure the structure fields are as per the offsets
- * defined above.
+ * Write given expressions as words
+ *
+ * _count:
+ * Write exactly _count words. If fewer than _count expressions
+ * are given, the last expression is repeated to fill _count words
+ * in total
+ * _rest:
+ * Optional list of expressions. _this is for parameter extraction
+ * only, and has no significance to the caller
+ *
+ * Invoked as:
+ * fill_constants 2, foo, bar, blah, ...
*/
- .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
+ .macro fill_constants _count:req, _this, _rest:vararg
+ .ifgt \_count
+ /* Write the current expression */
+ .ifb \_this
+ .error "Nothing to fill"
+ .endif
+ .word \_this
+
+ /* Invoke recursively for remaining expressions */
+ .ifnb \_rest
+ fill_constants \_count-1, \_rest
+ .else
+ fill_constants \_count-1, \_this
+ .endif
+ .endif
+ .endm
+
+ /*
+ * Declare CPU operations
+ *
+ * _name:
+ * Name of the CPU for which operations are being specified
+ * _midr:
+ * Numeric value expected to read from CPU's MIDR
+ * _resetfunc:
+ * Reset function for the CPU. If there's no CPU reset function,
+ * specify CPU_NO_RESET_FUNC
+ * _power_down_ops:
+ * Comma-separated list of functions to perform power-down
+ * operations on the CPU. At least one, and up to
+ * CPU_MAX_PWR_DWN_OPS, functions may be specified. The first
+ * function handles power down at power level 0, and each
+ * subsequent function handles the next higher power level. If
+ * fewer than CPU_MAX_PWR_DWN_OPS functions are specified, the
+ * last one is used to handle power down at all remaining levels
+ */
+ .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
+ _power_down_ops:vararg
.section cpu_ops, "a"
.align 2
.type cpu_ops_\_name, %object
.word \_midr
#if IMAGE_BL1 || IMAGE_BL32
- .if \_noresetfunc
- .word 0
- .else
- .word \_name\()_reset_func
- .endif
+ .word \_resetfunc
#endif
#if IMAGE_BL32
- .word \_name\()_core_pwr_dwn
- .word \_name\()_cluster_pwr_dwn
+1:
+ /* Insert list of functions */
+ fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
+2:
+ /*
+ * Error if zero, or more than CPU_MAX_PWR_DWN_OPS, functions were
+ * specified in the list
+ */
+ .ifeq 2b - 1b
+ .error "At least one power down function must be specified"
+ .else
+ .iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
+ .error "More than CPU_MAX_PWR_DWN_OPS functions specified"
+ .endif
+ .endif
#endif
.endm
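
A C analogue makes the padding rule concrete. The `fill_handlers`
function below is purely illustrative and appears nowhere in the tree;
it mirrors what `fill_constants` does at assemble time with `.word`
directives, assuming at least one source entry (the macro errors out
otherwise).

    /* Write count entries to dst: the first n come from src, then the
     * last source entry is repeated to fill the remainder */
    static void fill_handlers(void (*dst[])(void),
                              void (*const src[])(void),
                              unsigned int count, unsigned int n)
    {
            for (unsigned int i = 0; i < count; i++)
                    dst[i] = src[i < n ? i : n - 1];
    }

With `count` at 2 and a single source entry, both slots receive the same
handler; this is how a driver that registers only a core power down
function still services cluster-level requests.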
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index f34f078..570ef88 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,6 +35,15 @@
#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
(MIDR_PN_MASK << MIDR_PN_SHIFT)
+/* The number of CPU power down operations allowed */
+#define CPU_MAX_PWR_DWN_OPS 2
+
+/* Special constant to specify that CPU has no reset function */
+#define CPU_NO_RESET_FUNC 0
+
+/* Word size for 64-bit CPUs */
+#define CPU_WORD_SIZE 8
+
/*
* Define the offsets to the fields in cpu_ops structure.
*/
@@ -47,10 +56,8 @@
.space 8
#endif
#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
-CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
- .space 8
-CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
- .space 8
+CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
+ .space (8 * CPU_MAX_PWR_DWN_OPS)
#endif
#if (IMAGE_BL31 && CRASH_REPORTING)
CPU_REG_DUMP: /* cpu specific register dump for crash reporting */
@@ -59,24 +66,80 @@
CPU_OPS_SIZE = .
/*
- * Convenience macro to declare cpu_ops structure.
- * Make sure the structure fields are as per the offsets
- * defined above.
+ * Write given expressions as quad words
+ *
+ * _count:
+ * Write exactly _count quad words. If fewer than _count
+ * expressions are given, the last expression is repeated to fill
+ * _count quad words in total
+ * _rest:
+ * Optional list of expressions. _this is for parameter extraction
+ * only, and has no significance to the caller
+ *
+ * Invoked as:
+ * fill_constants 2, foo, bar, blah, ...
*/
- .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
- .section cpu_ops, "a"; .align 3
+ .macro fill_constants _count:req, _this, _rest:vararg
+ .ifgt \_count
+ /* Write the current expression */
+ .ifb \_this
+ .error "Nothing to fill"
+ .endif
+ .quad \_this
+
+ /* Invoke recursively for remaining expressions */
+ .ifnb \_rest
+ fill_constants \_count-1, \_rest
+ .else
+ fill_constants \_count-1, \_this
+ .endif
+ .endif
+ .endm
+
+ /*
+ * Declare CPU operations
+ *
+ * _name:
+ * Name of the CPU for which operations are being specified
+ * _midr:
+ * Numeric value expected to read from CPU's MIDR
+ * _resetfunc:
+ * Reset function for the CPU. If there's no CPU reset function,
+ * specify CPU_NO_RESET_FUNC
+ * _power_down_ops:
+ * Comma-separated list of functions to perform power-down
+ * operations on the CPU. At least one, and up to
+ * CPU_MAX_PWR_DWN_OPS, functions may be specified. The first
+ * function handles power down at power level 0, and each
+ * subsequent function handles the next higher power level. If
+ * fewer than CPU_MAX_PWR_DWN_OPS functions are specified, the
+ * last one is used to handle power down at all remaining levels
+ */
+ .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
+ _power_down_ops:vararg
+ .section cpu_ops, "a"
+ .align 3
.type cpu_ops_\_name, %object
.quad \_midr
#if IMAGE_BL1 || IMAGE_BL31
- .if \_noresetfunc
- .quad 0
- .else
- .quad \_name\()_reset_func
- .endif
+ .quad \_resetfunc
#endif
#if IMAGE_BL31
- .quad \_name\()_core_pwr_dwn
- .quad \_name\()_cluster_pwr_dwn
+1:
+ /* Insert list of functions */
+ fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
+2:
+ /*
+ * Error if zero, or more than CPU_MAX_PWR_DWN_OPS, functions were
+ * specified in the list
+ */
+ .ifeq 2b - 1b
+ .error "At least one power down function must be specified"
+ .else
+ .iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
+ .error "More than CPU_MAX_PWR_DWN_OPS functions specified"
+ .endif
+ .endif
#endif
#if (IMAGE_BL31 && CRASH_REPORTING)
.quad \_name\()_cpu_reg_dump
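
Laid out as a C structure, the AArch64 `cpu_ops` entry emitted by this
macro corresponds to the sketch below. The structure exists only as
assembly offsets; the conditional fields are present only in the images
indicated, and the handler signatures are simplified for illustration.

    struct cpu_ops {
            unsigned long midr;
    #if IMAGE_BL1 || IMAGE_BL31
            void (*reset_func)(void);
    #endif
    #if IMAGE_BL31
            void (*cpu_pwr_down_ops[CPU_MAX_PWR_DWN_OPS])(void);
    #endif
    #if (IMAGE_BL31 && CRASH_REPORTING)
            void (*cpu_reg_dump)(void);
    #endif
    };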
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index 10ea4e4..3d6064c 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -65,4 +65,6 @@
endfunc aem_generic_cluster_pwr_dwn
/* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
index f2b85a3..f631c4c 100644
--- a/lib/cpus/aarch32/cortex_a32.S
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -141,4 +141,7 @@
b cortex_a32_disable_smp
endfunc cortex_a32_cluster_pwr_dwn
-declare_cpu_ops cortex_a32, CORTEX_A32_MIDR
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
+ cortex_a32_reset_func, \
+ cortex_a32_core_pwr_dwn, \
+ cortex_a32_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index a4dfe5f..900d158 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -70,50 +70,39 @@
#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
/*
- * The prepare core power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * The common CPU power down preparation function for all platforms.
+ * It takes the power domain level to be powered down as its parameter.
+ * After the cpu_ops pointer is retrieved from cpu_data, the handler
+ * registered for the requested power level is called.
*/
- .globl prepare_core_pwr_dwn
-func prepare_core_pwr_dwn
- /* r12 is pushed to meet the 8 byte stack alignment requirement */
- push {r12, lr}
- bl _cpu_data
- pop {r12, lr}
-
- ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
- cmp r1, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the cpu_ops core_pwr_dwn handler */
- ldr r0, [r1, #CPU_PWR_DWN_CORE]
- bx r0
-endfunc prepare_core_pwr_dwn
-
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
/*
- * The prepare cluster power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
+ * If the given power level exceeds the highest implemented level
+ * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler registered
+ * for the highest level
*/
- .globl prepare_cluster_pwr_dwn
-func prepare_cluster_pwr_dwn
- /* r12 is pushed to meet the 8 byte stack alignment requirement */
- push {r12, lr}
+ mov r2, #(CPU_MAX_PWR_DWN_OPS - 1)
+ cmp r0, r2
+ movhi r0, r2
+
+ push {r0, lr}
bl _cpu_data
- pop {r12, lr}
+ pop {r2, lr}
- ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+ ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
- cmp r1, #0
+ cmp r0, #0
ASM_ASSERT(ne)
#endif
- /* Get the cpu_ops cluster_pwr_dwn handler */
- ldr r0, [r1, #CPU_PWR_DWN_CLUSTER]
- bx r0
-endfunc prepare_cluster_pwr_dwn
+ /* Get the appropriate power down handler */
+ mov r1, #CPU_PWR_DWN_OPS
+ add r1, r1, r2, lsl #2
+ ldr r1, [r0, r1]
+ bx r1
+endfunc prepare_cpu_pwr_dwn
/*
* Initializes the cpu_ops_ptr if not already initialized
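
The three-instruction lookup above amounts to the pointer arithmetic
sketched below in C. `pwr_dwn_ops_off` stands for the `CPU_PWR_DWN_OPS`
offset defined in `cpu_macros.S`, the shift by 2 is the 4-byte stride of
an AArch32 function pointer, and the helper name is hypothetical.

    typedef void (*handler_t)(void);

    /* Equivalent of:  mov r1, #CPU_PWR_DWN_OPS
     *                 add r1, r1, r2, lsl #2
     *                 ldr r1, [r0, r1]          */
    static handler_t pwr_dwn_handler(const char *ops_base,
                                     unsigned int pwr_dwn_ops_off,
                                     unsigned int level)
    {
            return *(const handler_t *)(ops_base + pwr_dwn_ops_off +
                                        (level << 2));
    }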
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index 0ab5253..0cedd85 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -90,7 +90,11 @@
/* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
/* cpu_ops for Foundation FVP */
-declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
index ba29d6d..c17c8f1 100644
--- a/lib/cpus/aarch64/cortex_a35.S
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -157,4 +157,7 @@
ret
endfunc cortex_a35_cpu_reg_dump
-declare_cpu_ops cortex_a35, CORTEX_A35_MIDR
+declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \
+ cortex_a35_reset_func, \
+ cortex_a35_core_pwr_dwn, \
+ cortex_a35_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index ed546e7..06be9ce 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -244,4 +244,7 @@
ret
endfunc cortex_a53_cpu_reg_dump
-declare_cpu_ops cortex_a53, CORTEX_A53_MIDR
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index d6b181d..e531b1e 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -488,4 +488,7 @@
endfunc cortex_a57_cpu_reg_dump
-declare_cpu_ops cortex_a57, CORTEX_A57_MIDR
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 9f04fb7..fffc99f 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -242,4 +242,7 @@
endfunc cortex_a72_cpu_reg_dump
-declare_cpu_ops cortex_a72, CORTEX_A72_MIDR
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index e1615db..49d5449 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -153,4 +153,7 @@
ret
endfunc cortex_a73_cpu_reg_dump
-declare_cpu_ops cortex_a73, CORTEX_A73_MIDR
+declare_cpu_ops cortex_a73, CORTEX_A73_MIDR, \
+ cortex_a73_reset_func, \
+ cortex_a73_core_pwr_dwn, \
+ cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index dab933c..ec7f1dd 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -74,31 +74,23 @@
#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
/*
- * The prepare core power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_core in the cpu_ops is invoked.
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * The common CPU power down preparation function for all platforms.
+ * It takes the power domain level to be powered down as its parameter.
+ * After the cpu_ops pointer is retrieved from cpu_data, the handler
+ * registered for the requested power level is called.
*/
- .globl prepare_core_pwr_dwn
-func prepare_core_pwr_dwn
- mrs x1, tpidr_el3
- ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
- cmp x0, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the cpu_ops core_pwr_dwn handler */
- ldr x1, [x0, #CPU_PWR_DWN_CORE]
- br x1
-endfunc prepare_core_pwr_dwn
-
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
/*
- * The prepare cluster power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_cluster in the cpu_ops is invoked.
+ * If the given power level exceeds the highest implemented level
+ * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler registered
+ * for the highest level
*/
- .globl prepare_cluster_pwr_dwn
-func prepare_cluster_pwr_dwn
+ mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1)
+ cmp x0, x2
+ csel x2, x2, x0, hi
+
mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
@@ -106,10 +98,12 @@
ASM_ASSERT(ne)
#endif
- /* Get the cpu_ops cluster_pwr_dwn handler */
- ldr x1, [x0, #CPU_PWR_DWN_CLUSTER]
+ /* Get the appropriate power down handler */
+ mov x1, #CPU_PWR_DWN_OPS
+ add x1, x1, x2, lsl #3
+ ldr x1, [x0, x1]
br x1
-endfunc prepare_cluster_pwr_dwn
+endfunc prepare_cpu_pwr_dwn
/*
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index bce0573..0b61440 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -163,4 +163,7 @@
ret
endfunc denver_cpu_reg_dump
-declare_cpu_ops denver, DENVER_1_0_MIDR
+declare_cpu_ops denver, DENVER_1_0_MIDR, \
+ denver_reset_func, \
+ denver_core_pwr_dwn, \
+ denver_cluster_pwr_dwn
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index 5a41ff3..9f991df 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -65,22 +65,13 @@
bl do_stack_maintenance
/* ---------------------------------------------
- * Determine how many levels of cache will be
- * subject to cache maintenance. Power level
- * 0 implies that only the cpu is being powered
- * down. Only the L1 data cache needs to be
- * flushed to the PoU in this case. For a higher
- * power level we are assuming that a flush
- * of L1 data and L2 unified cache is enough.
- * This information should be provided by the
- * platform.
+ * Invoke CPU-specific power down operations for
+ * the appropriate level
* ---------------------------------------------
*/
- cmp r4, #PSCI_CPU_PWR_LVL
- pop {r4,lr}
-
- beq prepare_core_pwr_dwn
- b prepare_cluster_pwr_dwn
+ mov r0, r4
+ pop {r4, lr}
+ b prepare_cpu_pwr_dwn
endfunc psci_do_pwrdown_cache_maintenance
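
In C terms, the simplified AArch32 sequence now reads as the sketch
below; in the assembly above the final branch is a tail call, so
`prepare_cpu_pwr_dwn` returns directly to the caller. The earlier cache
maintenance steps of the function are elided.

    extern void do_stack_maintenance(void);
    extern void prepare_cpu_pwr_dwn(unsigned int power_level);

    void psci_do_pwrdown_cache_maintenance(unsigned int power_level)
    {
            /* cache maintenance for the powering-down CPU elided */
            do_stack_maintenance();
            /* power level was preserved in r4 across the calls above */
            prepare_cpu_pwr_dwn(power_level);
    }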
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index eaa17c7..108f068 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -59,24 +59,11 @@
stp x19, x20, [sp,#-16]!
/* ---------------------------------------------
- * Determine to how many levels of cache will be
- * subject to cache maintenance. Power level
- * 0 implies that only the cpu is being powered
- * down. Only the L1 data cache needs to be
- * flushed to the PoU in this case. For a higher
- * power level we are assuming that a flush
- * of L1 data and L2 unified cache is enough.
- * This information should be provided by the
- * platform.
+ * Invoke CPU-specific power down operations for
+ * the appropriate level
* ---------------------------------------------
*/
- cmp w0, #PSCI_CPU_PWR_LVL
- b.eq do_core_pwr_dwn
- bl prepare_cluster_pwr_dwn
- b do_stack_maintenance
-
-do_core_pwr_dwn:
- bl prepare_core_pwr_dwn
+ bl prepare_cpu_pwr_dwn
/* ---------------------------------------------
* Do stack maintenance by flushing the used
@@ -84,7 +71,6 @@
* remainder.
* ---------------------------------------------
*/
-do_stack_maintenance:
bl plat_get_my_stack
/* ---------------------------------------------