refactor(st-ddr): create generic services
Services to disable the AXI port, enable the host interface, and both
enter and exit software self-refresh are already present in the driver
source code.
Factorize them by gathering these services in the generic part, and
adapt the driver to call the new functions.
Add services to manage quasi-dynamic registers. DDRCTRL contains
quasi-dynamic registers, which are dynamic only under certain
conditions defined by the user guide (split into 4 groups).
In our driver, once out of the reset state, only groups 3 and 4 are
updated.
Group 4 only needs the sw_done/sw_done_ack sequence, which is already
available.
The group 3 sequence includes more conditions, which are gathered in
dedicated services; stm32mp_ddr_disable_host_interface() has been
added for this purpose.
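For illustration, a minimal sketch of a group 4 update, assuming a
hypothetical quasi-dynamic register field (the register and mask names
below are illustrative only, not part of this change):

  /* Group 4: only the sw_done/sw_done_ack handshake is required. */
  stm32mp_ddr_start_sw_done(ctl);
  /* Hypothetical quasi-dynamic (group 4) register update. */
  mmio_clrsetbits_32((uintptr_t)&ctl->some_qd4_reg, SOME_QD4_MASK, val);
  stm32mp_ddr_wait_sw_done_ack(ctl);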
Add a dedicated generic service to toggle rfshctl3.refresh_update_level
and wait for completion.
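A possible caller sequence, assuming refresh-related registers have
just been reprogrammed (the surrounding register writes are not shown
here):

  /* Propagate the new refresh settings and wait for the controller
   * to acknowledge the update. */
  stm32mp_ddr_wait_refresh_update_done_ack(ctl);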
Manage AXI ports and HIF when updating QD3 registers. Quasi-dynamic
group 3 (QD3) registers are updated while the DDR is not completely
initialized, i.e. while the AXI ports are not enabled and possibly
while the host interface (HIF) is not enabled either.
In that case, a specific mechanism is necessary to restore the same
conditions as before accessing the QD3 registers.
Static functions have been added to read the AXI port and HIF states;
they are used to determine which conditions need to be set and then
unset.
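For illustration only, a caller updating a QD3 register could be
wrapped with the new services as follows (the register and mask names
are hypothetical):

  stm32mp_ddr_set_qd3_update_conditions(ctl);
  /* AXI ports and HIF are now disabled if they were enabled, and the
   * sw_done sequence has been started. Hypothetical QD3 write: */
  mmio_clrsetbits_32((uintptr_t)&ctl->some_qd3_reg, SOME_QD3_MASK, val);
  /* Wait for sw_done_ack, then re-enable HIF and AXI ports if they
   * were enabled before. */
  stm32mp_ddr_unset_qd3_update_conditions(ctl);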
Signed-off-by: Nicolas Le Bayon <nicolas.le.bayon@st.com>
Change-Id: I880f88b1cb6fc76199ad9ea33e9d63a5c469aed4
diff --git a/drivers/st/ddr/stm32mp1_ddr.c b/drivers/st/ddr/stm32mp1_ddr.c
index d280627..415d9e4 100644
--- a/drivers/st/ddr/stm32mp1_ddr.c
+++ b/drivers/st/ddr/stm32mp1_ddr.c
@@ -506,8 +506,7 @@
#endif
/* 12. Exit the self-refresh state by setting PWRCTL.selfref_sw = 0. */
- mmio_clrbits_32((uintptr_t)&priv->ctl->pwrctl,
- DDRCTRL_PWRCTL_SELFREF_SW);
+ stm32mp_ddr_sw_selfref_exit(priv->ctl);
stm32mp1_wait_operating_mode(priv, DDRCTRL_STAT_OPERATING_MODE_NORMAL);
/*
@@ -522,10 +521,7 @@
*/
/* 15. Write DBG1.dis_hif = 0 to re-enable reads and writes. */
- mmio_clrbits_32((uintptr_t)&priv->ctl->dbg1, DDRCTRL_DBG1_DIS_HIF);
- VERBOSE("[0x%lx] dbg1 = 0x%x\n",
- (uintptr_t)&priv->ctl->dbg1,
- mmio_read_32((uintptr_t)&priv->ctl->dbg1));
+ stm32mp_ddr_enable_host_interface(priv->ctl);
}
static void stm32mp1_refresh_disable(struct stm32mp_ddrctl *ctl)
diff --git a/drivers/st/ddr/stm32mp_ddr.c b/drivers/st/ddr/stm32mp_ddr.c
index 65a49ad..26ad078 100644
--- a/drivers/st/ddr/stm32mp_ddr.c
+++ b/drivers/st/ddr/stm32mp_ddr.c
@@ -14,6 +14,9 @@
#define INVALID_OFFSET 0xFFU
+static bool axi_port_reenable_request;
+static bool host_interface_reenable_request;
+
static uintptr_t get_base_addr(const struct stm32mp_ddr_priv *priv, enum stm32mp_ddr_base_type base)
{
if (base == DDRPHY_BASE) {
@@ -93,5 +96,194 @@
VERBOSE("[0x%lx] pctrl_1 = 0x%x\n", (uintptr_t)&ctl->pctrl_1,
mmio_read_32((uintptr_t)&ctl->pctrl_1));
#endif
+}
+
+int stm32mp_ddr_disable_axi_port(struct stm32mp_ddrctl *ctl)
+{
+ uint64_t timeout;
+ uint32_t pstat;
+
+ /* Disable uMCTL2 AXI port 0 */
+ mmio_clrbits_32((uintptr_t)&ctl->pctrl_0, DDRCTRL_PCTRL_N_PORT_EN);
+ VERBOSE("[0x%lx] pctrl_0 = 0x%x\n", (uintptr_t)&ctl->pctrl_0,
+ mmio_read_32((uintptr_t)&ctl->pctrl_0));
+
+#if STM32MP_DDR_DUAL_AXI_PORT
+ /* Disable uMCTL2 AXI port 1 */
+ mmio_clrbits_32((uintptr_t)&ctl->pctrl_1, DDRCTRL_PCTRL_N_PORT_EN);
+ VERBOSE("[0x%lx] pctrl_1 = 0x%x\n", (uintptr_t)&ctl->pctrl_1,
+ mmio_read_32((uintptr_t)&ctl->pctrl_1));
+#endif
+
+ /*
+ * Waits until all AXI ports are idle
+ * Poll PSTAT.rd_port_busy_n = 0
+ * Poll PSTAT.wr_port_busy_n = 0
+ */
+ timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
+ do {
+ pstat = mmio_read_32((uintptr_t)&ctl->pstat);
+ VERBOSE("[0x%lx] pstat = 0x%x ",
+ (uintptr_t)&ctl->pstat, pstat);
+ if (timeout_elapsed(timeout)) {
+ return -1;
+ }
+ } while (pstat != 0U);
+
+ return 0;
+}
+
+static bool ddr_is_axi_port_enabled(struct stm32mp_ddrctl *ctl)
+{
+ return (mmio_read_32((uintptr_t)&ctl->pctrl_0) & DDRCTRL_PCTRL_N_PORT_EN) != 0U;
+}
+
+void stm32mp_ddr_enable_host_interface(struct stm32mp_ddrctl *ctl)
+{
+ mmio_clrbits_32((uintptr_t)&ctl->dbg1, DDRCTRL_DBG1_DIS_HIF);
+ VERBOSE("[0x%lx] dbg1 = 0x%x\n",
+ (uintptr_t)&ctl->dbg1,
+ mmio_read_32((uintptr_t)&ctl->dbg1));
+}
+
+void stm32mp_ddr_disable_host_interface(struct stm32mp_ddrctl *ctl)
+{
+ uint64_t timeout;
+ uint32_t dbgcam;
+ int count = 0;
+
+ mmio_setbits_32((uintptr_t)&ctl->dbg1, DDRCTRL_DBG1_DIS_HIF);
+ VERBOSE("[0x%lx] dbg1 = 0x%x\n",
+ (uintptr_t)&ctl->dbg1,
+ mmio_read_32((uintptr_t)&ctl->dbg1));
+
+ /*
+ * Waits until all queues and pipelines are empty
+ * Poll DBGCAM.dbg_wr_q_empty = 1
+ * Poll DBGCAM.dbg_rd_q_empty = 1
+ * Poll DBGCAM.dbg_wr_data_pipeline_empty = 1
+ * Poll DBGCAM.dbg_rd_data_pipeline_empty = 1
+ *
+ * data_pipeline fields must be polled twice to ensure
+	 * value propagation, so count is added to the loop condition.
+ */
+ timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
+ do {
+ dbgcam = mmio_read_32((uintptr_t)&ctl->dbgcam);
+ VERBOSE("[0x%lx] dbgcam = 0x%x ",
+ (uintptr_t)&ctl->dbgcam, dbgcam);
+ if (timeout_elapsed(timeout)) {
+ panic();
+ }
+ count++;
+ } while (((dbgcam & DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY) !=
+ DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY) || (count < 2));
+}
+
+static bool ddr_is_host_interface_enabled(struct stm32mp_ddrctl *ctl)
+{
+ return (mmio_read_32((uintptr_t)&ctl->dbg1) & DDRCTRL_DBG1_DIS_HIF) == 0U;
+}
+
+int stm32mp_ddr_sw_selfref_entry(struct stm32mp_ddrctl *ctl)
+{
+ uint64_t timeout;
+ uint32_t stat;
+ uint32_t operating_mode;
+ uint32_t selref_type;
+
+ mmio_setbits_32((uintptr_t)&ctl->pwrctl, DDRCTRL_PWRCTL_SELFREF_SW);
+ VERBOSE("[0x%lx] pwrctl = 0x%x\n",
+ (uintptr_t)&ctl->pwrctl,
+ mmio_read_32((uintptr_t)&ctl->pwrctl));
+
+ /*
+	 * Wait for the operating mode to change to self-refresh,
+	 * i.e. STAT.operating_mode[1:0] == 11.
+	 * Also check STAT.selfref_type[1:0] == 2 to ensure the transition
+	 * to self-refresh was initiated by software.
+ */
+ timeout = timeout_init_us(DDR_TIMEOUT_500US);
+ while (!timeout_elapsed(timeout)) {
+ stat = mmio_read_32((uintptr_t)&ctl->stat);
+ operating_mode = stat & DDRCTRL_STAT_OPERATING_MODE_MASK;
+ selref_type = stat & DDRCTRL_STAT_SELFREF_TYPE_MASK;
+
+ if ((operating_mode == DDRCTRL_STAT_OPERATING_MODE_SR) &&
+ (selref_type == DDRCTRL_STAT_SELFREF_TYPE_SR)) {
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+void stm32mp_ddr_sw_selfref_exit(struct stm32mp_ddrctl *ctl)
+{
+ mmio_clrbits_32((uintptr_t)&ctl->pwrctl, DDRCTRL_PWRCTL_SELFREF_SW);
+ VERBOSE("[0x%lx] pwrctl = 0x%x\n",
+ (uintptr_t)&ctl->pwrctl,
+ mmio_read_32((uintptr_t)&ctl->pwrctl));
+}
+
+void stm32mp_ddr_set_qd3_update_conditions(struct stm32mp_ddrctl *ctl)
+{
+ if (ddr_is_axi_port_enabled(ctl)) {
+ if (stm32mp_ddr_disable_axi_port(ctl) != 0) {
+ panic();
+ }
+ axi_port_reenable_request = true;
+ }
+
+ if (ddr_is_host_interface_enabled(ctl)) {
+ stm32mp_ddr_disable_host_interface(ctl);
+ host_interface_reenable_request = true;
+ }
+
+ stm32mp_ddr_start_sw_done(ctl);
+}
+
+void stm32mp_ddr_unset_qd3_update_conditions(struct stm32mp_ddrctl *ctl)
+{
+ stm32mp_ddr_wait_sw_done_ack(ctl);
+
+ if (host_interface_reenable_request) {
+ stm32mp_ddr_enable_host_interface(ctl);
+ host_interface_reenable_request = false;
+ }
+
+ if (axi_port_reenable_request) {
+ stm32mp_ddr_enable_axi_port(ctl);
+ axi_port_reenable_request = false;
+ }
+}
+
+void stm32mp_ddr_wait_refresh_update_done_ack(struct stm32mp_ddrctl *ctl)
+{
+ uint64_t timeout;
+ uint32_t rfshctl3;
+ uint32_t refresh_update_level = DDRCTRL_RFSHCTL3_REFRESH_UPDATE_LEVEL;
+
+ /* Toggle rfshctl3.refresh_update_level */
+ rfshctl3 = mmio_read_32((uintptr_t)&ctl->rfshctl3);
+ if ((rfshctl3 & refresh_update_level) == refresh_update_level) {
+ mmio_setbits_32((uintptr_t)&ctl->rfshctl3, refresh_update_level);
+ } else {
+ mmio_clrbits_32((uintptr_t)&ctl->rfshctl3, refresh_update_level);
+ refresh_update_level = 0U;
+ }
+
+ VERBOSE("[0x%lx] rfshctl3 = 0x%x\n",
+ (uintptr_t)&ctl->rfshctl3, mmio_read_32((uintptr_t)&ctl->rfshctl3));
+
+ timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
+ do {
+ rfshctl3 = mmio_read_32((uintptr_t)&ctl->rfshctl3);
+ VERBOSE("[0x%lx] rfshctl3 = 0x%x ", (uintptr_t)&ctl->rfshctl3, rfshctl3);
+ if (timeout_elapsed(timeout)) {
+ panic();
+ }
+ } while ((rfshctl3 & DDRCTRL_RFSHCTL3_REFRESH_UPDATE_LEVEL) != refresh_update_level);
+ VERBOSE("[0x%lx] rfshctl3 = 0x%x\n", (uintptr_t)&ctl->rfshctl3, rfshctl3);
}
diff --git a/include/drivers/st/stm32mp_ddr.h b/include/drivers/st/stm32mp_ddr.h
index 14fd337..a02ba72 100644
--- a/include/drivers/st/stm32mp_ddr.h
+++ b/include/drivers/st/stm32mp_ddr.h
@@ -68,6 +68,14 @@
void stm32mp_ddr_start_sw_done(struct stm32mp_ddrctl *ctl);
void stm32mp_ddr_wait_sw_done_ack(struct stm32mp_ddrctl *ctl);
void stm32mp_ddr_enable_axi_port(struct stm32mp_ddrctl *ctl);
+int stm32mp_ddr_disable_axi_port(struct stm32mp_ddrctl *ctl);
+void stm32mp_ddr_enable_host_interface(struct stm32mp_ddrctl *ctl);
+void stm32mp_ddr_disable_host_interface(struct stm32mp_ddrctl *ctl);
+int stm32mp_ddr_sw_selfref_entry(struct stm32mp_ddrctl *ctl);
+void stm32mp_ddr_sw_selfref_exit(struct stm32mp_ddrctl *ctl);
+void stm32mp_ddr_set_qd3_update_conditions(struct stm32mp_ddrctl *ctl);
+void stm32mp_ddr_unset_qd3_update_conditions(struct stm32mp_ddrctl *ctl);
+void stm32mp_ddr_wait_refresh_update_done_ack(struct stm32mp_ddrctl *ctl);
int stm32mp_board_ddr_power_init(enum ddr_type ddr_type);
#endif /* STM32MP_DDR_H */
diff --git a/include/drivers/st/stm32mp_ddrctrl_regs.h b/include/drivers/st/stm32mp_ddrctrl_regs.h
index 79de86b..be8f86d 100644
--- a/include/drivers/st/stm32mp_ddrctrl_regs.h
+++ b/include/drivers/st/stm32mp_ddrctrl_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2022-2024, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
*/
@@ -242,11 +242,16 @@
#define DDRCTRL_DBGCAM_WR_DATA_PIPELINE_EMPTY BIT(29)
#define DDRCTRL_DBGCAM_RD_DATA_PIPELINE_EMPTY BIT(28)
#define DDRCTRL_DBGCAM_DBG_WR_Q_EMPTY BIT(26)
+#define DDRCTRL_DBGCAM_DBG_RD_Q_EMPTY BIT(25)
#define DDRCTRL_DBGCAM_DBG_LPR_Q_DEPTH GENMASK(12, 8)
#define DDRCTRL_DBGCAM_DBG_HPR_Q_DEPTH GENMASK(4, 0)
#define DDRCTRL_DBGCAM_DATA_PIPELINE_EMPTY \
(DDRCTRL_DBGCAM_WR_DATA_PIPELINE_EMPTY | \
DDRCTRL_DBGCAM_RD_DATA_PIPELINE_EMPTY)
+#define DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY \
+ (DDRCTRL_DBGCAM_DBG_WR_Q_EMPTY | \
+ DDRCTRL_DBGCAM_DBG_RD_Q_EMPTY | \
+ DDRCTRL_DBGCAM_DATA_PIPELINE_EMPTY)
#define DDRCTRL_DBGCAM_DBG_Q_DEPTH \
(DDRCTRL_DBGCAM_DBG_WR_Q_EMPTY | \
DDRCTRL_DBGCAM_DBG_LPR_Q_DEPTH | \