feat(intel): support wiping DDR after calibration
After calibration, the DDR content cannot be trusted. In this case,
explicitly clear it using the memory controller's built-in scrubber.
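
For reference, a minimal sketch of the intended usage (the controller base
and DDR type below are placeholders, not values taken from this patch):

    /* Illustrative only: use the platform's uMCTL2 base and DDR type. */
    if (ddr_config_scrubber(umctl2_base, DDR_TYPE_DDR4) != 0) {
            ERROR("Failed to wipe DDR content after calibration\n");
    }

ddr_config_scrubber() saves the scrubber and ECC registers, runs a zero-fill
pass over the whole DDR via ddr_zerofill_scrubber(), then restores the saved
configuration.
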
Signed-off-by: Jit Loon Lim <jit.loon.lim@intel.com>
Signed-off-by: Sieu Mun Tang <sieu.mun.tang@intel.com>
Change-Id: I6f429623f76a21f61f85efbb660cf65d99c04f56
diff --git a/plat/intel/soc/common/drivers/ddr/ddr.c b/plat/intel/soc/common/drivers/ddr/ddr.c
index 188302f..62f03ef 100644
--- a/plat/intel/soc/common/drivers/ddr/ddr.c
+++ b/plat/intel/soc/common/drivers/ddr/ddr.c
@@ -7,6 +7,7 @@
#include <assert.h>
#include <errno.h>
#include <common/debug.h>
+#include <drivers/delay_timer.h>
#include "ddr.h"
#include <lib/mmio.h>
#include "socfpga_handoff.h"
@@ -340,3 +341,143 @@
NOTICE("DDR init successfully\n");
return status;
}
+
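+/*
+ * ddr_config_scrubber() - wipe DDR content using the controller's built-in
+ * scrubber.
+ *
+ * Back up the scrubber and ECC configuration, disable port traffic, run a
+ * zero-fill pass over the whole DDR via ddr_zerofill_scrubber() and then
+ * restore the saved configuration.
+ *
+ * Returns 0 on success or -ETIMEDOUT if the scrubber does not go idle in time.
+ */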
+int ddr_config_scrubber(phys_addr_t umctl2_base, enum ddr_type umctl2_type)
+{
+ uint32_t temp[9] = {0};
+ int ret = 0;
+
+ /* Clear PWRCTL so low-power operation does not stop the scrubber */
+ mmio_write_32(umctl2_base + DDR4_PWRCTL_OFFSET, 0);
+
+ /* Back up the user configuration into the temp array */
+ temp[0] = mmio_read_32(umctl2_base + DDR4_SBRCTL_OFFSET);
+ temp[1] = mmio_read_32(umctl2_base + DDR4_SBRWDATA0_OFFSET);
+ temp[2] = mmio_read_32(umctl2_base + DDR4_SBRSTART0_OFFSET);
+ if (umctl2_type == DDR_TYPE_DDR4) {
+ temp[3] = mmio_read_32(umctl2_base + DDR4_SBRWDATA1_OFFSET);
+ temp[4] = mmio_read_32(umctl2_base + DDR4_SBRSTART1_OFFSET);
+ }
+ temp[5] = mmio_read_32(umctl2_base + DDR4_SBRRANGE0_OFFSET);
+ temp[6] = mmio_read_32(umctl2_base + DDR4_SBRRANGE1_OFFSET);
+ temp[7] = mmio_read_32(umctl2_base + DDR4_ECCCFG0_OFFSET);
+ temp[8] = mmio_read_32(umctl2_base + DDR4_ECCCFG1_OFFSET);
+
+ if (umctl2_type != DDR_TYPE_DDR4) {
+ /* Lock the ECC region to ensure it is not being accessed */
+ mmio_setbits_32(umctl2_base + DDR4_ECCCFG1_OFFSET,
+ LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK);
+ }
+ /* Disable input traffic per port */
+ mmio_clrbits_32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
+ /* Disable the scrubber */
+ mmio_clrbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+ /* Poll until all scrub write data has been sent */
+ ret = poll_idle_status((umctl2_base + DDR4_SBRSTAT_OFFSET),
+ DDR4_SBRSTAT_SCRUB_BUSY, 0, 5000);
+
+ if (ret) {
+ INFO("%s: Timeout while waiting for", __func__);
+ INFO(" sending all scrub data\n");
+ return ret;
+ }
+
+ /* LPDDR4 supports inline ECC only */
+ if (umctl2_type != DDR_TYPE_DDR4) {
+ /*
+ * Set all regions as protected; this is required for the
+ * scrubber to initialize the whole LPDDR4 space except the
+ * ECC region.
+ */
+ mmio_write_32(umctl2_base + DDR4_ECCCFG0_OFFSET,
+ (ONE_EIGHT << LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) |
+ (ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT));
+ }
+
+ /* Scrub_burst = 1, scrub_mode = 1 (performs writes) */
+ mmio_write_32(umctl2_base + DDR4_SBRCTL_OFFSET,
+ DDR4_SBRCTL_SCRUB_BURST_1 | DDR4_SBRCTL_SCRUB_WRITE);
+
+ /* Wipe DDR content after calibration */
+ ret = ddr_zerofill_scrubber(umctl2_base, umctl2_type);
+ if (ret) {
+ ERROR("Failed to clear DDR content\n");
+ }
+
+ /* Poll until all scrub write data has been sent */
+ ret = poll_idle_status((umctl2_base + DDR4_SBRSTAT_OFFSET),
+ DDR4_SBRSTAT_SCRUB_BUSY, 0, 5000);
+ if (ret) {
+ INFO("%s: Timeout while waiting for", __func__);
+ INFO(" sending all scrub data\n");
+ return ret;
+ }
+
+ /* Disable the scrubber */
+ mmio_clrbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+
+ /* Restore user configurations */
+ mmio_write_32(umctl2_base + DDR4_SBRCTL_OFFSET, temp[0]);
+ mmio_write_32(umctl2_base + DDR4_SBRWDATA0_OFFSET, temp[1]);
+ mmio_write_32(umctl2_base + DDR4_SBRSTART0_OFFSET, temp[2]);
+ if (umctl2_type == DDR_TYPE_DDR4) {
+ mmio_write_32(umctl2_base + DDR4_SBRWDATA1_OFFSET, temp[3]);
+ mmio_write_32(umctl2_base + DDR4_SBRSTART1_OFFSET, temp[4]);
+ }
+ mmio_write_32(umctl2_base + DDR4_SBRRANGE0_OFFSET, temp[5]);
+ mmio_write_32(umctl2_base + DDR4_SBRRANGE1_OFFSET, temp[6]);
+ mmio_write_32(umctl2_base + DDR4_ECCCFG0_OFFSET, temp[7]);
+ mmio_write_32(umctl2_base + DDR4_ECCCFG1_OFFSET, temp[8]);
+
+ /* Re-enable ECC scrubbing if the scrubber is not left in write mode */
+ if (!(mmio_read_32(umctl2_base + DDR4_SBRCTL_OFFSET) & DDR4_SBRCTL_SCRUB_WRITE)) {
+ /* Enable the scrubber */
+ mmio_setbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+ }
+
+ return 0;
+}
+
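+/*
+ * ddr_zerofill_scrubber() - program the scrubber to write zeros over the
+ * whole DDR range and wait for the pass to complete.
+ */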
+int ddr_zerofill_scrubber(phys_addr_t umctl2_base, enum ddr_type umctl2_type)
+{
+ int ret = 0;
+
+ /* Zero out the whole DDR */
+ mmio_write_32(umctl2_base + DDR4_SBRWDATA0_OFFSET, 0);
+ mmio_write_32(umctl2_base + DDR4_SBRSTART0_OFFSET, 0);
+ if (umctl2_type == DDR_TYPE_DDR4) {
+ mmio_write_32(umctl2_base + DDR4_SBRWDATA1_OFFSET, 0);
+ mmio_write_32(umctl2_base + DDR4_SBRSTART1_OFFSET, 0);
+ }
+ mmio_write_32(umctl2_base + DDR4_SBRRANGE0_OFFSET, 0);
+ mmio_write_32(umctl2_base + DDR4_SBRRANGE1_OFFSET, 0);
+
+ NOTICE("Enabling scrubber (zeroing whole DDR) ...\n");
+
+ /* Enable the scrubber */
+ mmio_setbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
+ /* Poll until the scrubber reports the zero-fill pass is done */
+ ret = poll_idle_status((umctl2_base + DDR4_SBRSTAT_OFFSET),
+ DDR4_SBRSTAT_SCRUB_DONE, DDR4_SBRSTAT_SCRUB_DONE, 5000);
+ if (ret) {
+ INFO("%s: Timeout while waiting for", __func__);
+ INFO(" sending all scrub commands\n");
+ return ret;
+ }
+
+ return 0;
+}
+
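+/*
+ * poll_idle_status() - poll 'addr' until (value & 'mask') equals 'match',
+ * giving up after 'delay_ms' milliseconds. Returns 0 on match or -ETIMEDOUT
+ * on timeout.
+ */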
+int poll_idle_status(uintptr_t addr, uint32_t mask, uint32_t match, uint32_t delay_ms)
+{
+ int time_out = delay_ms;
+
+ while (time_out-- > 0) {
+ if ((mmio_read_32(addr) & mask) == match) {
+ return 0;
+ }
+ udelay(1000);
+ }
+ return -ETIMEDOUT;
+}
diff --git a/plat/intel/soc/common/drivers/ddr/ddr.h b/plat/intel/soc/common/drivers/ddr/ddr.h
index 416b64e..e50cda8 100644
--- a/plat/intel/soc/common/drivers/ddr/ddr.h
+++ b/plat/intel/soc/common/drivers/ddr/ddr.h
@@ -10,6 +10,28 @@
#include <lib/mmio.h>
#include "socfpga_handoff.h"
+enum ddr_type {
+ DDR_TYPE_LPDDR4_0,
+ DDR_TYPE_LPDDR4_1,
+ DDR_TYPE_DDR4,
+ DDR_TYPE_LPDDR5_0,
+ DDR_TYPE_LPDDR5_1,
+ DDR_TYPE_DDR5,
+ DDR_TYPE_UNKNOWN
+};
+
+/* Region size for ECCCFG0.ecc_region_map */
+enum region_size {
+ ONE_EIGHT,
+ ONE_SIXTEENTH,
+ ONE_THIRTY_SECOND,
+ ONE_SIXTY_FOURTH
+};
+
+/* DATATYPE DEFINATION */
+typedef unsigned long long phys_addr_t;
+typedef unsigned long long phys_size_t;
+
/* MACRO DEFINATION */
#define IO96B_0_REG_BASE 0x18400000
#define IO96B_1_REG_BASE 0x18800000
@@ -86,6 +108,34 @@
#define IOSSM_MB_WRITE(addr, data) mmio_write_32(addr, data)
+/* DDR4 Register */
+#define DDR4_PWRCTL_OFFSET 0x30
+#define DDR4_SBRCTL_OFFSET 0x0F24
+#define DDR4_SBRSTAT_OFFSET 0x0F28
+#define DDR4_SBRWDATA0_OFFSET 0x0F2C
+#define DDR4_SBRSTART0_OFFSET 0x0F38
+#define DDR4_SBRWDATA1_OFFSET 0x0F30
+#define DDR4_SBRSTART1_OFFSET 0x0F3C
+#define DDR4_SBRRANGE0_OFFSET 0x0F40
+#define DDR4_SBRRANGE1_OFFSET 0x0F44
+#define DDR4_ECCCFG0_OFFSET 0x70
+#define DDR4_ECCCFG1_OFFSET 0x74
+#define DDR4_PCTRL0_OFFSET 0x0490
+
+#define LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT 30
+#define ALL_PROTECTED 0x7F
+#define LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT 8
+
+#define LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK BIT(4)
+#define DDR4_PCTRL0_PORT_EN BIT(0)
+#define DDR4_SBRCTL_SCRUB_EN BIT(0)
+#define DDR4_SBRSTAT_SCRUB_BUSY BIT(0)
+#define DDR4_SBRCTL_SCRUB_BURST_1 BIT(4)
+#define DDR4_SBRCTL_SCRUB_WRITE BIT(2)
+#define DDR4_SBRSTAT_SCRUB_DONE BIT(1)
+
/* FUNCTION DEFINATION */
int ddr_calibration_check(void);
@@ -109,4 +159,10 @@
bool is_ddr_init_in_progress(void);
+int ddr_zerofill_scrubber(phys_addr_t umctl2_base, enum ddr_type umctl2_type);
+
+int ddr_config_scrubber(phys_addr_t umctl2_base, enum ddr_type umctl2_type);
+
+int poll_idle_status(uintptr_t addr, uint32_t mask, uint32_t match, uint32_t delay_ms);
+
#endif