feat(intel): update Agilex5 DDR and IOSSM driver
Add the DDR and IOSSM driver code for the Agilex5 platform and
initialize the DDR/IOSSM in the BL2 EL3 early boot flow.
Change-Id: I3e4205171d9356190b60498cae322318520bb1c2
Signed-off-by: Girisha Dengi <girisha.dengi@intel.com>
Signed-off-by: Sieu Mun Tang <sieu.mun.tang@intel.com>
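
For reference, the new agilex5_ddr_init() entry point is called from the
BL2 EL3 early platform setup right after the peripheral resets are
released, and it consumes the ddr_config bits added to the handoff
structure at the end of this patch (BIT[0] = dual port, BIT[1] = dual
EMIF). A minimal sketch of that call flow, using a hypothetical helper
name and simplified error handling rather than the exact hunk below:

  /* Sketch only: simplified from the hunks in this patch. */
  #include <stdbool.h>
  #include <common/debug.h>
  #include "agilex5_ddr.h"

  static void bl2_ddr_bringup(handoff *reverse_handoff_ptr)
  {
      /* BIT[0] selects dual port, BIT[1] selects dual EMIF. */
      bool is_dualport = (reverse_handoff_ptr->ddr_config & BIT(0)) != 0U;
      bool is_dualemif = (reverse_handoff_ptr->ddr_config & BIT(1)) != 0U;

      INFO("DDR: dual port %d, dual EMIF %d\n", is_dualport, is_dualemif);

      /* Calibrate and configure DDR through the IOSSM mailbox before any DRAM use. */
      if (agilex5_ddr_init(reverse_handoff_ptr) != 0) {
          ERROR("DDR initialization failed\n");
      }
  }

In the actual bl2_plat_setup.c hunk the call is made directly before
combo_phy_init() and its return value is not checked.
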
diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h
index 9ef7598..b03886f 100644
--- a/plat/intel/soc/agilex/include/socfpga_plat_def.h
+++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
diff --git a/plat/intel/soc/agilex5/bl2_plat_setup.c b/plat/intel/soc/agilex5/bl2_plat_setup.c
index 265ee57..9f51260 100644
--- a/plat/intel/soc/agilex5/bl2_plat_setup.c
+++ b/plat/intel/soc/agilex5/bl2_plat_setup.c
@@ -20,6 +20,7 @@
#include <lib/xlat_tables/xlat_tables_v2.h>
#include "agilex5_clock_manager.h"
+#include "agilex5_ddr.h"
#include "agilex5_memory_controller.h"
#include "agilex5_mmc.h"
#include "agilex5_pinmux.h"
@@ -79,6 +80,10 @@
enable_nonsecure_access();
deassert_peripheral_reset();
+
+ /* DDR and IOSSM driver init */
+ agilex5_ddr_init(&reverse_handoff_ptr);
+
if (combo_phy_init(&reverse_handoff_ptr) != 0) {
ERROR("Combo Phy initialization failed\n");
}
diff --git a/plat/intel/soc/agilex5/include/agilex5_ddr.h b/plat/intel/soc/agilex5/include/agilex5_ddr.h
new file mode 100644
index 0000000..631e006
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_ddr.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGILEX5_DDR_H
+#define AGILEX5_DDR_H
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <lib/utils_def.h>
+
+#include "socfpga_handoff.h"
+
+#define CONFIG_NR_DRAM_BANKS 1
+
+typedef unsigned long long phys_addr_t;
+typedef unsigned long long phys_size_t;
+typedef phys_addr_t fdt_addr_t;
+
+/* DDR/RAM configuration */
+struct ddr_info {
+ phys_addr_t start;
+ phys_size_t size;
+};
+
+int agilex5_ddr_init(handoff *hoff_ptr);
+
+#endif /* AGILEX5_DDR_H */
diff --git a/plat/intel/soc/agilex5/include/agilex5_iossm_mailbox.h b/plat/intel/soc/agilex5/include/agilex5_iossm_mailbox.h
new file mode 100644
index 0000000..1fd8ef6
--- /dev/null
+++ b/plat/intel/soc/agilex5/include/agilex5_iossm_mailbox.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGILEX5_IOSSM_MAILBOX_H
+#define AGILEX5_IOSSM_MAILBOX_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <lib/mmio.h>
+#include "agilex5_ddr.h"
+
+#define TIMEOUT_5000MS 5000
+#define TIMEOUT TIMEOUT_5000MS
+#define IOSSM_STATUS_CAL_SUCCESS BIT(0)
+#define IOSSM_STATUS_CAL_FAIL BIT(1)
+#define IOSSM_STATUS_CAL_BUSY BIT(2)
+#define IOSSM_STATUS_COMMAND_RESPONSE_READY 1
+#define IOSSM_CMD_RESPONSE_STATUS_OFFSET 0x45C
+#define IOSSM_CMD_RESPONSE_DATA_0_OFFSET 0x458
+#define IOSSM_CMD_RESPONSE_DATA_1_OFFSET 0x454
+#define IOSSM_CMD_RESPONSE_DATA_2_OFFSET 0x450
+#define IOSSM_CMD_REQ_OFFSET 0x43C
+#define IOSSM_CMD_PARAM_0_OFFSET 0x438
+#define IOSSM_CMD_PARAM_1_OFFSET 0x434
+#define IOSSM_CMD_PARAM_2_OFFSET 0x430
+#define IOSSM_CMD_PARAM_3_OFFSET 0x42C
+#define IOSSM_CMD_PARAM_4_OFFSET 0x428
+#define IOSSM_CMD_PARAM_5_OFFSET 0x424
+#define IOSSM_CMD_PARAM_6_OFFSET 0x420
+#define IOSSM_STATUS_OFFSET 0x400
+#define IOSSM_CMD_RESPONSE_DATA_SHORT_MASK GENMASK(31, 16)
+#define IOSSM_CMD_RESPONSE_DATA_SHORT(data) (((data) & \
+ IOSSM_CMD_RESPONSE_DATA_SHORT_MASK) >> 16)
+#define MAX_IO96B_SUPPORTED 2
+#define MAX_MEM_INTERFACES_SUPPORTED 2
+
+/* supported mailbox command type */
+enum iossm_mailbox_cmd_type {
+ CMD_NOP,
+ CMD_GET_SYS_INFO,
+ CMD_GET_MEM_INFO,
+ CMD_GET_MEM_CAL_INFO,
+ CMD_TRIG_CONTROLLER_OP,
+ CMD_TRIG_MEM_CAL_OP
+};
+
+/* supported mailbox command opcode */
+enum iossm_mailbox_cmd_opcode {
+ GET_MEM_INTF_INFO = 0x0001,
+ GET_MEM_TECHNOLOGY,
+ GET_MEMCLK_FREQ_KHZ,
+ GET_MEM_WIDTH_INFO,
+ ECC_ENABLE_SET = 0x0101,
+ ECC_ENABLE_STATUS,
+ ECC_INTERRUPT_STATUS,
+ ECC_INTERRUPT_ACK,
+ ECC_INTERRUPT_MASK,
+ ECC_WRITEBACK_ENABLE,
+ ECC_SCRUB_IN_PROGRESS_STATUS = 0x0201,
+ ECC_SCRUB_MODE_0_START,
+ ECC_SCRUB_MODE_1_START,
+ BIST_STANDARD_MODE_START = 0x0301,
+ BIST_RESULTS_STATUS,
+ BIST_MEM_INIT_START,
+ BIST_MEM_INIT_STATUS,
+ BIST_SET_DATA_PATTERN_UPPER,
+ BIST_SET_DATA_PATTERN_LOWER,
+ TRIG_MEM_CAL = 0x000a,
+ GET_MEM_CAL_STATUS
+};
+
+/*
+ * IOSSM mailbox required information
+ *
+ * @num_mem_interface: Number of memory interfaces instantiated
+ * @ip_type: IP type implemented on the IO96B
+ * @ip_instance_id: IP identifier for every IP instance implemented on the IO96B
+ */
+struct io96b_mb_ctrl {
+ uint32_t num_mem_interface;
+ uint32_t ip_type[2];
+ uint32_t ip_instance_id[2];
+};
+
+/*
+ * IOSSM mailbox response outputs
+ *
+ * @cmd_resp_status: Command Interface status
+ * @cmd_resp_data_*: Command response data words
+ */
+struct io96b_mb_resp {
+ uint32_t cmd_resp_status;
+ uint32_t cmd_resp_data_0;
+ uint32_t cmd_resp_data_1;
+ uint32_t cmd_resp_data_2;
+};
+
+/*
+ * IO96B instance specific information
+ *
+ * @size: Memory size
+ * @io96b_csr_addr: IO96B instance CSR address
+ * @cal_status: IO96B instance calibration status
+ * @mb_ctrl: IOSSM mailbox required information
+ */
+struct io96b_instance {
+ uint16_t size;
+ phys_addr_t io96b_csr_addr;
+ bool cal_status;
+ struct io96b_mb_ctrl mb_ctrl;
+};
+
+/*
+ * Overall IO96B instance(s) information
+ *
+ * @num_instance: Number of instance(s) assigned to HPS
+ * @overall_cal_status: Overall calibration status for all IO96B instance(s)
+ * @ddr_type: DDR memory type
+ * @ecc_status: ECC enable status (false = disabled, true = enabled)
+ * @overall_size: Total DDR memory size
+ * @io96b_0: IO96B 0 instance specific information
+ * @io96b_1: IO96B 1 instance specific information
+ */
+struct io96b_info {
+ uint8_t num_instance;
+ bool overall_cal_status;
+ const char *ddr_type;
+ bool ecc_status;
+ uint16_t overall_size;
+ struct io96b_instance io96b_0;
+ struct io96b_instance io96b_1;
+};
+
+int io96b_mb_req(phys_addr_t io96b_csr_addr, uint32_t ip_type, uint32_t instance_id,
+ uint32_t usr_cmd_type, uint32_t usr_cmd_opcode, uint32_t cmd_param_0,
+ uint32_t cmd_param_1, uint32_t cmd_param_2, uint32_t cmd_param_3,
+ uint32_t cmd_param_4, uint32_t cmd_param_5, uint32_t cmd_param_6,
+ uint32_t resp_data_len, struct io96b_mb_resp *resp);
+
+/* Supported IOSSM mailbox function */
+void io96b_mb_init(struct io96b_info *io96b_ctrl);
+int io96b_cal_status(phys_addr_t addr);
+void init_mem_cal(struct io96b_info *io96b_ctrl);
+int trig_mem_cal(struct io96b_info *io96b_ctrl);
+int get_mem_technology(struct io96b_info *io96b_ctrl);
+int get_mem_width_info(struct io96b_info *io96b_ctrl);
+int ecc_enable_status(struct io96b_info *io96b_ctrl);
+int bist_mem_init_start(struct io96b_info *io96b_ctrl);
+
+#endif /* AGILEX5_IOSSM_MAILBOX_H */
diff --git a/plat/intel/soc/agilex5/platform.mk b/plat/intel/soc/agilex5/platform.mk
index 90678e1..4bb90d5 100644
--- a/plat/intel/soc/agilex5/platform.mk
+++ b/plat/intel/soc/agilex5/platform.mk
@@ -58,14 +58,16 @@
lib/cpus/aarch64/cortex_a76.S \
plat/intel/soc/agilex5/soc/agilex5_clock_manager.c \
plat/intel/soc/agilex5/soc/agilex5_memory_controller.c \
- plat/intel/soc/agilex5/soc/agilex5_mmc.c \
+ plat/intel/soc/agilex5/soc/agilex5_mmc.c \
plat/intel/soc/agilex5/soc/agilex5_pinmux.c \
plat/intel/soc/agilex5/soc/agilex5_power_manager.c \
+ plat/intel/soc/agilex5/soc/agilex5_ddr.c \
+ plat/intel/soc/agilex5/soc/agilex5_iossm_mailbox.c \
plat/intel/soc/common/bl2_plat_mem_params_desc.c \
plat/intel/soc/common/socfpga_image_load.c \
plat/intel/soc/common/socfpga_ros.c \
plat/intel/soc/common/socfpga_storage.c \
- plat/intel/soc/common/socfpga_vab.c \
+ plat/intel/soc/common/socfpga_vab.c \
plat/intel/soc/common/soc/socfpga_emac.c \
plat/intel/soc/common/soc/socfpga_firewall.c \
plat/intel/soc/common/soc/socfpga_handoff.c \
diff --git a/plat/intel/soc/agilex5/soc/agilex5_ddr.c b/plat/intel/soc/agilex5/soc/agilex5_ddr.c
new file mode 100644
index 0000000..ef2ae57
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_ddr.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include "agilex5_ddr.h"
+#include "agilex5_iossm_mailbox.h"
+
+/*
+ * TODO: Leverage the DDR drivers of the legacy products, take the upcoming
+ * products (e.g. KM) into account, and then define a common driver
+ * architecture that covers all the products in one view.
+ */
+
+#define SYSMGR_BS_COLD3_DDR_RESET_TYPE_MASK GENMASK(31, 29)
+#define SYSMGR_BS_COLD3_DDR_RESET_TYPE_SHIFT 29
+#define SYSMGR_BS_COLD3_DDR_DBE_MASK (1 << 1)
+#define SYSMGR_BS_COLD3_OCRAM_DBE_MASK (1)
+#define SYSMGR_BS_POR0_DDR_PROGRESS_MASK (1)
+
+/* MPFE NOC registers */
+#define F2SDRAM_SIDEBAND_FLAGOUTSET0 0x50
+#define F2SDRAM_SIDEBAND_FLAGOUTCLR0 0x54
+#define F2SDRAM_SIDEBAND_FLAGOUTSTATUS0 0x58
+
+#define SOCFPGA_F2SDRAM_MGR_ADDRESS 0x18001000
+#define SOCFPGA_MPFE_SCR_IO96B0 0x18000D00
+#define SOCFPGA_MPFE_SCR_IO96B1 0x18000D04
+#define SOCFPGA_MPFE_NOC_SCHED_CSR 0x18000D08
+
+#define SIDEBANDMGR_FLAGOUTSET0_REG (SOCFPGA_F2SDRAM_MGR_ADDRESS \
+ + F2SDRAM_SIDEBAND_FLAGOUTSET0)
+#define SIDEBANDMGR_FLAGOUTSTATUS0_REG (SOCFPGA_F2SDRAM_MGR_ADDRESS \
+					+ F2SDRAM_SIDEBAND_FLAGOUTSTATUS0)
+#define SIDEBANDMGR_FLAGOUTCLR0_REG (SOCFPGA_F2SDRAM_MGR_ADDRESS \
+ + F2SDRAM_SIDEBAND_FLAGOUTCLR0)
+#define SZ_8 0x00000008
+
+
+/* Firewall MPU DDR SCR registers */
+#define FW_MPU_DDR_SCR_EN 0x00
+#define FW_MPU_DDR_SCR_EN_SET 0x04
+#define FW_MPU_DDR_SCR_MPUREGION0ADDR_BASE 0x10
+#define FW_MPU_DDR_SCR_MPUREGION0ADDR_BASEEXT 0x14
+#define FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT 0x18
+#define FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0x1c
+
+#define SOCFPGA_FW_DDR_CCU_DMI0_ADDRESS 0x18000800
+#define SOCFPGA_FW_DDR_CCU_DMI1_ADDRESS 0x18000A00
+#define SOCFPGA_FW_TBU2NOC_ADDRESS 0x18000C00
+
+#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASE 0x90
+#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASEEXT 0x94
+#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0x98
+#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0x9c
+#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT_FIELD 0xff
+
+/* Firewall F2SDRAM DDR SCR registers */
+#define FW_F2SDRAM_DDR_SCR_EN 0x00
+#define FW_F2SDRAM_DDR_SCR_EN_SET 0x04
+#define FW_F2SDRAM_DDR_SCR_REGION0ADDR_BASE 0x10
+#define FW_F2SDRAM_DDR_SCR_REGION0ADDR_BASEEXT 0x14
+#define FW_F2SDRAM_DDR_SCR_REGION0ADDR_LIMIT 0x18
+#define FW_F2SDRAM_DDR_SCR_REGION0ADDR_LIMITEXT 0x1c
+
+#define FW_MPU_DDR_SCR_WRITEL(data, reg) \
+ do { \
+ mmio_write_32(SOCFPGA_FW_DDR_CCU_DMI0_ADDRESS + (reg), data); \
+ mmio_write_32(SOCFPGA_FW_DDR_CCU_DMI1_ADDRESS + (reg), data); \
+ } while (0)
+
+#define FW_F2SDRAM_DDR_SCR_WRITEL(data, reg) \
+ mmio_write_32(SOCFPGA_FW_TBU2NOC_ADDRESS + (reg), data)
+
+/* DDR banks info set */
+static struct ddr_info ddr_info_set[CONFIG_NR_DRAM_BANKS];
+
+/* Reset type */
+enum reset_type {
+ POR_RESET,
+ WARM_RESET,
+ COLD_RESET,
+ NCONFIG,
+ JTAG_CONFIG,
+ RSU_RECONFIG
+};
+
+/* Get reset type by reading boot scratch register cold3 */
+static inline enum reset_type get_reset_type(uint32_t sys_reg)
+{
+ return ((sys_reg & SYSMGR_BS_COLD3_DDR_RESET_TYPE_MASK) >>
+ SYSMGR_BS_COLD3_DDR_RESET_TYPE_SHIFT);
+}
+
+/* DDR hang check before the reset */
+static inline bool is_ddr_init_hang(void)
+{
+ uint32_t sys_reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0));
+
+ if ((sys_reg & SYSMGR_BS_POR0_DDR_PROGRESS_MASK) != 0) {
+ INFO("DDR: Hang before this reset\n");
+ return true;
+ }
+
+ return false;
+}
+
+/* Set the DDR init progress bit */
+static inline void ddr_init_inprogress(bool start)
+{
+ if (start) {
+ mmio_setbits_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0),
+ SYSMGR_BS_POR0_DDR_PROGRESS_MASK);
+ } else {
+ mmio_clrbits_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0),
+ SYSMGR_BS_POR0_DDR_PROGRESS_MASK);
+ }
+}
+
+/* Configure the IO96B CSR addresses based on the handoff data */
+static void config_io96b_csr_addr(bool is_dualemif, struct io96b_info *io96b_ctrl)
+{
+ if (is_dualemif)
+ io96b_ctrl->num_instance = 2;
+ else
+ io96b_ctrl->num_instance = 1;
+
+ /* Assign IO96B CSR base address if it is valid */
+ for (int i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ io96b_ctrl->io96b_0.io96b_csr_addr = 0x18400000;
+ INFO("DDR: IO96B0 0x%llx CSR enabled\n",
+ io96b_ctrl->io96b_0.io96b_csr_addr);
+ break;
+
+ case 1:
+ io96b_ctrl->io96b_1.io96b_csr_addr = 0x18800000;
+ INFO("DDR: IO96B1 0x%llx CSR enabled\n",
+ io96b_ctrl->io96b_1.io96b_csr_addr);
+ break;
+
+ default:
+ ERROR("%s: Invalid IO96B CSR\n", __func__);
+ } /* switch */
+ } /* for */
+}
+
+static inline bool hps_ocram_dbe_status(void)
+{
+ uint32_t sys_reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_3));
+
+ if ((sys_reg & SYSMGR_BS_COLD3_OCRAM_DBE_MASK) != 0)
+ return true;
+
+ return false;
+}
+
+static inline bool ddr_ecc_dbe_status(void)
+{
+ uint32_t sys_reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_3));
+
+ if ((sys_reg & SYSMGR_BS_COLD3_DDR_DBE_MASK) != 0)
+ return true;
+
+ return false;
+}
+
+static void sdram_set_firewall_non_f2sdram(void)
+{
+ uint32_t i;
+ phys_size_t value;
+ uint32_t lower, upper;
+
+ for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+ if (ddr_info_set[i].size == 0) {
+ continue;
+ }
+
+ value = ddr_info_set[i].start;
+
+ /*
+ * Keep first 1MB of SDRAM memory region as secure region when
+ * using ATF flow, where the ATF code is located.
+ */
+ value += SZ_1M;
+
+ /* Setting non-secure MPU region base and base extended */
+ lower = LO(value);
+ upper = HI(value);
+
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_BASE +
+ (i * 4 * sizeof(uint32_t)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_BASEEXT +
+ (i * 4 * sizeof(uint32_t)));
+
+ /* Setting non-secure Non-MPU region base and base extended */
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASE +
+ (i * 4 * sizeof(uint32_t)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASEEXT +
+ (i * 4 * sizeof(uint32_t)));
+
+ /* Setting non-secure MPU limit and limit extended */
+ value = ddr_info_set[i].start + ddr_info_set[i].size - 1;
+
+ lower = LO(value);
+ upper = HI(value);
+
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT +
+ (i * 4 * sizeof(uint32_t)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT +
+ (i * 4 * sizeof(uint32_t)));
+
+ /* Setting non-secure Non-MPU limit and limit extended */
+ FW_MPU_DDR_SCR_WRITEL(lower,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT +
+ (i * 4 * sizeof(uint32_t)));
+ FW_MPU_DDR_SCR_WRITEL(upper & 0xff,
+ FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT +
+ (i * 4 * sizeof(uint32_t)));
+
+ FW_MPU_DDR_SCR_WRITEL(BIT(i) | BIT(i + 8),
+ FW_MPU_DDR_SCR_EN_SET);
+ }
+}
+
+static void sdram_set_firewall_f2sdram(void)
+{
+ uint32_t i;
+ phys_size_t value;
+ uint32_t lower, upper;
+
+ for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+ if (ddr_info_set[i].size == 0) {
+ continue;
+ }
+
+ value = ddr_info_set[i].start;
+
+ /* Keep first 1MB of SDRAM memory region as secure region when
+ * using ATF flow, where the ATF code is located.
+ */
+ value += SZ_1M;
+
+ /* Setting base and base extended */
+ lower = LO(value);
+ upper = HI(value);
+ FW_F2SDRAM_DDR_SCR_WRITEL(lower,
+ FW_F2SDRAM_DDR_SCR_REGION0ADDR_BASE +
+ (i * 4 * sizeof(uint32_t)));
+ FW_F2SDRAM_DDR_SCR_WRITEL(upper & 0xff,
+ FW_F2SDRAM_DDR_SCR_REGION0ADDR_BASEEXT +
+ (i * 4 * sizeof(uint32_t)));
+
+ /* Setting limit and limit extended */
+ value = ddr_info_set[i].start + ddr_info_set[i].size - 1;
+
+ lower = LO(value);
+ upper = HI(value);
+
+ FW_F2SDRAM_DDR_SCR_WRITEL(lower,
+ FW_F2SDRAM_DDR_SCR_REGION0ADDR_LIMIT +
+ (i * 4 * sizeof(uint32_t)));
+ FW_F2SDRAM_DDR_SCR_WRITEL(upper & 0xff,
+ FW_F2SDRAM_DDR_SCR_REGION0ADDR_LIMITEXT +
+ (i * 4 * sizeof(uint32_t)));
+
+ FW_F2SDRAM_DDR_SCR_WRITEL(BIT(i), FW_F2SDRAM_DDR_SCR_EN_SET);
+ }
+}
+
+static void sdram_set_firewall(void)
+{
+ sdram_set_firewall_non_f2sdram();
+ sdram_set_firewall_f2sdram();
+}
+
+/*
+ * Agilex5 DDR/IOSSM controller initialization routine
+ */
+int agilex5_ddr_init(handoff *hoff_ptr)
+{
+ int ret;
+ bool full_mem_init = false;
+ phys_size_t hw_ddr_size;
+ phys_size_t config_ddr_size;
+ struct io96b_info io96b_ctrl;
+ enum reset_type reset_t = get_reset_type(mmio_read_32(SOCFPGA_SYSMGR(
+ BOOT_SCRATCH_COLD_3)));
+ bool is_dualport = hoff_ptr->ddr_config & BIT(0);
+ bool is_dualemif = hoff_ptr->ddr_config & BIT(1);
+
+ NOTICE("DDR: Reset type is '%s'\n",
+ (reset_t == POR_RESET ? "Power-On" : (reset_t == COLD_RESET ? "Cold" : "Warm")));
+
+ /* DDR initialization progress status tracking */
+ bool is_ddr_hang_bfr_rst = is_ddr_init_hang();
+
+ /* Set the DDR initialization progress */
+ ddr_init_inprogress(true);
+
+ /* Configure the IO96B CSR address based on the handoff data */
+ config_io96b_csr_addr(is_dualemif, &io96b_ctrl);
+
+ /* Configuring MPFE sideband manager registers */
+ /* Dual port setting */
+ if (is_dualport)
+ mmio_setbits_32(SIDEBANDMGR_FLAGOUTSET0_REG, BIT(4));
+
+ /* Dual EMIF setting */
+ if (is_dualemif) {
+ /* Set mpfe_lite_active in the system manager */
+ /* TODO: recheck on the bit value?? */
+ mmio_setbits_32(SOCFPGA_SYSMGR(MPFE_CONFIG), BIT(8));
+
+ mmio_setbits_32(SIDEBANDMGR_FLAGOUTSET0_REG, BIT(5));
+ }
+
+ if (is_dualport || is_dualemif)
+ INFO("DDR: SIDEBANDMGR_FLAGOUTSTATUS0: 0x%x\n",
+ mmio_read_32(SIDEBANDMGR_FLAGOUTSTATUS0_REG));
+
+	/* Check that the initial DDR calibration has passed */
+ init_mem_cal(&io96b_ctrl);
+
+	/* Initialize the IOSSM mailbox */
+ io96b_mb_init(&io96b_ctrl);
+
+ /* Need to trigger re-calibration for DDR DBE */
+ if (ddr_ecc_dbe_status()) {
+ io96b_ctrl.io96b_0.cal_status = false;
+ io96b_ctrl.io96b_1.cal_status = false;
+ io96b_ctrl.overall_cal_status = io96b_ctrl.io96b_0.cal_status ||
+ io96b_ctrl.io96b_1.cal_status;
+ }
+
+ /* Trigger re-calibration if calibration failed */
+ if (!(io96b_ctrl.overall_cal_status)) {
+ NOTICE("DDR: Re-calibration in progress...\n");
+ trig_mem_cal(&io96b_ctrl);
+ }
+ NOTICE("DDR: Calibration success\n");
+
+	/* Get the DDR type, DDR size and ECC status */
+ ret = get_mem_technology(&io96b_ctrl);
+ if (ret != 0) {
+ ERROR("DDR: Failed to get DDR type\n");
+ return ret;
+ }
+
+ ret = get_mem_width_info(&io96b_ctrl);
+ if (ret != 0) {
+ ERROR("DDR: Failed to get DDR size\n");
+ return ret;
+ }
+
+ /* DDR size queried from the IOSSM controller */
+ hw_ddr_size = (phys_size_t)io96b_ctrl.overall_size * SZ_1G / SZ_8;
+
+	/* TODO: DDR size, start and end addresses are hard-coded to 1GB at 0x80000000 for now */
+ config_ddr_size = 0x40000000;
+ ddr_info_set[0].start = 0x80000000;
+ ddr_info_set[0].size = 0x40000000;
+
+ if (config_ddr_size != hw_ddr_size) {
+		WARN("DDR: Configured DDR size is %lld MiB\n", config_ddr_size >> 20);
+		WARN("DDR: It does not match the hardware size of %lld MiB\n", hw_ddr_size >> 20);
+ }
+
+ if (config_ddr_size > hw_ddr_size) {
+		ERROR("DDR: Configured DDR size is greater than the hardware size - HANG!!!\n");
+ while (1)
+ ;
+ }
+
+ ret = ecc_enable_status(&io96b_ctrl);
+ if (ret != 0) {
+ ERROR("DDR: Failed to get DDR ECC status\n");
+ return ret;
+ }
+
+ /*
+ * HPS cold or warm reset? If yes, skip full memory initialization if
+ * ECC is enabled to preserve memory content.
+ */
+ if (io96b_ctrl.ecc_status != 0) {
+		full_mem_init = hps_ocram_dbe_status() || ddr_ecc_dbe_status() ||
+				is_ddr_hang_bfr_rst;
+		if (full_mem_init ||
+		    !(reset_t == WARM_RESET || reset_t == COLD_RESET)) {
+ ret = bist_mem_init_start(&io96b_ctrl);
+ if (ret != 0) {
+ ERROR("DDR: Failed to fully initialize DDR memory\n");
+ return ret;
+ }
+ }
+ INFO("DDR: ECC initialized successfully\n");
+ }
+
+ sdram_set_firewall();
+
+ /*
+ * Firewall setting for MPFE CSRs, allow both secure and non-secure
+ * transactions.
+ */
+ /* IO96B0_reg */
+ mmio_setbits_32(SOCFPGA_MPFE_SCR_IO96B0, BIT(0));
+ /* IO96B1_reg */
+ mmio_setbits_32(SOCFPGA_MPFE_SCR_IO96B1, BIT(0));
+ /* noc_scheduler_csr */
+ mmio_setbits_32(SOCFPGA_MPFE_NOC_SCHED_CSR, BIT(0));
+
+ INFO("DDR: firewall init done\n");
+
+ /* Ending DDR driver initialization success tracking */
+ ddr_init_inprogress(false);
+
+ NOTICE("###DDR:init success###\n");
+
+ return 0;
+}
diff --git a/plat/intel/soc/agilex5/soc/agilex5_iossm_mailbox.c b/plat/intel/soc/agilex5/soc/agilex5_iossm_mailbox.c
new file mode 100644
index 0000000..c2ab047
--- /dev/null
+++ b/plat/intel/soc/agilex5/soc/agilex5_iossm_mailbox.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+
+#include "agilex5_iossm_mailbox.h"
+
+/* supported DDR type list */
+static const char *ddr_type_list[7] = {
+ "DDR4", "DDR5", "DDR5_RDIMM", "LPDDR4", "LPDDR5", "QDRIV", "UNKNOWN"
+};
+
+static inline int wait_for_bit(const void *reg,
+ const uint32_t mask,
+ const bool set,
+ const unsigned int timeout_ms)
+{
+ uint32_t val;
+ uint32_t timeout_sec = (timeout_ms / 1000);
+
+ while (timeout_sec > 0) {
+ val = mmio_read_32((uintptr_t)reg);
+
+ INFO("IOSSM: timeout_sec %d, val %x\n", timeout_sec, val);
+
+ if (!set) {
+ val = ~val;
+ }
+
+ if ((val & mask) == mask) {
+ INFO("IOSSM: %s, success\n", __func__);
+ return 0;
+ }
+
+ /* one second delay */
+ mdelay(1000);
+
+ timeout_sec--;
+ }
+
+	ERROR("IOSSM: %s, failed, timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+int io96b_mb_req(phys_addr_t io96b_csr_addr, uint32_t ip_type, uint32_t instance_id,
+ uint32_t usr_cmd_type, uint32_t usr_cmd_opcode, uint32_t cmd_param_0,
+ uint32_t cmd_param_1, uint32_t cmd_param_2, uint32_t cmd_param_3,
+ uint32_t cmd_param_4, uint32_t cmd_param_5, uint32_t cmd_param_6,
+ uint32_t resp_data_len, struct io96b_mb_resp *resp)
+{
+ int i;
+ int ret;
+ uint32_t cmd_req, cmd_resp;
+
+	/* Initialize the response fields to zero */
+ resp->cmd_resp_status = 0;
+ resp->cmd_resp_data_0 = 0;
+ resp->cmd_resp_data_1 = 0;
+ resp->cmd_resp_data_2 = 0;
+
+	/* Ensure CMD_REQ is cleared before writing any command request */
+ ret = wait_for_bit((const void *)(io96b_csr_addr + IOSSM_CMD_REQ_OFFSET),
+ GENMASK(31, 0), 0, 10000);
+
+ if (ret != 0) {
+ ERROR("%s: CMD_REQ not ready\n", __func__);
+ return -1;
+ }
+
+ /* Write CMD_PARAM_* */
+	for (i = 0; i < 7; i++) {
+ switch (i) {
+ case 0:
+ if (cmd_param_0 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_0_OFFSET,
+ cmd_param_0);
+ }
+ break;
+ case 1:
+ if (cmd_param_1 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_1_OFFSET,
+ cmd_param_1);
+ }
+ break;
+ case 2:
+ if (cmd_param_2 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_2_OFFSET,
+ cmd_param_2);
+ }
+ break;
+ case 3:
+ if (cmd_param_3 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_3_OFFSET,
+ cmd_param_3);
+ }
+ break;
+ case 4:
+ if (cmd_param_4 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_4_OFFSET,
+ cmd_param_4);
+ }
+ break;
+ case 5:
+ if (cmd_param_5 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_5_OFFSET,
+ cmd_param_5);
+ }
+ break;
+ case 6:
+ if (cmd_param_6 != 0) {
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_PARAM_6_OFFSET,
+ cmd_param_6);
+ }
+ break;
+ default:
+ ERROR("IOSSM: %s: Invalid command parameter\n", __func__);
+ }
+ }
+
+ /* Write CMD_REQ (IP_TYPE, IP_INSTANCE_ID, CMD_TYPE and CMD_OPCODE) */
+ cmd_req = (usr_cmd_opcode << 0) | (usr_cmd_type << 16) | (instance_id << 24) |
+ (ip_type << 29);
+ mmio_write_32(io96b_csr_addr + IOSSM_CMD_REQ_OFFSET, cmd_req);
+ INFO("IOSSM: %s: Write 0x%x to IOSSM_CMD_REQ_OFFSET 0x%llx\n",
+ __func__, cmd_req, io96b_csr_addr + IOSSM_CMD_REQ_OFFSET);
+
+	/* Read CMD_RESPONSE_READY in CMD_RESPONSE_STATUS */
+ ret = wait_for_bit((const void *)(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
+ IOSSM_STATUS_COMMAND_RESPONSE_READY, 1, 10000);
+
+ if (ret != 0) {
+ ERROR("%s: CMD_RESPONSE ERROR:\n", __func__);
+		cmd_resp = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
+ ERROR("%s: STATUS_GENERAL_ERROR: 0x%x\n", __func__, (cmd_resp >> 1) & 0xF);
+ ERROR("%s: STATUS_CMD_RESPONSE_ERROR: 0x%x\n", __func__, (cmd_resp >> 5) & 0x7);
+ }
+
+	/* read CMD_RESPONSE_STATUS */
+ resp->cmd_resp_status = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
+ INFO("IOSSM: %s: CMD_RESPONSE_STATUS 0x%llx: 0x%x\n",
+ __func__, io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);
+
+ /* read CMD_RESPONSE_DATA_* */
+ for (i = 0; i < resp_data_len; i++) {
+ switch (i) {
+ case 0:
+ resp->cmd_resp_data_0 =
+ mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_0_OFFSET);
+
+ break;
+ case 1:
+ resp->cmd_resp_data_1 =
+ mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_1_OFFSET);
+
+ break;
+ case 2:
+ resp->cmd_resp_data_2 =
+ mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_2_OFFSET);
+ break;
+ default:
+ ERROR("%s: Invalid response data\n", __func__);
+ }
+ }
+
+ resp->cmd_resp_status = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
+ INFO("IOSSM: %s: CMD_RESPONSE_STATUS 0x%llx: 0x%x\n",
+ __func__, io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);
+
+ /* write CMD_RESPONSE_READY = 0 */
+ mmio_clrbits_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET,
+ IOSSM_STATUS_COMMAND_RESPONSE_READY);
+
+ resp->cmd_resp_status = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
+ INFO("IOSSM: %s: CMD_RESPONSE_READY 0x%llx: 0x%x\n",
+ __func__, io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);
+
+ return 0;
+}
+
+/*
+ * Initial function to be called to set the memory interface IP type and instance ID.
+ * The IP type and instance ID must be determined before sending any mailbox command.
+ */
+void io96b_mb_init(struct io96b_info *io96b_ctrl)
+{
+ struct io96b_mb_resp usr_resp;
+ uint8_t ip_type_ret, instance_id_ret;
+ int i, j, k;
+
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ /* Get memory interface IP type & instance ID (IP identifier) */
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr, 0, 0,
+ CMD_GET_SYS_INFO, GET_MEM_INTF_INFO, 0, 0,
+ 0, 0, 0, 0, 0, 2, &usr_resp);
+ /* Retrieve number of memory interface(s) */
+ io96b_ctrl->io96b_0.mb_ctrl.num_mem_interface =
+ IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status) & 0x3;
+
+ /* Retrieve memory interface IP type and instance ID (IP identifier) */
+ j = 0;
+ for (k = 0; k < MAX_MEM_INTERFACES_SUPPORTED; k++) {
+ switch (k) {
+ case 0:
+ ip_type_ret = (usr_resp.cmd_resp_data_0 >> 29) & 0x7;
+ instance_id_ret = (usr_resp.cmd_resp_data_0 >> 24) & 0x1F;
+ break;
+ case 1:
+ ip_type_ret = (usr_resp.cmd_resp_data_1 >> 29) & 0x7;
+ instance_id_ret = (usr_resp.cmd_resp_data_1 >> 24) & 0x1F;
+ break;
+ }
+
+ if (ip_type_ret != 0) {
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[j] = ip_type_ret;
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[j] =
+ instance_id_ret;
+ j++;
+ }
+ }
+ break;
+ case 1:
+ /* Get memory interface IP type and instance ID (IP identifier) */
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr, 0, 0, CMD_GET_SYS_INFO,
+ GET_MEM_INTF_INFO, 0, 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+
+ /* Retrieve number of memory interface(s) */
+ io96b_ctrl->io96b_1.mb_ctrl.num_mem_interface =
+ IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status) & 0x3;
+
+ /* Retrieve memory interface IP type and instance ID (IP identifier) */
+ j = 0;
+ for (k = 0; k < MAX_MEM_INTERFACES_SUPPORTED; k++) {
+ switch (k) {
+ case 0:
+ ip_type_ret = (usr_resp.cmd_resp_data_0 >> 29) & 0x7;
+ instance_id_ret = (usr_resp.cmd_resp_data_0 >> 24) & 0x1F;
+ break;
+ case 1:
+ ip_type_ret = (usr_resp.cmd_resp_data_1 >> 29) & 0x7;
+ instance_id_ret = (usr_resp.cmd_resp_data_1 >> 24) & 0x1F;
+ break;
+ }
+
+ if (ip_type_ret != 0) {
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[j] = ip_type_ret;
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[j] =
+ instance_id_ret;
+ j++;
+ }
+ }
+ break;
+ }
+
+ }
+}
+
+static inline void hang(void)
+{
+ ERROR("IOSSM: %s: system is going to die :(\n", __func__);
+ while (1)
+ ;
+}
+
+int io96b_cal_status(phys_addr_t addr)
+{
+ int cal_busy_status, cal_success_status;
+ phys_addr_t status_addr = addr + IOSSM_STATUS_OFFSET;
+
+ /* Ensure calibration busy status */
+ cal_busy_status = wait_for_bit((const void *)status_addr, IOSSM_STATUS_CAL_BUSY,
+ false, 15000);
+ if (cal_busy_status != 0) {
+ ERROR("IOSSM: One or more EMIF instances are busy with calibration\n");
+ return -EBUSY;
+ }
+
+ /* Calibration success status check */
+ NOTICE("IOSSM: Calibration success status check...\n");
+ cal_success_status = wait_for_bit((const void *)status_addr, IOSSM_STATUS_CAL_SUCCESS,
+ true, 15000);
+ if (cal_success_status != 0) {
+		ERROR("IOSSM: One or more EMIF instances failed or did not complete calibration\n");
+ return -EBUSY;
+ }
+
+ NOTICE("IOSSM: All EMIF instances within the IO96 have calibrated successfully!\n");
+ return 0;
+}
+
+void init_mem_cal(struct io96b_info *io96b_ctrl)
+{
+ int count, i, ret;
+
+ /* Initialize overall calibration status */
+ io96b_ctrl->overall_cal_status = false;
+
+ /* Check initial calibration status for the assigned IO96B */
+ count = 0;
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ ret = io96b_cal_status(io96b_ctrl->io96b_0.io96b_csr_addr);
+ if (ret != 0) {
+ io96b_ctrl->io96b_0.cal_status = false;
+ ERROR("%s: Initial DDR calibration IO96B_0 failed %d\n",
+ __func__, ret);
+ break;
+ }
+ io96b_ctrl->io96b_0.cal_status = true;
+			INFO("IOSSM: %s: Initial DDR calibration IO96B_0 succeeded\n", __func__);
+ count++;
+ break;
+ case 1:
+ ret = io96b_cal_status(io96b_ctrl->io96b_1.io96b_csr_addr);
+ if (ret != 0) {
+ io96b_ctrl->io96b_1.cal_status = false;
+ ERROR("%s: Initial DDR calibration IO96B_1 failed %d\n",
+ __func__, ret);
+ break;
+ }
+ io96b_ctrl->io96b_1.cal_status = true;
+			INFO("IOSSM: %s: Initial DDR calibration IO96B_1 succeeded\n", __func__);
+ count++;
+ break;
+ }
+ }
+
+ if (count == io96b_ctrl->num_instance)
+ io96b_ctrl->overall_cal_status = true;
+}
+
+/*
+ * Retry calibration up to 3 times if the initial calibration failed
+ */
+int trig_mem_cal(struct io96b_info *io96b_ctrl)
+{
+ struct io96b_mb_resp usr_resp;
+ bool recal_success;
+	int i, j;
+ uint8_t cal_stat;
+
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ if (!(io96b_ctrl->io96b_0.cal_status)) {
+ /* Get the memory calibration status for first memory interface */
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS, 0,
+ 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+
+ recal_success = false;
+
+				/* Re-calibrate the first memory interface if calibration failed */
+				for (j = 0; j < 3; j++) {
+ cal_stat = usr_resp.cmd_resp_data_0 & GENMASK(2, 0);
+ if (cal_stat < 0x2) {
+ recal_success = true;
+ break;
+ }
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[0],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[0],
+ CMD_TRIG_MEM_CAL_OP, TRIG_MEM_CAL, 0, 0, 0, 0,
+ 0, 0, 0, 2, &usr_resp);
+ mdelay(1000);
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS,
+ 0, 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+ }
+
+ if (!recal_success) {
+ ERROR("%s: Error as SDRAM calibration failed\n", __func__);
+ hang();
+ }
+
+ /* Get the memory calibration status for second memory interface */
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS, 0, 0, 0,
+ 0, 0, 0, 0, 2, &usr_resp);
+
+ recal_success = false;
+
+				/* Re-calibrate the second memory interface if calibration failed */
+				for (j = 0; j < 3; j++) {
+ cal_stat = usr_resp.cmd_resp_data_1 & GENMASK(2, 0);
+ if (cal_stat < 0x2) {
+ recal_success = true;
+ break;
+ }
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[1],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[1],
+ CMD_TRIG_MEM_CAL_OP, TRIG_MEM_CAL, 0, 0, 0, 0,
+ 0, 0, 0, 2, &usr_resp);
+ mdelay(1000);
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS,
+ 0, 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+ }
+
+ if (!recal_success) {
+					ERROR("IOSSM: Error as SDRAM calibration failed\n");
+ hang();
+ }
+
+ io96b_ctrl->io96b_0.cal_status = true;
+ }
+ break;
+ case 1:
+ if (!(io96b_ctrl->io96b_1.cal_status)) {
+ /* Get the memory calibration status for first memory interface */
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS, 0,
+ 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+
+ recal_success = false;
+
+				/* Re-calibrate the first memory interface if calibration failed */
+				for (j = 0; j < 3; j++) {
+ cal_stat = usr_resp.cmd_resp_data_0 & GENMASK(2, 0);
+ if (cal_stat < 0x2) {
+ recal_success = true;
+ break;
+ }
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[0],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[0],
+ CMD_TRIG_MEM_CAL_OP, TRIG_MEM_CAL, 0, 0, 0, 0,
+ 0, 0, 0, 2, &usr_resp);
+ mdelay(1000);
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS,
+ 0, 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+ }
+
+ if (!recal_success) {
+ ERROR("IOSSM: Error as SDRAM calibration failed\n");
+ hang();
+ }
+
+ /* Get the memory calibration status for second memory interface */
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS, 0, 0, 0,
+ 0, 0, 0, 0, 2, &usr_resp);
+
+ recal_success = false;
+
+				/* Re-calibrate the second memory interface if calibration failed */
+				for (j = 0; j < 3; j++) {
+					cal_stat = usr_resp.cmd_resp_data_1 & GENMASK(2, 0);
+ if (cal_stat < 0x2) {
+ recal_success = true;
+ break;
+ }
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[1],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[1],
+ CMD_TRIG_MEM_CAL_OP, TRIG_MEM_CAL, 0, 0, 0, 0,
+ 0, 0, 0, 2, &usr_resp);
+ mdelay(1000);
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr, 0, 0,
+ CMD_TRIG_MEM_CAL_OP, GET_MEM_CAL_STATUS,
+ 0, 0, 0, 0, 0, 0, 0, 2, &usr_resp);
+ }
+
+ if (!recal_success) {
+ ERROR("IOSSM: Error as SDRAM calibration failed\n");
+ hang();
+ }
+
+ io96b_ctrl->io96b_1.cal_status = true;
+ }
+ break;
+ }
+ }
+
+ if (io96b_ctrl->io96b_0.cal_status && io96b_ctrl->io96b_1.cal_status) {
+ INFO("IOSSM: %s: Overall SDRAM calibration success\n", __func__);
+ io96b_ctrl->overall_cal_status = true;
+ }
+
+ return 0;
+}
+
+int get_mem_technology(struct io96b_info *io96b_ctrl)
+{
+ struct io96b_mb_resp usr_resp;
+ int i, j;
+ uint8_t ddr_type_ret;
+
+ /* Initialize ddr type */
+ io96b_ctrl->ddr_type = ddr_type_list[6];
+
+	/* Get the DDR type and ensure all memory interfaces report the same type */
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ for (j = 0; j < io96b_ctrl->io96b_0.mb_ctrl.num_mem_interface; j++) {
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[j],
+ CMD_GET_MEM_INFO, GET_MEM_TECHNOLOGY, 0, 0, 0, 0,
+ 0, 0, 0, 0, &usr_resp);
+
+ ddr_type_ret =
+ IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & GENMASK(2, 0);
+
+ if (strcmp(io96b_ctrl->ddr_type, "UNKNOWN") == 0)
+ io96b_ctrl->ddr_type = ddr_type_list[ddr_type_ret];
+
+ if (ddr_type_list[ddr_type_ret] != io96b_ctrl->ddr_type) {
+ ERROR("IOSSM: Mismatch DDR type on IO96B_0\n");
+ return -ENOEXEC;
+ }
+ }
+ break;
+ case 1:
+ for (j = 0; j < io96b_ctrl->io96b_1.mb_ctrl.num_mem_interface; j++) {
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[j],
+ CMD_GET_MEM_INFO, GET_MEM_TECHNOLOGY, 0, 0, 0,
+ 0, 0, 0, 0, 0, &usr_resp);
+
+ ddr_type_ret =
+ IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & GENMASK(2, 0);
+
+ if (strcmp(io96b_ctrl->ddr_type, "UNKNOWN") == 0)
+ io96b_ctrl->ddr_type = ddr_type_list[ddr_type_ret];
+
+ if (ddr_type_list[ddr_type_ret] != io96b_ctrl->ddr_type) {
+ ERROR("IOSSM: Mismatch DDR type on IO96B_1\n");
+ return -ENOEXEC;
+ }
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int get_mem_width_info(struct io96b_info *io96b_ctrl)
+{
+ struct io96b_mb_resp usr_resp;
+ int i, j;
+ uint16_t memory_size = 0U;
+ uint16_t total_memory_size = 0U;
+
+	/* Get the total memory size across all memory interfaces on all instances */
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ memory_size = 0;
+ for (j = 0; j < io96b_ctrl->io96b_0.mb_ctrl.num_mem_interface; j++) {
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[j],
+ CMD_GET_MEM_INFO, GET_MEM_WIDTH_INFO, 0, 0, 0,
+ 0, 0, 0, 0, 2, &usr_resp);
+
+ memory_size = memory_size +
+ (usr_resp.cmd_resp_data_1 & GENMASK(7, 0));
+ }
+
+ if (memory_size == 0U) {
+ ERROR("IOSSM: %s: Failed to get valid memory size\n", __func__);
+ return -ENOEXEC;
+ }
+
+ io96b_ctrl->io96b_0.size = memory_size;
+
+ break;
+ case 1:
+ memory_size = 0;
+ for (j = 0; j < io96b_ctrl->io96b_1.mb_ctrl.num_mem_interface; j++) {
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[j],
+ CMD_GET_MEM_INFO, GET_MEM_WIDTH_INFO, 0, 0, 0,
+ 0, 0, 0, 0, 2, &usr_resp);
+
+ memory_size = memory_size +
+ (usr_resp.cmd_resp_data_1 & GENMASK(7, 0));
+ }
+
+ if (memory_size == 0U) {
+ ERROR("IOSSM: %s: Failed to get valid memory size\n", __func__);
+ return -ENOEXEC;
+ }
+
+ io96b_ctrl->io96b_1.size = memory_size;
+
+ break;
+ }
+
+ total_memory_size = total_memory_size + memory_size;
+ }
+
+ if (total_memory_size == 0U) {
+ ERROR("IOSSM: %s: Failed to get valid memory size\n", __func__);
+ return -ENOEXEC;
+ }
+
+ io96b_ctrl->overall_size = total_memory_size;
+
+ return 0;
+}
+
+int ecc_enable_status(struct io96b_info *io96b_ctrl)
+{
+ struct io96b_mb_resp usr_resp;
+ int i, j;
+ bool ecc_stat_set = false;
+ bool ecc_stat;
+
+ /* Initialize ECC status */
+ io96b_ctrl->ecc_status = false;
+
+	/* Get the ECC status and ensure it matches across all memory interfaces */
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ for (j = 0; j < io96b_ctrl->io96b_0.mb_ctrl.num_mem_interface; j++) {
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[j],
+ CMD_TRIG_CONTROLLER_OP, ECC_ENABLE_STATUS, 0, 0,
+ 0, 0, 0, 0, 0, 0, &usr_resp);
+
+ ecc_stat = ((IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & GENMASK(1, 0)) == 0 ? false : true);
+
+ if (!ecc_stat_set) {
+ io96b_ctrl->ecc_status = ecc_stat;
+ ecc_stat_set = true;
+ }
+
+ if (ecc_stat != io96b_ctrl->ecc_status) {
+ ERROR("IOSSM: %s: Mismatch DDR ECC status on IO96B_0\n",
+ __func__);
+ return -ENOEXEC;
+ }
+ }
+ break;
+ case 1:
+ for (j = 0; j < io96b_ctrl->io96b_1.mb_ctrl.num_mem_interface; j++) {
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[j],
+ CMD_TRIG_CONTROLLER_OP, ECC_ENABLE_STATUS, 0, 0,
+ 0, 0, 0, 0, 0, 0, &usr_resp);
+
+ ecc_stat = ((IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & GENMASK(1, 0)) == 0 ? false : true);
+
+ if (!ecc_stat_set) {
+ io96b_ctrl->ecc_status = ecc_stat;
+ ecc_stat_set = true;
+ }
+
+ if (ecc_stat != io96b_ctrl->ecc_status) {
+ ERROR("%s: Mismatch DDR ECC status on IO96B_1\n"
+ , __func__);
+ return -ENOEXEC;
+ }
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+int bist_mem_init_start(struct io96b_info *io96b_ctrl)
+{
+ struct io96b_mb_resp usr_resp;
+ int i, j;
+ bool bist_start, bist_success;
+ uint32_t read_count;
+ uint32_t read_interval_ms;
+
+ /* Full memory initialization BIST performed on all memory interface(s) */
+ for (i = 0; i < io96b_ctrl->num_instance; i++) {
+ switch (i) {
+ case 0:
+ for (j = 0; j < io96b_ctrl->io96b_0.mb_ctrl.num_mem_interface; j++) {
+ bist_start = false;
+ bist_success = false;
+ read_interval_ms = 500U;
+
+ /* Start memory initialization BIST on full memory address */
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[j],
+ CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_START, 0x40,
+ 0, 0, 0, 0, 0, 0, 0, &usr_resp);
+
+ bist_start =
+ (IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & 1);
+
+ if (!bist_start) {
+					ERROR("IOSSM: %s: Failed to initialize memory on IO96B_0\n"
+ , __func__);
+ ERROR("IOSSM: %s: BIST_MEM_INIT_START Error code 0x%x\n",
+ __func__,
+ (IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & GENMASK(2, 1)) > 0x1);
+ return -ENOEXEC;
+ }
+
+ /* Polling for the initiated memory initialization BIST status */
+				read_count = TIMEOUT / read_interval_ms;
+ while (!bist_success) {
+ io96b_mb_req(io96b_ctrl->io96b_0.io96b_csr_addr,
+ io96b_ctrl->io96b_0.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_0.mb_ctrl.ip_instance_id[j],
+ CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_STATUS,
+ 0, 0, 0, 0, 0, 0, 0, 0, &usr_resp);
+
+ bist_success = (IOSSM_CMD_RESPONSE_DATA_SHORT
+ (usr_resp.cmd_resp_status) & 1);
+
+ if ((!bist_success) && (read_count == 0U)) {
+						ERROR("IOSSM: %s: Timeout initializing memory on IO96B_0\n"
+						, __func__);
+						ERROR("IOSSM: %s: BIST_MEM_INIT_STATUS Error code 0x%x\n"
+ , __func__, (IOSSM_CMD_RESPONSE_DATA_SHORT
+ (usr_resp.cmd_resp_status)
+ & GENMASK(2, 1)) > 0x1);
+ return -ETIMEDOUT;
+ }
+ read_count--;
+ mdelay(read_interval_ms);
+ }
+ }
+
+ NOTICE("IOSSM: %s: Memory initialized successfully on IO96B_0\n", __func__);
+ break;
+
+ case 1:
+ for (j = 0; j < io96b_ctrl->io96b_1.mb_ctrl.num_mem_interface; j++) {
+ bist_start = false;
+ bist_success = false;
+ read_interval_ms = 500U;
+
+ /* Start memory initialization BIST on full memory address */
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[j],
+ CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_START, 0x40,
+ 0, 0, 0, 0, 0, 0, 0, &usr_resp);
+
+ bist_start =
+ (IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & 1);
+
+ if (!bist_start) {
+					ERROR("IOSSM: %s: Failed to initialize memory on IO96B_1\n"
+ , __func__);
+ ERROR("IOSSM: %s: BIST_MEM_INIT_START Error code 0x%x\n",
+ __func__,
+ (IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
+ & GENMASK(2, 1)) > 0x1);
+ return -ENOEXEC;
+ }
+
+ /* Polling for the initiated memory initialization BIST status */
+				read_count = TIMEOUT / read_interval_ms;
+ while (!bist_success) {
+ io96b_mb_req(io96b_ctrl->io96b_1.io96b_csr_addr,
+ io96b_ctrl->io96b_1.mb_ctrl.ip_type[j],
+ io96b_ctrl->io96b_1.mb_ctrl.ip_instance_id[j],
+ CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_STATUS,
+ 0, 0, 0, 0, 0, 0, 0, 0, &usr_resp);
+
+ bist_success = (IOSSM_CMD_RESPONSE_DATA_SHORT
+ (usr_resp.cmd_resp_status) & 1);
+
+ if ((!bist_success) && (read_count == 0U)) {
+						ERROR("IOSSM: %s: Timeout initializing memory on IO96B_1\n"
+						, __func__);
+						ERROR("IOSSM: %s: BIST_MEM_INIT_STATUS Error code 0x%x\n"
+ , __func__, (IOSSM_CMD_RESPONSE_DATA_SHORT
+ (usr_resp.cmd_resp_status)
+ & GENMASK(2, 1)) > 0x1);
+ return -ETIMEDOUT;
+ }
+ read_count--;
+ mdelay(read_interval_ms);
+ }
+ }
+
+ NOTICE("IOSSM: %s: Memory initialized successfully on IO96B_1\n", __func__);
+ break;
+ }
+ }
+ return 0;
+}
diff --git a/plat/intel/soc/common/include/socfpga_handoff.h b/plat/intel/soc/common/include/socfpga_handoff.h
index b2913c7..d001887 100644
--- a/plat/intel/soc/common/include/socfpga_handoff.h
+++ b/plat/intel/soc/common/include/socfpga_handoff.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -179,7 +180,7 @@
uint32_t ddr_magic;
uint32_t ddr_length;
uint32_t _pad_0x1C_0x20[2];
- uint32_t ddr_array[4]; /* offset, value */
+ uint32_t ddr_config; /* BIT[0]-Dual Port. BIT[1]-Dual EMIF */
#endif
} handoff;