Chris Packham | 1a07d21 | 2018-05-10 13:28:29 +1200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) Marvell International Ltd. and its affiliates |
| 4 | */ |
| 5 | |
| 6 | #include "mv_ddr_topology.h" |
| 7 | #include "mv_ddr_common.h" |
| 8 | #include "mv_ddr_spd.h" |
| 9 | #include "ddr3_init.h" |
| 10 | #include "ddr_topology_def.h" |
| 11 | #include "ddr3_training_ip_db.h" |
| 12 | #include "ddr3_training_ip.h" |
| 13 | |
| 14 | |
/*
 * Calculate the cas latency (cl) for a given minimum taa and clock period:
 * round taa_min up to a whole number of clock cycles, then map that count
 * to a cl value the spd data reports as supported.
 */
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk)
{
	return mv_ddr_spd_supported_cl_get(ceil_div(taa_min, tclk));
}
| 22 | |
/*
 * Derive the cas write latency (cwl) from the clock period (tclk).
 * A larger period (slower clock) maps to a smaller cwl; a period below
 * the lowest supported threshold yields 0, which callers treat as
 * "unsupported".
 */
unsigned int mv_ddr_cwl_calc(unsigned int tclk)
{
	static const struct {
		unsigned int tclk_min;	/* lowest clock period for this cwl */
		unsigned int cwl;
	} cwl_map[] = {
		{1250, 9},
		{1071, 10},
		{938, 11},
		{833, 12},
	};
	unsigned int i;

	/* thresholds are in descending order; first match wins */
	for (i = 0; i < sizeof(cwl_map) / sizeof(cwl_map[0]); i++) {
		if (tclk >= cwl_map[i].tclk_min)
			return cwl_map[i].cwl;
	}

	return 0;
}
| 40 | |
/*
 * Resolve and validate the active topology map.
 *
 * Fetches the shared topology map and fills in any values that depend on
 * runtime configuration: the memory frequency (when set to sample-at-reset),
 * and — when the configuration source is SPD — the device width, die
 * capacity, bus/cs/mirror bit masks, and the cas/cas-write latencies derived
 * from the spd timing data. For the default configuration source, only
 * unset cas latencies are filled from the per-speed-bin tables.
 *
 * Returns the updated topology map, or NULL on any validation failure
 * (unsupported device type, module type, or latency; negative timing data).
 */
struct mv_ddr_topology_map *mv_ddr_topology_map_update(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	enum hws_speed_bin speed_bin_index;
	enum hws_ddr_freq freq = DDR_FREQ_LAST;
	unsigned int tclk;
	unsigned char val = 0;
	int i;


	/* DDR_FREQ_SAR means "read the frequency from sample-at-reset" */
	if (tm->interface_params[0].memory_freq == DDR_FREQ_SAR)
		tm->interface_params[0].memory_freq = mv_ddr_init_freq_get();

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		/* check dram device type; only ddr4 modules are supported here */
		val = mv_ddr_spd_dev_type_get(&tm->spd_data);
		if (val != MV_DDR_SPD_DEV_TYPE_DDR4) {
			printf("mv_ddr: unsupported dram device type found\n");
			return NULL;
		}

		/* update topology map with timing data */
		if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) {
			printf("mv_ddr: negative timing data found\n");
			return NULL;
		}

		/* update device width in topology map */
		tm->interface_params[0].bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data);

		/* update die capacity in topology map */
		tm->interface_params[0].memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data);

		/* update bus bit mask in topology map */
		tm->bus_act_mask = mv_ddr_bus_bit_mask_get();

		/* update cs bit mask in topology map; same mask for every octet */
		val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++) {
			tm->interface_params[0].as_bus_params[i].cs_bitmask = val;
		}

		/* check dram module type; reject registered/load-reduced variants */
		val = mv_ddr_spd_module_type_get(&tm->spd_data);
		switch (val) {
		case MV_DDR_SPD_MODULE_TYPE_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM:
			break;
		default:
			printf("mv_ddr: unsupported dram module type found\n");
			return NULL;
		}

		/*
		 * update mirror bit mask in topology map;
		 * the shift by 1 presumably places the mirror flag on cs1 —
		 * TODO(review): confirm against as_bus_params bitmask layout
		 */
		val = mv_ddr_spd_mem_mirror_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++) {
			tm->interface_params[0].as_bus_params[i].mirror_enable_bitmask = val << 1;
		}

		/* clock period derived from the frequency table; units match freq_val's */
		tclk = 1000000 / freq_val[tm->interface_params[0].memory_freq];
		/* update cas write latency (cwl); 0 means unsupported clock period */
		val = mv_ddr_cwl_calc(tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas write latency value found\n");
			return NULL;
		}
		tm->interface_params[0].cas_wl = val;

		/* update cas latency (cl); 0 means no supported cl matched */
		mv_ddr_spd_supported_cls_calc(&tm->spd_data);
		val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas latency value found\n");
			return NULL;
		}
		tm->interface_params[0].cas_l = val;
	} else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) {
		/* set cas and cas-write latencies per speed bin, if they unset */
		speed_bin_index = tm->interface_params[0].speed_bin_index;
		freq = tm->interface_params[0].memory_freq;

		if (tm->interface_params[0].cas_l == 0)
			tm->interface_params[0].cas_l =
				cas_latency_table[speed_bin_index].cl_val[freq];

		if (tm->interface_params[0].cas_wl == 0)
			tm->interface_params[0].cas_wl =
				cas_write_latency_table[speed_bin_index].cl_val[freq];
	}


	return tm;
}
| 139 | |
| 140 | unsigned short mv_ddr_bus_bit_mask_get(void) |
| 141 | { |
| 142 | unsigned short pri_and_ext_bus_width = 0x0; |
| 143 | struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); |
| 144 | unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); |
| 145 | |
| 146 | if (tm->cfg_src == MV_DDR_CFG_SPD) { |
| 147 | enum mv_ddr_pri_bus_width pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data); |
| 148 | enum mv_ddr_bus_width_ext bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data); |
| 149 | |
| 150 | switch (pri_bus_width) { |
| 151 | case MV_DDR_PRI_BUS_WIDTH_16: |
| 152 | pri_and_ext_bus_width = BUS_MASK_16BIT; |
| 153 | break; |
| 154 | case MV_DDR_PRI_BUS_WIDTH_32: |
| 155 | pri_and_ext_bus_width = BUS_MASK_32BIT; |
| 156 | break; |
| 157 | case MV_DDR_PRI_BUS_WIDTH_64: |
| 158 | pri_and_ext_bus_width = MV_DDR_64BIT_BUS_MASK; |
| 159 | break; |
| 160 | default: |
| 161 | pri_and_ext_bus_width = 0x0; |
| 162 | } |
| 163 | |
| 164 | if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8) |
| 165 | pri_and_ext_bus_width |= 1 << (octets_per_if_num - 1); |
| 166 | } |
| 167 | |
| 168 | return pri_and_ext_bus_width; |
| 169 | } |
| 170 | |
| 171 | unsigned int mv_ddr_if_bus_width_get(void) |
| 172 | { |
| 173 | struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); |
| 174 | unsigned int bus_width; |
| 175 | |
| 176 | switch (tm->bus_act_mask) { |
| 177 | case BUS_MASK_16BIT: |
| 178 | case BUS_MASK_16BIT_ECC: |
| 179 | case BUS_MASK_16BIT_ECC_PUP3: |
| 180 | bus_width = 16; |
| 181 | break; |
| 182 | case BUS_MASK_32BIT: |
| 183 | case BUS_MASK_32BIT_ECC: |
| 184 | case MV_DDR_32BIT_ECC_PUP8_BUS_MASK: |
| 185 | bus_width = 32; |
| 186 | break; |
| 187 | case MV_DDR_64BIT_BUS_MASK: |
| 188 | case MV_DDR_64BIT_ECC_PUP8_BUS_MASK: |
| 189 | bus_width = 64; |
| 190 | break; |
| 191 | default: |
| 192 | printf("mv_ddr: unsupported bus active mask parameter found\n"); |
| 193 | bus_width = 0; |
| 194 | } |
| 195 | |
| 196 | return bus_width; |
| 197 | } |