/*
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include <mce.h>
#include <memctrl.h>
#include <memctrl_v2.h>
#include <smmu.h>
#include <tegra_def.h>
#include <tegra_platform.h>

/* Video Memory base and size (live values) */
static uint64_t video_mem_base;
static uint64_t video_mem_size_mb;

static void tegra_memctrl_reconfig_mss_clients(void)
{
#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
	uint32_t val, wdata_0, wdata_1;

	/*
	 * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
	 * boot and strongly ordered MSS clients to flush existing memory
	 * traffic and stall future requests.
	 */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);

	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
#if ENABLE_AFI_DEVICE
		  MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
#endif
		  MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);

	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

	/*
	 * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
	 * strongly ordered MSS clients. ROC needs to be the single point
	 * of control for overriding the memory type, so remove TSA's
	 * memtype override.
	 *
	 * MC clients with the default SO_DEV override still enabled at TSA:
	 * AONW, BPMPW, SCEW, APEW
	 */
#if ENABLE_AFI_DEVICE
	mc_set_tsa_passthrough(AFIW);
#endif
	mc_set_tsa_passthrough(HDAW);
	mc_set_tsa_passthrough(SATAW);
	mc_set_tsa_passthrough(XUSB_HOSTW);
	mc_set_tsa_passthrough(XUSB_DEVW);
	mc_set_tsa_passthrough(SDMMCWAB);
	mc_set_tsa_passthrough(APEDMAW);
	mc_set_tsa_passthrough(SESWR);
	mc_set_tsa_passthrough(ETRW);
	mc_set_tsa_passthrough(AXISW);
	mc_set_tsa_passthrough(EQOSW);
	mc_set_tsa_passthrough(UFSHCW);
	mc_set_tsa_passthrough(BPMPDMAW);
	mc_set_tsa_passthrough(AONDMAW);
	mc_set_tsa_passthrough(SCEDMAW);

	/*
	 * Parker has no IO coherency support and needs the following:
	 * Ordered MC clients on Parker are AFI, EQOS, SATA and XUSB.
	 * ISO clients (DISP, VI, EQOS) should never snoop caches and
	 * don't need ROC/PCFIFO ordering.
	 * ISO clients (EQOS) that need ordering should use PCFIFO ordering
	 * and bypass ROC ordering by using the FORCE_NON_COHERENT path.
	 * The FORCE_NON_COHERENT/FORCE_COHERENT config takes precedence
	 * over SMMU attributes.
	 * Force all Normal memory transactions from ISO and non-ISO clients
	 * to be non-coherent (bypass ROC, avoid cache snoops to avoid a
	 * performance hit).
	 * Force the SO_DEV transactions from ordered ISO clients (EQOS) to
	 * the non-coherent path and enable the MC PCFIFO interlock for
	 * ordering.
	 * Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
	 * XUSB, SATA) to the coherent path so that the transactions are
	 * ordered by ROC.
	 * PCFIFO ensures write ordering.
	 * Read-after-write ordering is maintained/enforced by the MC clients.
	 * Clients that need PCIe-type write ordering must go through ROC
	 * ordering.
	 * Enabling ordering for read clients is not necessary.
	 * The R5s and A9 get the necessary ordering from AXI and don't need
	 * ROC ordering enabled:
	 * - MMIO ordering is through dev mapping and MMIO accesses bypass
	 *   the SMMU.
	 * - Normal memory is accessed through the SMMU and ordering is
	 *   ensured by the client and AXI.
	 * - The ack point for Normal memory is the WCAM in the MC.
	 * - MMIOs can be early acked and AXI ensures dev memory ordering;
	 *   the client ensures read/write direction change ordering.
	 * - See Bug 200312466 for more details.
	 *
	 * CGID_TAG_ADR is only present from T186 A02. As this code is common
	 * between A01 and A02, tegra_memctrl_set_overrides() programs
	 * CGID_TAG_ADR for the necessary clients on A02.
	 */
	mc_set_txn_override(HDAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(PTCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDISPLAYR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(EQOSW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVJPGSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ISPRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCWAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VICSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(MPCOREW, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
	mc_set_txn_override(GPUSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AXISR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SCEDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(EQOSR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/* See bug 200131110 comment #35 */
	mc_set_txn_override(APEDMAR, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVENCSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCRAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VICSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VIW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCRAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AXISW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_DEVR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(UFSHCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(TSECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(GPUSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SATAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_HOSTW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(TSECSWRB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(GPUSRD2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SCEDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(GPUSWR2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/* See bug 200131110 comment #35 */
	mc_set_txn_override(APEDMAW, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(HOST1XDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ETRR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SESWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVJPGSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(TSECSRDB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(APER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDECSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_HOSTR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ISPWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SESRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SCER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(MPCORER, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
	mc_set_txn_override(SDMMCWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(HDAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(UFSHCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SATAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(ETRW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VICSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVENCSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/* See bug 200131110 comment #35 */
	mc_set_txn_override(AFIR, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCWAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDISPLAYR1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ISPWB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(APEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_DEVW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/*
	 * See bug 200131110 comment #35 - there are no normal requests
	 * and AWID for SO/DEV requests is hardcoded in RTL for a
	 * particular PCIE controller
	 */
	mc_set_txn_override(AFIW, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(SCEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);

	/*
	 * At this point, ordering can occur at ROC. So, remove PCFIFO's
	 * control over ordering requests.
	 *
	 * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
	 * boot and strongly ordered MSS clients
	 */
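	/*
	 * Each mc_set_pcfifo_unordered_boot_so_mss() invocation is assumed
	 * to return the register value with the given client's
	 * ORDERED_CLIENT bit cleared, so ANDing the results with the reset
	 * value clears ordering for every listed client while leaving the
	 * remaining bits at their reset values.
	 */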
	val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
	      mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
#endif
	      mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
	      mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);

	val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
	      mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
	      mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);

	val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
	      mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);

	val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
	      mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
	      mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
	      mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
	      mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
	      mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
	      mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
	      mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
	/* EQOSW is the only client that has PCFIFO order enabled. */
	val |= mc_set_pcfifo_ordered_boot_so_mss(4, EQOSW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);

	val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
	      mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);

	/*
	 * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
	 * clients to allow memory traffic from all clients to start passing
	 * through ROC
	 */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == wdata_0);

	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);

	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == wdata_1);

	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);

#endif
}

static void tegra_memctrl_set_overrides(void)
{
	tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
	const mc_txn_override_cfg_t *mc_txn_override_cfgs;
	uint32_t num_txn_override_cfgs;
	uint32_t i, val;

	/* Get the settings from the platform */
	assert(plat_mc_settings);
	mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
	num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;

	/*
	 * Set the MC_TXN_OVERRIDE registers for write clients.
	 */
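	/*
	 * CGID_TAG_ADR is only available from Tegra186 silicon rev. A02
	 * onwards, so simulation platforms and rev. A01 silicon keep the
	 * fixed CGID tag values programmed below instead.
	 */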
	if ((tegra_chipid_is_t186()) &&
	    (!tegra_platform_is_silicon() ||
	    (tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) {

		/*
		 * GPU and NVENC settings for Tegra186 simulation and
		 * Silicon rev. A01
		 */
		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
			val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);

		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
			val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);

		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
			val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);

	} else {

		/*
		 * Settings for Tegra186 silicon rev. A02 and onwards.
		 */
		for (i = 0; i < num_txn_override_cfgs; i++) {
			val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
			val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
			tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
				val | mc_txn_override_cfgs[i].cgid_tag);
		}
	}
}

/*
 * Init Memory controller during boot.
 */
void tegra_memctrl_setup(void)
{
	uint32_t val;
	const uint32_t *mc_streamid_override_regs;
	uint32_t num_streamid_override_regs;
	const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs;
	uint32_t num_streamid_sec_cfgs;
	tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
	uint32_t i;

	INFO("Tegra Memory Controller (v2)\n");

#if ENABLE_SMMU_DEVICE
	/* Program the SMMU pagesize */
	tegra_smmu_init();
#endif
	/* Get the settings from the platform */
	assert(plat_mc_settings);
	mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
	num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs;
	mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg;
	num_streamid_sec_cfgs = plat_mc_settings->num_streamid_security_cfgs;

	/* Program all the Stream ID overrides */
	for (i = 0; i < num_streamid_override_regs; i++)
		tegra_mc_streamid_write_32(mc_streamid_override_regs[i],
			MC_STREAM_ID_MAX);

	/* Program the security config settings for all Stream IDs */
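	/*
	 * Each security config register packs three fields: the override
	 * enable at bit 16, the client input override at bit 8 and the
	 * client NS flag at bit 0.
	 */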
	for (i = 0; i < num_streamid_sec_cfgs; i++) {
		val = mc_streamid_sec_cfgs[i].override_enable << 16 |
		      mc_streamid_sec_cfgs[i].override_client_inputs << 8 |
		      mc_streamid_sec_cfgs[i].override_client_ns_flag << 0;
		tegra_mc_streamid_write_32(mc_streamid_sec_cfgs[i].offset, val);
	}

	/*
	 * All requests at boot time, and certain requests during
	 * normal run time, are physically addressed and must bypass
	 * the SMMU. The client hub logic implements a hardware bypass
	 * path around the Translation Buffer Units (TBU). During
	 * boot-time, the SMMU_BYPASS_CTRL register (which defaults to
	 * TBU_BYPASS mode) will be used to steer all requests around
	 * the uninitialized TBUs. During normal operation, this register
	 * is locked into TBU_BYPASS_SID config, which routes requests
	 * with special StreamID 0x7f on the bypass path and all others
	 * through the selected TBU. This is done to disable SMMU Bypass
	 * mode, as it could be used to circumvent SMMU security checks.
	 */
	tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
			  MC_SMMU_BYPASS_CONFIG_SETTINGS);

	/*
	 * Re-configure MSS to allow ROC to deal with ordering of the
	 * Memory Controller traffic. This is needed as the Memory Controller
	 * boots with MSS having all control, but ROC provides a performance
	 * boost as compared to MSS.
	 */
	tegra_memctrl_reconfig_mss_clients();

	/* Program overrides for MC transactions */
	tegra_memctrl_set_overrides();
}

/*
 * Restore Memory Controller settings after "System Suspend"
 */
void tegra_memctrl_restore_settings(void)
{
	/*
	 * Re-configure MSS to allow ROC to deal with ordering of the
	 * Memory Controller traffic. This is needed as the Memory Controller
	 * resets during System Suspend with MSS having all control, but ROC
	 * provides a performance boost as compared to MSS.
	 */
	tegra_memctrl_reconfig_mss_clients();

	/* Program overrides for MC transactions */
	tegra_memctrl_set_overrides();

	/* video memory carveout region */
	if (video_mem_base) {
		tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
				  (uint32_t)video_mem_base);
		tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
				  (uint32_t)(video_mem_base >> 32));
		tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size_mb);

		/*
		 * MCE propagates the VideoMem configuration values across the
		 * CCPLEX.
		 */
		mce_update_gsc_videomem();
	}
}

/*
 * Secure the BL31 DRAM aperture.
 *
 * phys_base = physical base of TZDRAM aperture
 * size_in_bytes = size of aperture in bytes
 */
void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
	/*
	 * Setup the Memory controller to allow only secure accesses to
	 * the TZDRAM carveout
	 */
	INFO("Configuring TrustZone DRAM Memory Carveout\n");

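	/*
	 * CFG0 and CFG3 hold the low and high words of the carveout base
	 * address; CFG1 holds the carveout size in MB.
	 */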
	tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
	tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
	tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);

	/*
	 * When TZ encryption is enabled, TZDRAM must be set up before the
	 * CPU accesses the TZ carveout, otherwise the CPU will fetch
	 * non-decrypted data. So save the TZDRAM settings for restore by
	 * the SC7 resume FW.
	 */

	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO,
		      tegra_mc_read_32(MC_SECURITY_CFG0_0));
	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI,
		      tegra_mc_read_32(MC_SECURITY_CFG3_0));
	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI,
		      tegra_mc_read_32(MC_SECURITY_CFG1_0));

	/*
	 * MCE propagates the security configuration values across the
	 * CCPLEX.
	 */
	mce_update_gsc_tzdram();
}

/*
 * Secure the BL31 TZRAM aperture.
 *
 * phys_base = physical base of TZRAM aperture
 * size_in_bytes = size of aperture in bytes
 */
void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
	uint32_t index;
	uint32_t total_128kb_blocks = size_in_bytes >> 17;
	uint32_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
	uint32_t val;

	INFO("Configuring TrustZone SRAM Memory Carveout\n");

	/*
	 * Reset the access configuration registers to restrict access
	 * to the TZRAM aperture
	 */
	for (index = MC_TZRAM_CLIENT_ACCESS_CFG0;
	     index < ((uint32_t)MC_TZRAM_CARVEOUT_CFG + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
	     index += 4U) {
		tegra_mc_write_32(index, 0);
	}

	/*
	 * Set the TZRAM base. TZRAM base must be 4k aligned, at least.
	 */
	assert((phys_base & (uint64_t)0xFFF) == 0U);
	tegra_mc_write_32(MC_TZRAM_BASE_LO, (uint32_t)phys_base);
	tegra_mc_write_32(MC_TZRAM_BASE_HI,
			  (uint32_t)(phys_base >> 32) & MC_GSC_BASE_HI_MASK);

	/*
	 * Set the TZRAM size
	 *
	 * total size = (number of 128KB blocks) + (number of remaining 4KB
	 * blocks)
	 *
	 */
	val = (residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
	      total_128kb_blocks;
	tegra_mc_write_32(MC_TZRAM_SIZE, val);

Varun Wadekare6d43222016-05-25 16:35:04 -0700522 /*
523 * Lock the configuration settings by disabling TZ-only lock
524 * and locking the configuration against any future changes
525 * at all.
526 */
	val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG);
	val &= ~MC_GSC_ENABLE_TZ_LOCK_BIT;
	val |= MC_GSC_LOCK_CFG_SETTINGS_BIT;
	tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val);

	/*
	 * MCE propagates the security configuration values across the
	 * CCPLEX.
	 */
	mce_update_gsc_tzram();
}

static void tegra_lock_videomem_nonoverlap(uint64_t phys_base,
					   uint64_t size_in_bytes)
{
	uint32_t index;
	uint64_t total_128kb_blocks = size_in_bytes >> 17;
	uint64_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
	uint64_t val;

	/*
	 * Reset the access configuration registers to restrict access to
	 * old Videomem aperture
	 */
	for (index = MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0;
	     index < ((uint32_t)MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
	     index += 4U) {
		tegra_mc_write_32(index, 0);
	}

	/*
	 * Set the base. It must be 4k aligned, at least.
	 */
	assert((phys_base & (uint64_t)0xFFF) == 0U);
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, (uint32_t)phys_base);
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI,
			  (uint32_t)(phys_base >> 32) & (uint32_t)MC_GSC_BASE_HI_MASK);

	/*
	 * Set the aperture size
	 *
	 * total size = (number of 128KB blocks) + (number of remaining 4KB
	 * blocks)
	 *
	 */
	val = (uint32_t)((residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
			 total_128kb_blocks);
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, (uint32_t)val);

	/*
	 * Lock the configuration settings by enabling TZ-only lock and
	 * locking the configuration against any future changes from NS
	 * world.
	 */
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_CFG,
			  (uint32_t)MC_GSC_ENABLE_TZ_LOCK_BIT);

	/*
	 * MCE propagates the GSC configuration values across the
	 * CCPLEX.
	 */
}

static void tegra_unlock_videomem_nonoverlap(void)
{
	/* Clear the base */
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, 0);
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI, 0);

	/* Clear the size */
	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, 0);
}

static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
				 unsigned long long non_overlap_area_size)
{
	/*
	 * Map the NS memory first, clean it and then unmap it.
	 */
	mmap_add_dynamic_region(non_overlap_area_start, /* PA */
				non_overlap_area_start, /* VA */
				non_overlap_area_size, /* size */
				MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */

	zero_normalmem((void *)non_overlap_area_start, non_overlap_area_size);
	flush_dcache_range(non_overlap_area_start, non_overlap_area_size);

	mmap_remove_dynamic_region(non_overlap_area_start,
				   non_overlap_area_size);
}

/*
 * Program the Video Memory carveout region
 *
 * phys_base = physical base of aperture
 * size_in_bytes = size of aperture in bytes
 */
void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
	uintptr_t vmem_end_old = video_mem_base + (video_mem_size_mb << 20);
	uintptr_t vmem_end_new = phys_base + size_in_bytes;
	unsigned long long non_overlap_area_size;

	/*
	 * Setup the Memory controller to restrict CPU accesses to the Video
	 * Memory region
	 */
	INFO("Configuring Video Memory Carveout\n");

	/*
	 * Configure Memory Controller directly for the first time.
	 */
	if (video_mem_base == 0U)
		goto done;

	/*
	 * Lock the non-overlapping memory being cleared so that other
	 * masters do not accidentally write to it. The memory is unlocked
	 * once the non-overlapping region is cleared and the new memory
	 * settings take effect.
	 */
	tegra_lock_videomem_nonoverlap(video_mem_base,
				       video_mem_size_mb << 20);

	/*
	 * Clear the old regions now being exposed. The following cases
	 * can occur -
	 *
	 * 1. clear whole old region (no overlap with new region)
	 * 2. clear old sub-region below new base
	 * 3. clear old sub-region above new end
	 */
	INFO("Cleaning previous Video Memory Carveout\n");

	if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
		tegra_clear_videomem(video_mem_base,
				     (uint64_t)video_mem_size_mb << 20);
	} else {
		if (video_mem_base < phys_base) {
			non_overlap_area_size = phys_base - video_mem_base;
			tegra_clear_videomem(video_mem_base, non_overlap_area_size);
		}
		if (vmem_end_old > vmem_end_new) {
			non_overlap_area_size = vmem_end_old - vmem_end_new;
			tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
		}
	}

done:
	/* program the Videomem aperture */
	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
			  (uint32_t)(phys_base >> 32));
	tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);

	/* unlock the previously locked non-overlapping aperture */
	tegra_unlock_videomem_nonoverlap();

	/* store new values */
	video_mem_base = phys_base;
	video_mem_size_mb = size_in_bytes >> 20;

	/*
	 * MCE propagates the VideoMem configuration values across the
	 * CCPLEX.
	 */
	mce_update_gsc_videomem();
}

/*
 * This feature exists only for v1 of the Tegra Memory Controller.
 */
void tegra_memctrl_disable_ahb_redirection(void)
{
	; /* do nothing */
}