blob: 41a4ede80a1e600c9f631895860f42274970513d [file] [log] [blame]
Varun Wadekarcd5a2f52015-09-20 15:08:22 +05301/*
Harvey Hsiehb9b374f2016-11-15 22:04:51 +08002 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
Varun Wadekarcd5a2f52015-09-20 15:08:22 +05303 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
32#include <assert.h>
Varun Wadekarad45ef72017-04-03 13:44:57 -070033#include <bl_common.h>
Varun Wadekarcd5a2f52015-09-20 15:08:22 +053034#include <debug.h>
35#include <mce.h>
36#include <memctrl.h>
37#include <memctrl_v2.h>
38#include <mmio.h>
Varun Wadekar87e44ff2016-03-03 13:22:39 -080039#include <smmu.h>
Varun Wadekarcd5a2f52015-09-20 15:08:22 +053040#include <string.h>
41#include <tegra_def.h>
Varun Wadekare81177d2016-07-18 17:43:41 -070042#include <tegra_platform.h>
Varun Wadekarcd5a2f52015-09-20 15:08:22 +053043#include <xlat_tables.h>
44
Varun Wadekare60f1bf2016-02-17 10:10:50 -080045#define TEGRA_GPU_RESET_REG_OFFSET 0x30
46#define GPU_RESET_BIT (1 << 0)
47
Varun Wadekarcd5a2f52015-09-20 15:08:22 +053048/* Video Memory base and size (live values) */
49static uint64_t video_mem_base;
Varun Wadekar7058aee2016-04-25 09:01:46 -070050static uint64_t video_mem_size_mb;
Varun Wadekarcd5a2f52015-09-20 15:08:22 +053051
/*
 * Re-configure the Memory Subsystem (MSS) so that the ROC (relaxed
 * ordering coherency fabric) takes over ordering of boot and strongly
 * ordered client traffic from the MSS/TSA/PCFIFO/SMMU.
 *
 * The sequence is strictly ordered:
 *  1. flush + stall the affected clients via HOTRESET_FLUSH_ENABLE,
 *  2. remove TSA's MEMTYPE override (ROC becomes the single point of
 *     control for memory-type overrides),
 *  3. force SO/DEV traffic of those clients onto the coherent (ROC) path,
 *  4. drop PCFIFO and SMMU ordering for the same clients,
 *  5. de-assert the flush enables to let traffic flow through ROC.
 *
 * Compiled out entirely unless ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
 * is set by the platform makefile. No parameters, no return value.
 */
static void tegra_memctrl_reconfig_mss_clients(void)
{
#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
	uint32_t val, wdata_0, wdata_1;

	/*
	 * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
	 * boot and strongly ordered MSS clients to flush existing memory
	 * traffic and stall future requests.
	 */
	/* The register is expected to still hold its reset value here. */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);

	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
#if ENABLE_AFI_DEVICE
		  MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
#endif
		  MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);

	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

	/*
	 * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
	 * strongly ordered MSS clients. ROC needs to be single point
	 * of control on overriding the memory type. So, remove TSA's
	 * memtype override.
	 */
#if ENABLE_AFI_DEVICE
	mc_set_tsa_passthrough(AFIW);
#endif
	mc_set_tsa_passthrough(HDAW);
	mc_set_tsa_passthrough(SATAW);
	mc_set_tsa_passthrough(XUSB_HOSTW);
	mc_set_tsa_passthrough(XUSB_DEVW);
	mc_set_tsa_passthrough(SDMMCWAB);
	mc_set_tsa_passthrough(APEDMAW);
	mc_set_tsa_passthrough(SESWR);
	mc_set_tsa_passthrough(ETRW);
	mc_set_tsa_passthrough(AXISW);
	mc_set_tsa_passthrough(EQOSW);
	mc_set_tsa_passthrough(UFSHCW);
	mc_set_tsa_passthrough(BPMPDMAW);
	mc_set_tsa_passthrough(AONDMAW);
	mc_set_tsa_passthrough(SCEDMAW);

	/*
	 * Change COH_PATH_OVERRIDE_SO_DEV from NO_OVERRIDE -> FORCE_COHERENT
	 * for boot and strongly ordered MSS clients. This steers all sodev
	 * transactions to ROC.
	 *
	 * Change AXID_OVERRIDE/AXID_OVERRIDE_SO_DEV only for some clients
	 * whose AXI IDs we know and trust.
	 */

#if ENABLE_AFI_DEVICE
	/* Match AFIW */
	mc_set_forced_coherent_so_dev_cfg(AFIR);
#endif

	/*
	 * See bug 200131110 comment #35 - there are no normal requests
	 * and AWID for SO/DEV requests is hardcoded in RTL for a
	 * particular PCIE controller
	 */
#if ENABLE_AFI_DEVICE
	mc_set_forced_coherent_so_dev_cfg(AFIW);
#endif
	mc_set_forced_coherent_cfg(HDAR);
	mc_set_forced_coherent_cfg(HDAW);
	mc_set_forced_coherent_cfg(SATAR);
	mc_set_forced_coherent_cfg(SATAW);
	mc_set_forced_coherent_cfg(XUSB_HOSTR);
	mc_set_forced_coherent_cfg(XUSB_HOSTW);
	mc_set_forced_coherent_cfg(XUSB_DEVR);
	mc_set_forced_coherent_cfg(XUSB_DEVW);
	mc_set_forced_coherent_cfg(SDMMCRAB);
	mc_set_forced_coherent_cfg(SDMMCWAB);

	/* Match APEDMAW */
	mc_set_forced_coherent_axid_so_dev_cfg(APEDMAR);

	/*
	 * See bug 200131110 comment #35 - AWID for normal requests
	 * is 0x80 and AWID for SO/DEV requests is 0x01
	 */
	mc_set_forced_coherent_axid_so_dev_cfg(APEDMAW);
	mc_set_forced_coherent_cfg(SESRD);
	mc_set_forced_coherent_cfg(SESWR);
	mc_set_forced_coherent_cfg(ETRR);
	mc_set_forced_coherent_cfg(ETRW);
	mc_set_forced_coherent_cfg(AXISR);
	mc_set_forced_coherent_cfg(AXISW);
	mc_set_forced_coherent_cfg(EQOSR);
	mc_set_forced_coherent_cfg(EQOSW);
	mc_set_forced_coherent_cfg(UFSHCR);
	mc_set_forced_coherent_cfg(UFSHCW);
	mc_set_forced_coherent_cfg(BPMPDMAR);
	mc_set_forced_coherent_cfg(BPMPDMAW);
	mc_set_forced_coherent_cfg(AONDMAR);
	mc_set_forced_coherent_cfg(AONDMAW);
	mc_set_forced_coherent_cfg(SCEDMAR);
	mc_set_forced_coherent_cfg(SCEDMAW);

	/*
	 * At this point, ordering can occur at ROC. So, remove PCFIFO's
	 * control over ordering requests.
	 *
	 * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
	 * boot and strongly ordered MSS clients
	 */
	/* The helper macros return AND-masks, hence the chained '&'. */
	val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
		mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
#endif
		mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
		mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);

	val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
		mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);

	val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);

	val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
		mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);

	val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);

	/*
	 * At this point, ordering can occur at ROC. SMMU need not
	 * reorder any requests.
	 *
	 * Change SMMU_*_ORDERED_CLIENT from ORDERED -> UNORDERED
	 * for boot and strongly ordered MSS clients
	 */
	val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
		mc_set_smmu_unordered_boot_so_mss(1, AFIW) &
#endif
		mc_set_smmu_unordered_boot_so_mss(1, HDAW) &
		mc_set_smmu_unordered_boot_so_mss(1, SATAW);
	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val);

	val = MC_SMMU_CLIENT_CONFIG2_RESET_VAL &
		mc_set_smmu_unordered_boot_so_mss(2, XUSB_HOSTW) &
		mc_set_smmu_unordered_boot_so_mss(2, XUSB_DEVW);
	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG2, val);

	val = MC_SMMU_CLIENT_CONFIG3_RESET_VAL &
		mc_set_smmu_unordered_boot_so_mss(3, SDMMCWAB);
	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG3, val);

	val = MC_SMMU_CLIENT_CONFIG4_RESET_VAL &
		mc_set_smmu_unordered_boot_so_mss(4, SESWR) &
		mc_set_smmu_unordered_boot_so_mss(4, ETRW) &
		mc_set_smmu_unordered_boot_so_mss(4, AXISW) &
		mc_set_smmu_unordered_boot_so_mss(4, EQOSW) &
		mc_set_smmu_unordered_boot_so_mss(4, UFSHCW) &
		mc_set_smmu_unordered_boot_so_mss(4, BPMPDMAW) &
		mc_set_smmu_unordered_boot_so_mss(4, AONDMAW) &
		mc_set_smmu_unordered_boot_so_mss(4, SCEDMAW);
	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG4, val);

	val = MC_SMMU_CLIENT_CONFIG5_RESET_VAL &
		mc_set_smmu_unordered_boot_so_mss(5, APEDMAW);
	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG5, val);

	/*
	 * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
	 * clients to allow memory traffic from all clients to start passing
	 * through ROC
	 */
	/* Nothing else should have touched the flush enables meanwhile. */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == wdata_0);

	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == wdata_1);

	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

#endif
}
308
Varun Wadekarad45ef72017-04-03 13:44:57 -0700309static void tegra_memctrl_set_overrides(void)
310{
311 tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
312 const mc_txn_override_cfg_t *mc_txn_override_cfgs;
313 uint32_t num_txn_override_cfgs;
314 uint32_t i, val;
315
316 /* Get the settings from the platform */
317 assert(plat_mc_settings);
318 mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
319 num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;
320
321 /*
322 * Set the MC_TXN_OVERRIDE registers for write clients.
323 */
324 if ((tegra_chipid_is_t186()) &&
325 (!tegra_platform_is_silicon() ||
326 (tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) {
327
328 /*
329 * GPU and NVENC settings for Tegra186 simulation and
330 * Silicon rev. A01
331 */
332 val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
333 val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
334 tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
335 val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
336
337 val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
338 val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
339 tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
340 val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
341
342 val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
343 val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
344 tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
345 val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
346
347 } else {
348
349 /*
350 * Settings for Tegra186 silicon rev. A02 and onwards.
351 */
352 for (i = 0; i < num_txn_override_cfgs; i++) {
353 val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
354 val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
355 tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
356 val | mc_txn_override_cfgs[i].cgid_tag);
357 }
358 }
359}
360
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530361/*
Varun Wadekar87e44ff2016-03-03 13:22:39 -0800362 * Init Memory controller during boot.
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530363 */
364void tegra_memctrl_setup(void)
365{
366 uint32_t val;
Pritesh Raithatha9eb5db52017-01-02 19:42:31 +0530367 const uint32_t *mc_streamid_override_regs;
368 uint32_t num_streamid_override_regs;
369 const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs;
370 uint32_t num_streamid_sec_cfgs;
Pritesh Raithatha9eb5db52017-01-02 19:42:31 +0530371 tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
Varun Wadekarad45ef72017-04-03 13:44:57 -0700372 uint32_t i;
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530373
374 INFO("Tegra Memory Controller (v2)\n");
375
Varun Wadekar6cb25f92016-12-19 11:17:54 -0800376#if ENABLE_SMMU_DEVICE
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530377 /* Program the SMMU pagesize */
Varun Wadekar87e44ff2016-03-03 13:22:39 -0800378 tegra_smmu_init();
Varun Wadekar6cb25f92016-12-19 11:17:54 -0800379#endif
Pritesh Raithatha9eb5db52017-01-02 19:42:31 +0530380 /* Get the settings from the platform */
381 assert(plat_mc_settings);
382 mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
383 num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs;
384 mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg;
385 num_streamid_sec_cfgs = plat_mc_settings->num_streamid_security_cfgs;
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530386
387 /* Program all the Stream ID overrides */
Pritesh Raithatha9eb5db52017-01-02 19:42:31 +0530388 for (i = 0; i < num_streamid_override_regs; i++)
389 tegra_mc_streamid_write_32(mc_streamid_override_regs[i],
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530390 MC_STREAM_ID_MAX);
391
392 /* Program the security config settings for all Stream IDs */
Pritesh Raithatha9eb5db52017-01-02 19:42:31 +0530393 for (i = 0; i < num_streamid_sec_cfgs; i++) {
394 val = mc_streamid_sec_cfgs[i].override_enable << 16 |
395 mc_streamid_sec_cfgs[i].override_client_inputs << 8 |
396 mc_streamid_sec_cfgs[i].override_client_ns_flag << 0;
397 tegra_mc_streamid_write_32(mc_streamid_sec_cfgs[i].offset, val);
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530398 }
399
400 /*
401 * All requests at boot time, and certain requests during
402 * normal run time, are physically addressed and must bypass
403 * the SMMU. The client hub logic implements a hardware bypass
404 * path around the Translation Buffer Units (TBU). During
405 * boot-time, the SMMU_BYPASS_CTRL register (which defaults to
406 * TBU_BYPASS mode) will be used to steer all requests around
407 * the uninitialized TBUs. During normal operation, this register
408 * is locked into TBU_BYPASS_SID config, which routes requests
409 * with special StreamID 0x7f on the bypass path and all others
410 * through the selected TBU. This is done to disable SMMU Bypass
411 * mode, as it could be used to circumvent SMMU security checks.
412 */
413 tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
Pritesh Raithatha9eb5db52017-01-02 19:42:31 +0530414 MC_SMMU_BYPASS_CONFIG_SETTINGS);
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530415
Varun Wadekarc9ac3e42016-02-17 15:07:49 -0800416 /*
Varun Wadekara0f26972016-03-11 17:18:51 -0800417 * Re-configure MSS to allow ROC to deal with ordering of the
418 * Memory Controller traffic. This is needed as the Memory Controller
419 * boots with MSS having all control, but ROC provides a performance
420 * boost as compared to MSS.
421 */
422 tegra_memctrl_reconfig_mss_clients();
423
Varun Wadekarad45ef72017-04-03 13:44:57 -0700424 /* Program overrides for MC transactions */
425 tegra_memctrl_set_overrides();
Varun Wadekar87e44ff2016-03-03 13:22:39 -0800426}
Varun Wadekarc9ac3e42016-02-17 15:07:49 -0800427
Varun Wadekar87e44ff2016-03-03 13:22:39 -0800428/*
429 * Restore Memory Controller settings after "System Suspend"
430 */
431void tegra_memctrl_restore_settings(void)
432{
Varun Wadekara0f26972016-03-11 17:18:51 -0800433 /*
434 * Re-configure MSS to allow ROC to deal with ordering of the
435 * Memory Controller traffic. This is needed as the Memory Controller
436 * resets during System Suspend with MSS having all control, but ROC
437 * provides a performance boost as compared to MSS.
438 */
439 tegra_memctrl_reconfig_mss_clients();
440
Varun Wadekarad45ef72017-04-03 13:44:57 -0700441 /* Program overrides for MC transactions */
442 tegra_memctrl_set_overrides();
443
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530444 /* video memory carveout region */
445 if (video_mem_base) {
446 tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
447 (uint32_t)video_mem_base);
448 tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
449 (uint32_t)(video_mem_base >> 32));
Varun Wadekar7058aee2016-04-25 09:01:46 -0700450 tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size_mb);
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530451
452 /*
453 * MCE propogates the VideoMem configuration values across the
454 * CCPLEX.
455 */
456 mce_update_gsc_videomem();
457 }
458}
459
460/*
461 * Secure the BL31 DRAM aperture.
462 *
463 * phys_base = physical base of TZDRAM aperture
464 * size_in_bytes = size of aperture in bytes
465 */
466void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
467{
468 /*
469 * Setup the Memory controller to allow only secure accesses to
470 * the TZDRAM carveout
471 */
472 INFO("Configuring TrustZone DRAM Memory Carveout\n");
473
474 tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
475 tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
476 tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
477
478 /*
Harvey Hsiehc95802d2016-07-29 20:10:59 +0800479 * When TZ encryption enabled,
480 * We need setup TZDRAM before CPU to access TZ Carveout,
481 * otherwise CPU will fetch non-decrypted data.
482 * So save TZDRAM setting for retore by SC7 resume FW.
483 */
484
485 mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO,
486 tegra_mc_read_32(MC_SECURITY_CFG0_0));
487 mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI,
488 tegra_mc_read_32(MC_SECURITY_CFG3_0));
489 mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI,
490 tegra_mc_read_32(MC_SECURITY_CFG1_0));
491
492 /*
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530493 * MCE propogates the security configuration values across the
494 * CCPLEX.
495 */
496 mce_update_gsc_tzdram();
497}
498
499/*
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800500 * Secure the BL31 TZRAM aperture.
501 *
502 * phys_base = physical base of TZRAM aperture
503 * size_in_bytes = size of aperture in bytes
504 */
505void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
506{
Varun Wadekare6d43222016-05-25 16:35:04 -0700507 uint32_t index;
508 uint32_t total_128kb_blocks = size_in_bytes >> 17;
509 uint32_t residual_4kb_blocks = (size_in_bytes & 0x1FFFF) >> 12;
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800510 uint32_t val;
511
512 /*
Varun Wadekare6d43222016-05-25 16:35:04 -0700513 * Reset the access configuration registers to restrict access
514 * to the TZRAM aperture
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800515 */
Varun Wadekare6d43222016-05-25 16:35:04 -0700516 for (index = MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG0;
517 index <= MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS5;
518 index += 4)
519 tegra_mc_write_32(index, 0);
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800520
521 /*
Varun Wadekare6d43222016-05-25 16:35:04 -0700522 * Set the TZRAM base. TZRAM base must be 4k aligned, at least.
523 */
524 assert(!(phys_base & 0xFFF));
525 tegra_mc_write_32(MC_TZRAM_BASE_LO, (uint32_t)phys_base);
526 tegra_mc_write_32(MC_TZRAM_BASE_HI,
527 (uint32_t)(phys_base >> 32) & TZRAM_BASE_HI_MASK);
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800528
Varun Wadekare6d43222016-05-25 16:35:04 -0700529 /*
530 * Set the TZRAM size
531 *
532 * total size = (number of 128KB blocks) + (number of remaining 4KB
533 * blocks)
534 *
535 */
536 val = (residual_4kb_blocks << TZRAM_SIZE_RANGE_4KB_SHIFT) |
537 total_128kb_blocks;
538 tegra_mc_write_32(MC_TZRAM_SIZE, val);
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800539
Varun Wadekare6d43222016-05-25 16:35:04 -0700540 /*
541 * Lock the configuration settings by disabling TZ-only lock
542 * and locking the configuration against any future changes
543 * at all.
544 */
545 val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG);
546 val &= ~TZRAM_ENABLE_TZ_LOCK_BIT;
547 val |= TZRAM_LOCK_CFG_SETTINGS_BIT;
548 tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val);
Varun Wadekar13e7dc42015-12-30 15:15:08 -0800549
550 /*
551 * MCE propogates the security configuration values across the
552 * CCPLEX.
553 */
554 mce_update_gsc_tzram();
555}
556
557/*
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530558 * Program the Video Memory carveout region
559 *
560 * phys_base = physical base of aperture
561 * size_in_bytes = size of aperture in bytes
562 */
563void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
564{
Varun Wadekare60f1bf2016-02-17 10:10:50 -0800565 uint32_t regval;
566
567 /*
568 * The GPU is the user of the Video Memory region. In order to
569 * transition to the new memory region smoothly, we program the
570 * new base/size ONLY if the GPU is in reset mode.
571 */
572 regval = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET);
573 if ((regval & GPU_RESET_BIT) == 0) {
574 ERROR("GPU not in reset! Video Memory setup failed\n");
575 return;
576 }
577
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530578 /*
579 * Setup the Memory controller to restrict CPU accesses to the Video
580 * Memory region
581 */
582 INFO("Configuring Video Memory Carveout\n");
583
584 tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
585 tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
586 (uint32_t)(phys_base >> 32));
Varun Wadekar7058aee2016-04-25 09:01:46 -0700587 tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530588
589 /* store new values */
590 video_mem_base = phys_base;
Varun Wadekar7058aee2016-04-25 09:01:46 -0700591 video_mem_size_mb = size_in_bytes >> 20;
Varun Wadekarcd5a2f52015-09-20 15:08:22 +0530592
593 /*
594 * MCE propogates the VideoMem configuration values across the
595 * CCPLEX.
596 */
597 mce_update_gsc_videomem();
598}
Varun Wadekarc92050b2017-03-29 14:57:29 -0700599
/*
 * AHB redirection only exists in v1 of the Tegra Memory Controller;
 * on v2 there is nothing to disable, so this is a no-op stub kept
 * for interface compatibility.
 */
void tegra_memctrl_disable_ahb_redirection(void)
{
}