/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <denver.h>
#include <errno.h>
#include <lib/mmio.h>
#include <mce_private.h>
#include <platform_def.h>
#include <t194_nvg.h>
#include <tegra_private.h>

#define ID_AFR0_EL1_CACHE_OPS_SHIFT	12
#define ID_AFR0_EL1_CACHE_OPS_MASK	0xFU
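
/*
 * Note: each helper below selects a TEGRA_NVG_CHANNEL_* request with
 * nvg_set_request() or nvg_set_request_data(); query-style requests read
 * the response back with nvg_get_result(). The ID_AFR0_EL1 field defined
 * above (bits [15:12]) is checked by the cache maintenance helpers to
 * determine whether cache operations through MTS are supported.
 */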
/*
 * Reports the major and minor version of this interface.
 *
 * NVGDATA[0:31]: SW(R) Minor Version
 * NVGDATA[32:63]: SW(R) Major Version
 */
uint64_t nvg_get_version(void)
{
	nvg_set_request(TEGRA_NVG_CHANNEL_VERSION);

	return (uint64_t)nvg_get_result();
}

/*
 * Enable the perf per watt mode.
 *
 * NVGDATA[0]: SW(RW), 1 = enable perf per watt mode
 */
int32_t nvg_enable_power_perf_mode(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_PERF, 1U);

	return 0;
}

/*
 * Disable the perf per watt mode.
 *
 * NVGDATA[0]: SW(RW), 0 = disable perf per watt mode
 */
int32_t nvg_disable_power_perf_mode(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_PERF, 0U);

	return 0;
}

/*
 * Enable the battery saver mode.
 *
 * NVGDATA[2]: SW(RW), 1 = enable battery saver mode
 */
int32_t nvg_enable_power_saver_modes(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_MODES, 1U);

	return 0;
}

/*
 * Disable the battery saver mode.
 *
 * NVGDATA[2]: SW(RW), 0 = disable battery saver mode
 */
int32_t nvg_disable_power_saver_modes(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_MODES, 0U);

	return 0;
}

/*
 * Set the expected wake time in TSC ticks for the next low-power state the
 * core enters.
 *
 * NVGDATA[0:31]: SW(RW), WAKE_TIME
 */
void nvg_set_wake_time(uint32_t wake_time)
{
	/* time (TSC ticks) until the core is expected to get a wake event */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, (uint64_t)wake_time);
}

/*
 * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
 * SYSTEM_CSTATE values.
 *
 * NVGDATA[0:2]: SW(RW), CLUSTER_CSTATE
 * NVGDATA[7]: SW(W), update cluster flag
 * NVGDATA[8:9]: SW(RW), CG_CSTATE
 * NVGDATA[15]: SW(W), update ccplex flag
 * NVGDATA[16:19]: SW(RW), SYSTEM_CSTATE
 * NVGDATA[23]: SW(W), update system flag
 * NVGDATA[31]: SW(W), update wake mask flag
 * NVGDATA[32:63]: SW(RW), WAKE_MASK
 */
void nvg_update_cstate_info(uint32_t cluster, uint32_t ccplex,
		uint32_t system, uint32_t wake_mask, uint8_t update_wake_mask)
{
	uint64_t val = 0;

	/* update CLUSTER_CSTATE? */
	if (cluster != 0U) {
		val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
			CLUSTER_CSTATE_UPDATE_BIT;
	}

	/* update CCPLEX_CSTATE? */
	if (ccplex != 0U) {
		val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
			CCPLEX_CSTATE_UPDATE_BIT;
	}

	/* update SYSTEM_CSTATE? */
	if (system != 0U) {
		val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
			SYSTEM_CSTATE_UPDATE_BIT;
	}

	/* update wake mask value? */
	if (update_wake_mask != 0U) {
		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
	}

	/* set the wake mask */
	val |= ((uint64_t)wake_mask & CSTATE_WAKE_MASK_CLEAR) << CSTATE_WAKE_MASK_SHIFT;

	/* set the updated cstate info */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
}

/*
 * These indices give MTS the crossover point, in TSC ticks, beyond which it
 * is no longer viable to enter the named state.
 *
 * Type 5 : NVGDATA[0:31]: C6 Lower bound
 * Type 6 : NVGDATA[0:31]: CC6 Lower bound
 * Type 8 : NVGDATA[0:31]: CG7 Lower bound
 */
int32_t nvg_update_crossover_time(uint32_t type, uint32_t time)
{
	int32_t ret = 0;

	switch (type) {
	case TEGRA_NVG_CHANNEL_CROSSOVER_C6_LOWER_BOUND:
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C6_LOWER_BOUND,
			(uint64_t)time);
		break;

	case TEGRA_NVG_CHANNEL_CROSSOVER_CC6_LOWER_BOUND:
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_CC6_LOWER_BOUND,
			(uint64_t)time);
		break;

	case TEGRA_NVG_CHANNEL_CROSSOVER_CG7_LOWER_BOUND:
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_CG7_LOWER_BOUND,
			(uint64_t)time);
		break;

	default:
		ERROR("%s: unknown crossover type (%d)\n", __func__, type);
		ret = EINVAL;
		break;
	}

	return ret;
}

/*
 * These NVG calls allow ARM SW to access CSTATE statistical information
 *
 * NVGDATA[0:3]: SW(RW) Core/cluster/cg id
 * NVGDATA[16:31]: SW(RW) Stat id
 */
int32_t nvg_set_cstate_stat_query_value(uint64_t data)
{
	int32_t ret = 0;

	/* sanity check stat id and core id */
	if ((data >> MCE_STAT_ID_SHIFT) >
		(uint64_t)NVG_STAT_QUERY_C7_RESIDENCY_SUM) {
		ERROR("%s: unknown stat id (%d)\n", __func__,
			(uint32_t)(data >> MCE_STAT_ID_SHIFT));
		ret = EINVAL;
	} else if ((data & MCE_CORE_ID_MASK) > (uint64_t)PLATFORM_CORE_COUNT) {
		ERROR("%s: unknown core id (%d)\n", __func__,
			(uint32_t)(data & MCE_CORE_ID_MASK));
		ret = EINVAL;
	} else {
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STAT_QUERY_REQUEST, data);
	}

	return ret;
}

/*
 * The read-only value associated with the CSTATE_STAT_QUERY_REQUEST
 *
 * NVGDATA[0:63]: SW(R) Stat count
 */
uint64_t nvg_get_cstate_stat_query_value(void)
{
	nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STAT_QUERY_VALUE);

	return (uint64_t)nvg_get_result();
}

/*
 * Return a non-zero value if the CCPLEX is able to enter SC7
 *
 * NVGDATA[0]: SW(R), Is allowed result
 */
int32_t nvg_is_sc7_allowed(void)
{
	/* issue command to check if SC7 is allowed */
	nvg_set_request(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED);

	/* 1 = SC7 allowed, 0 = SC7 not allowed */
	return (int32_t)nvg_get_result();
}

/*
 * Wake an offlined logical core. Note that a core is offlined by entering
 * a C-state where the WAKE_MASK is all 0.
 *
 * NVGDATA[0:3]: SW(W) logical core to online
 */
int32_t nvg_online_core(uint32_t core)
{
	int32_t ret = 0;

	/* sanity check the core ID value */
	if (core > (uint32_t)PLATFORM_CORE_COUNT) {
		ERROR("%s: unknown core id (%d)\n", __func__, core);
		ret = EINVAL;
	} else {
		/* get a core online */
		nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
			(uint64_t)core & MCE_CORE_ID_MASK);
	}

	return ret;
}

/*
 * Enables and controls the voltage/frequency hint for CC3. CC3 is disabled
 * by default.
 *
 * NVGDATA[7:0] SW(RW) frequency request
 * NVGDATA[31:31] SW(RW) enable bit
 */
int32_t nvg_cc3_ctrl(uint32_t freq, uint8_t enable)
{
	uint64_t val = 0;

	/*
	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
	 * the SW visible frequency request registers for all non
	 * floorswept cores valid independent of StandbyWFI and disabling
	 * the IDLE frequency request register. If set, Auto-CC3
	 * will be enabled by setting the ARM SW visible frequency
	 * request registers for all non floorswept cores to be enabled by
	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
	 * frequency request register enabled.
	 */
	if (enable != 0U) {
		val = ((uint64_t)freq & MCE_AUTO_CC3_FREQ_MASK) | MCE_AUTO_CC3_ENABLE_BIT;
	}
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);

	return 0;
}

/*
 * MC GSC (General Security Carveout) register values are expected to be
 * changed by TrustZone ARM code after boot.
 *
 * NVGDATA[0:15] SW(R) GSC enum
 */
int32_t nvg_update_ccplex_gsc(uint32_t gsc_idx)
{
	int32_t ret = 0;

	/* sanity check GSC ID */
	if (gsc_idx > (uint32_t)TEGRA_NVG_CHANNEL_UPDATE_GSC_VPR) {
		ERROR("%s: unknown gsc_idx (%u)\n", __func__, gsc_idx);
		ret = EINVAL;
	} else {
		nvg_set_request_data(TEGRA_NVG_CHANNEL_UPDATE_CCPLEX_GSC,
			(uint64_t)gsc_idx);
	}

	return ret;
}
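
/*
 * The cache maintenance helpers below only issue their operation when
 * ID_AFR0_EL1 reports that cache flush through MTS is supported; otherwise
 * they fail with EINVAL.
 */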

/*
 * Cache clean operation for all CCPLEX caches.
 */
int32_t nvg_roc_clean_cache(void)
{
	int32_t ret = 0;

	/* check if cache flush through mts is supported */
	if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
			ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
		if (nvg_cache_clean() == 0U) {
			ERROR("%s: failed\n", __func__);
			ret = EINVAL;
		}
	} else {
		ret = EINVAL;
	}
	return ret;
}

/*
 * Cache clean and invalidate operation for all CCPLEX caches.
 */
int32_t nvg_roc_flush_cache(void)
{
	int32_t ret = 0;

	/* check if cache flush through mts is supported */
	if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
			ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
		if (nvg_cache_clean_inval() == 0U) {
			ERROR("%s: failed\n", __func__);
			ret = EINVAL;
		}
	} else {
		ret = EINVAL;
	}
	return ret;
}

/*
 * Cache clean and invalidate, clear TR-bit operation for all CCPLEX caches.
 */
int32_t nvg_roc_clean_cache_trbits(void)
{
	int32_t ret = 0;

	/* check if cache flush through mts is supported */
	if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
			ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
		if (nvg_cache_inval_all() == 0U) {
			ERROR("%s: failed\n", __func__);
			ret = EINVAL;
		}
	} else {
		ret = EINVAL;
	}
	return ret;
}

/*
 * Set the power state for a core
 */
int32_t nvg_enter_cstate(uint32_t state, uint32_t wake_time)
{
	int32_t ret = 0;
	uint64_t val = 0ULL;

	/* check for allowed power state */
	if ((state != (uint32_t)TEGRA_NVG_CORE_C0) &&
	    (state != (uint32_t)TEGRA_NVG_CORE_C1) &&
	    (state != (uint32_t)TEGRA_NVG_CORE_C6) &&
	    (state != (uint32_t)TEGRA_NVG_CORE_C7)) {
		ERROR("%s: unknown cstate (%d)\n", __func__, state);
		ret = EINVAL;
	} else {
		/* time (TSC ticks) until the core is expected to get a wake event */
		nvg_set_wake_time(wake_time);

		/* set the core cstate */
		val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
		write_actlr_el1(val | (uint64_t)state);
	}

	return ret;
}