Tom Rini | 10e4779 | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 2 | /* |
| 3 | * (C) Copyright 2013 |
| 4 | * David Feng <fenghua@phytium.com.cn> |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 5 | */ |
| 6 | |
| 7 | #include <common.h> |
Simon Glass | 1ea9789 | 2020-05-10 11:40:00 -0600 | [diff] [blame] | 8 | #include <bootstage.h> |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 9 | #include <command.h> |
Simon Glass | 45c7890 | 2019-11-14 12:57:26 -0700 | [diff] [blame] | 10 | #include <time.h> |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 11 | #include <asm/system.h> |
Simon Glass | 4dcacfc | 2020-05-10 11:40:13 -0600 | [diff] [blame] | 12 | #include <linux/bitops.h> |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 13 | |
Andre Przywara | de223fd | 2016-11-03 00:56:25 +0000 | [diff] [blame] | 14 | DECLARE_GLOBAL_DATA_PTR; |
| 15 | |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 16 | /* |
| 17 | * Generic timer implementation of get_tbclk() |
| 18 | */ |
| 19 | unsigned long get_tbclk(void) |
| 20 | { |
| 21 | unsigned long cntfrq; |
| 22 | asm volatile("mrs %0, cntfrq_el0" : "=r" (cntfrq)); |
| 23 | return cntfrq; |
| 24 | } |
| 25 | |
Andre Przywara | 60b7865 | 2018-06-27 01:42:52 +0100 | [diff] [blame] | 26 | #ifdef CONFIG_SYS_FSL_ERRATUM_A008585 |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 27 | /* |
Andre Przywara | 60b7865 | 2018-06-27 01:42:52 +0100 | [diff] [blame] | 28 | * FSL erratum A-008585 says that the ARM generic timer counter "has the |
| 29 | * potential to contain an erroneous value for a small number of core |
| 30 | * clock cycles every time the timer value changes". |
| 31 | * This sometimes leads to a consecutive counter read returning a lower |
| 32 | * value than the previous one, thus reporting the time to go backwards. |
| 33 | * The workaround is to read the counter twice and only return when the value |
| 34 | * was the same in both reads. |
| 35 | * Assumes that the CPU runs in much higher frequency than the timer. |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 36 | */ |
| 37 | unsigned long timer_read_counter(void) |
| 38 | { |
| 39 | unsigned long cntpct; |
York Sun | a7686cf | 2015-03-20 19:28:05 -0700 | [diff] [blame] | 40 | unsigned long temp; |
Andre Przywara | 60b7865 | 2018-06-27 01:42:52 +0100 | [diff] [blame] | 41 | |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 42 | isb(); |
| 43 | asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct)); |
York Sun | a7686cf | 2015-03-20 19:28:05 -0700 | [diff] [blame] | 44 | asm volatile("mrs %0, cntpct_el0" : "=r" (temp)); |
| 45 | while (temp != cntpct) { |
| 46 | asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct)); |
| 47 | asm volatile("mrs %0, cntpct_el0" : "=r" (temp)); |
| 48 | } |
Andre Przywara | 60b7865 | 2018-06-27 01:42:52 +0100 | [diff] [blame] | 49 | |
David Feng | 85fd5f1 | 2013-12-14 11:47:35 +0800 | [diff] [blame] | 50 | return cntpct; |
| 51 | } |
Andre Przywara | d1de0bb | 2018-06-27 01:42:53 +0100 | [diff] [blame] | 52 | #elif CONFIG_SUNXI_A64_TIMER_ERRATUM |
| 53 | /* |
| 54 | * This erratum sometimes flips the lower 11 bits of the counter value |
| 55 | * to all 0's or all 1's, leading to jumps forwards or backwards. |
| 56 | * Backwards jumps might be interpreted all roll-overs and be treated as |
| 57 | * huge jumps forward. |
| 58 | * The workaround is to check whether the lower 11 bits of the counter are |
| 59 | * all 0 or all 1, then discard this value and read again. |
| 60 | * This occasionally discards valid values, but will catch all erroneous |
| 61 | * reads and fixes the problem reliably. Also this mostly requires only a |
| 62 | * single read, so does not have any significant overhead. |
| 63 | * The algorithm was conceived by Samuel Holland. |
| 64 | */ |
| 65 | unsigned long timer_read_counter(void) |
| 66 | { |
| 67 | unsigned long cntpct; |
| 68 | |
| 69 | isb(); |
| 70 | do { |
| 71 | asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct)); |
| 72 | } while (((cntpct + 1) & GENMASK(10, 0)) <= 1); |
| 73 | |
| 74 | return cntpct; |
| 75 | } |
Andre Przywara | 60b7865 | 2018-06-27 01:42:52 +0100 | [diff] [blame] | 76 | #else |
| 77 | /* |
| 78 | * timer_read_counter() using the Arm Generic Timer (aka arch timer). |
| 79 | */ |
| 80 | unsigned long timer_read_counter(void) |
| 81 | { |
| 82 | unsigned long cntpct; |
| 83 | |
| 84 | isb(); |
| 85 | asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct)); |
| 86 | |
| 87 | return cntpct; |
| 88 | } |
| 89 | #endif |
Aneesh Bansal | d1074b4 | 2015-12-08 13:54:26 +0530 | [diff] [blame] | 90 | |
Simon Glass | e987393 | 2017-04-05 17:53:17 -0600 | [diff] [blame] | 91 | uint64_t get_ticks(void) |
Andre Przywara | de223fd | 2016-11-03 00:56:25 +0000 | [diff] [blame] | 92 | { |
| 93 | unsigned long ticks = timer_read_counter(); |
| 94 | |
| 95 | gd->arch.tbl = ticks; |
| 96 | |
| 97 | return ticks; |
| 98 | } |
| 99 | |
/*
 * usec2ticks() - convert a microsecond interval to timer ticks
 *
 * For short intervals (< 1000 us) the conversion is rounded to the
 * nearest tick; for longer ones a coarser scaled computation is used
 * to avoid intermediate overflow.
 */
unsigned long usec2ticks(unsigned long usec)
{
	unsigned long tbclk = get_tbclk();

	if (usec < 1000)
		return ((usec * (tbclk / 1000)) + 500) / 1000;

	return (usec / 10) * (tbclk / 100000);
}
Michal Simek | 16d73b9 | 2018-05-15 16:47:02 +0200 | [diff] [blame] | 110 | |
| 111 | ulong timer_get_boot_us(void) |
| 112 | { |
| 113 | u64 val = get_ticks() * 1000000; |
| 114 | |
| 115 | return val / get_tbclk(); |
| 116 | } |