blob: e9961c38f22bef615366d175b82ba7ef5927546a [file] [log] [blame]
developer1ec68072024-10-17 14:25:33 +08001From 85fa9116d39817791f3da4dd642549db8916cb8e Mon Sep 17 00:00:00 2001
2From: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
3Date: Tue, 30 Jan 2024 12:29:37 +0800
4Subject: [PATCH] 999-1715-v6.2-net-ptp-introduce-adjust-by-scaled-ppm.patch
5
6---
7 drivers/ptp/ptp_clock.c | 21 ------
8 include/linux/math64.h | 12 ++++
9 include/linux/ptp_clock_kernel.h | 72 +++++++++++++++++++
10 lib/math/div64.c | 42 +++++++++++
11 4 files changed, 126 insertions(+), 21 deletions(-)
12
13diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
14index eedf067..5cca99f 100644
15--- a/drivers/ptp/ptp_clock.c
16+++ b/drivers/ptp/ptp_clock.c
17@@ -63,27 +63,6 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
18 spin_unlock_irqrestore(&queue->lock, flags);
19 }
20
21-long scaled_ppm_to_ppb(long ppm)
22-{
23- /*
24- * The 'freq' field in the 'struct timex' is in parts per
25- * million, but with a 16 bit binary fractional field.
26- *
27- * We want to calculate
28- *
29- * ppb = scaled_ppm * 1000 / 2^16
30- *
31- * which simplifies to
32- *
33- * ppb = scaled_ppm * 125 / 2^13
34- */
35- s64 ppb = 1 + ppm;
36- ppb *= 125;
37- ppb >>= 13;
38- return (long) ppb;
39-}
40-EXPORT_SYMBOL(scaled_ppm_to_ppb);
41-
42 /* posix clock implementation */
43
44 static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
45diff --git a/include/linux/math64.h b/include/linux/math64.h
46index 65bef21..a593096 100644
47--- a/include/linux/math64.h
48+++ b/include/linux/math64.h
49@@ -281,6 +281,18 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
50 }
51 #endif /* mul_u64_u32_div */
52
53+u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
54+
55+/**
56+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
57+ * @ll: unsigned 64bit dividend
58+ * @d: unsigned 64bit divisor
59+ *
60+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
61+ * and round up.
62+ *
63+ * Return: dividend / divisor rounded up
64+ */
65 #define DIV64_U64_ROUND_UP(ll, d) \
66 ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
67
68diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
69index 874f7e7..2ff9afe 100644
70--- a/include/linux/ptp_clock_kernel.h
71+++ b/include/linux/ptp_clock_kernel.h
72@@ -169,6 +169,78 @@ struct ptp_clock_event {
73 };
74 };
75
76+/**
77+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
78+ *
79+ * @ppm: Parts per million, but with a 16 bit binary fractional field
80+ */
81+static inline long scaled_ppm_to_ppb(long ppm)
82+{
83+ /*
84+ * The 'freq' field in the 'struct timex' is in parts per
85+ * million, but with a 16 bit binary fractional field.
86+ *
87+ * We want to calculate
88+ *
89+ * ppb = scaled_ppm * 1000 / 2^16
90+ *
91+ * which simplifies to
92+ *
93+ * ppb = scaled_ppm * 125 / 2^13
94+ */
95+ s64 ppb = 1 + ppm;
96+
97+ ppb *= 125;
98+ ppb >>= 13;
99+ return (long)ppb;
100+}
101+
102+/**
103+ * diff_by_scaled_ppm - Calculate difference using scaled ppm
104+ * @base: the base increment value to adjust
105+ * @scaled_ppm: scaled parts per million to adjust by
106+ * @diff: on return, the absolute value of calculated diff
107+ *
108+ * Calculate the difference to adjust the base increment using scaled parts
109+ * per million.
110+ *
111+ * Use mul_u64_u64_div_u64 to perform the difference calculation to avoid
112+ * possible overflow.
113+ *
114+ * Returns: true if scaled_ppm is negative, false otherwise
115+ */
116+static inline bool diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff)
117+{
118+ bool negative = false;
119+
120+ if (scaled_ppm < 0) {
121+ negative = true;
122+ scaled_ppm = -scaled_ppm;
123+ }
124+
125+ *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, 1000000ULL << 16);
126+
127+ return negative;
128+}
129+
130+/**
131+ * adjust_by_scaled_ppm - Adjust a base increment by scaled parts per million
132+ * @base: the base increment value to adjust
133+ * @scaled_ppm: scaled parts per million frequency adjustment
134+ *
135+ * Helper function which calculates a new increment value based on the
136+ * requested scaled parts per million adjustment.
137+ */
138+static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
139+{
140+ u64 diff;
141+
142+ if (diff_by_scaled_ppm(base, scaled_ppm, &diff))
143+ return base - diff;
144+
145+ return base + diff;
146+}
147+
148 #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
149
150 /**
151diff --git a/lib/math/div64.c b/lib/math/div64.c
152index 368ca7f..edd1090 100644
153--- a/lib/math/div64.c
154+++ b/lib/math/div64.c
155@@ -190,3 +190,45 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
156 return __iter_div_u64_rem(dividend, divisor, remainder);
157 }
158 EXPORT_SYMBOL(iter_div_u64_rem);
159+
160+#ifndef mul_u64_u64_div_u64
161+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
162+{
163+ u64 res = 0, div, rem;
164+ int shift;
165+
166+ /* can a * b overflow ? */
167+ if (ilog2(a) + ilog2(b) > 62) {
168+ /*
169+ * (b * a) / c is equal to
170+ *
171+ * (b / c) * a +
172+ * (b % c) * a / c
173+ *
174+ * if nothing overflows. Can the 1st multiplication
175+ * overflow? Yes, but we do not care: this can only
176+ * happen if the end result can't fit in u64 anyway.
177+ *
178+ * So the code below does
179+ *
180+ * res = (b / c) * a;
181+ * b = b % c;
182+ */
183+ div = div64_u64_rem(b, c, &rem);
184+ res = div * a;
185+ b = rem;
186+
187+ shift = ilog2(a) + ilog2(b) - 62;
188+ if (shift > 0) {
189+ /* drop precision */
190+ b >>= shift;
191+ c >>= shift;
192+ if (!c)
193+ return res;
194+ }
195+ }
196+
197+ return res + div64_u64(a * b, c);
198+}
199+EXPORT_SYMBOL(mul_u64_u64_div_u64);
200+#endif
201--
2022.18.0