// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments K3 clock driver
 *
 * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
 *	Tero Kristo <t-kristo@ti.com>
 */
8
#include <common.h>
#include <dm.h>
#include <errno.h>
#include <soc.h>
#include <clk-uclass.h>
#include <k3-avs.h>
#include "k3-clk.h"
16
17#define PLL_MIN_FREQ 800000000
18#define PLL_MAX_FREQ 3200000000UL
19#define PLL_MAX_DIV 127
20
21/**
22 * struct clk_map - mapping from dev/clk id tuples towards physical clocks
23 * @dev_id: device ID for the clock
24 * @clk_id: clock ID for the clock
25 * @clk: pointer to the registered clock entry for the mapping
26 */
27struct clk_map {
28 u16 dev_id;
29 u32 clk_id;
30 struct clk *clk;
31};
32
33/**
34 * struct ti_clk_data - clock controller information structure
35 * @map: mapping from dev/clk id tuples to physical clock entries
36 * @size: number of entries in the map
37 */
38struct ti_clk_data {
39 struct clk_map *map;
40 int size;
41};
42
43static ulong osc_freq;
44
45static void clk_add_map(struct ti_clk_data *data, struct clk *clk,
46 u32 dev_id, u32 clk_id)
47{
48 struct clk_map *map;
49
50 debug("%s: added clk=%p, data=%p, dev=%d, clk=%d\n", __func__,
51 clk, data, dev_id, clk_id);
52 if (!clk)
53 return;
54
55 map = data->map + data->size++;
56
57 map->dev_id = dev_id;
58 map->clk_id = clk_id;
59 map->clk = clk;
60}
61
62static const struct soc_attr ti_k3_soc_clk_data[] = {
63#if IS_ENABLED(CONFIG_SOC_K3_J721E)
64 {
65 .family = "J721E",
66 .data = &j721e_clk_platdata,
67 },
68 {
69 .family = "J7200",
70 .data = &j7200_clk_platdata,
71 },
David Huange04854b2022-01-25 20:56:33 +053072#elif CONFIG_SOC_K3_J721S2
73 {
74 .family = "J721S2",
75 .data = &j721s2_clk_platdata,
76 },
Tero Kristo82ceb0d2021-06-11 11:45:14 +030077#endif
Suman Annaa9768c92022-05-25 13:38:43 +053078#ifdef CONFIG_SOC_K3_AM625
79 {
80 .family = "AM62X",
81 .data = &am62x_clk_platdata,
82 },
83#endif
Bryan Brattlof2d982b72022-11-03 19:13:56 -050084#ifdef CONFIG_SOC_K3_AM62A7
85 {
86 .family = "AM62AX",
87 .data = &am62ax_clk_platdata,
88 },
89#endif
Apurva Nandanb93ab922024-02-24 01:51:44 +053090#ifdef CONFIG_SOC_K3_J784S4
91 {
92 .family = "J784S4",
93 .data = &j784s4_clk_platdata,
94 },
95#endif
Bryan Brattlof9a83dcd2024-03-12 15:20:21 -050096#ifdef CONFIG_SOC_K3_AM62P5
97 {
98 .family = "AM62PX",
99 .data = &am62px_clk_platdata,
100 },
101#endif
Tero Kristo82ceb0d2021-06-11 11:45:14 +0300102 { /* sentinel */ }
103};
104
105static int ti_clk_probe(struct udevice *dev)
106{
107 struct ti_clk_data *data = dev_get_priv(dev);
108 struct clk *clk;
109 const char *name;
110 const struct clk_data *ti_clk_data;
111 int i, j;
112 const struct soc_attr *soc_match_data;
113 const struct ti_k3_clk_platdata *pdata;
114
115 debug("%s(dev=%p)\n", __func__, dev);
116
117 soc_match_data = soc_device_match(ti_k3_soc_clk_data);
118 if (!soc_match_data)
119 return -ENODEV;
120
121 pdata = (const struct ti_k3_clk_platdata *)soc_match_data->data;
122
123 data->map = kcalloc(pdata->soc_dev_clk_data_cnt, sizeof(*data->map),
124 GFP_KERNEL);
125 data->size = 0;
126
127 for (i = 0; i < pdata->clk_list_cnt; i++) {
128 ti_clk_data = &pdata->clk_list[i];
129
130 switch (ti_clk_data->type) {
131 case CLK_TYPE_FIXED_RATE:
132 name = ti_clk_data->clk.fixed_rate.name;
133 clk = clk_register_fixed_rate(NULL,
134 name,
135 ti_clk_data->clk.fixed_rate.rate);
136 break;
137 case CLK_TYPE_DIV:
138 name = ti_clk_data->clk.div.name;
139 clk = clk_register_divider(NULL, name,
140 ti_clk_data->clk.div.parent,
141 ti_clk_data->clk.div.flags,
142 map_physmem(ti_clk_data->clk.div.reg, 0, MAP_NOCACHE),
143 ti_clk_data->clk.div.shift,
144 ti_clk_data->clk.div.width,
Suman Annadb4c2dc2021-09-07 17:16:58 -0500145 ti_clk_data->clk.div.div_flags);
Tero Kristo82ceb0d2021-06-11 11:45:14 +0300146 break;
147 case CLK_TYPE_MUX:
148 name = ti_clk_data->clk.mux.name;
149 clk = clk_register_mux(NULL, name,
150 ti_clk_data->clk.mux.parents,
151 ti_clk_data->clk.mux.num_parents,
152 ti_clk_data->clk.mux.flags,
153 map_physmem(ti_clk_data->clk.mux.reg, 0, MAP_NOCACHE),
154 ti_clk_data->clk.mux.shift,
155 ti_clk_data->clk.mux.width,
156 0);
157 break;
158 case CLK_TYPE_PLL:
159 name = ti_clk_data->clk.pll.name;
160 clk = clk_register_ti_pll(name,
161 ti_clk_data->clk.pll.parent,
162 map_physmem(ti_clk_data->clk.pll.reg, 0, MAP_NOCACHE));
163
164 if (!osc_freq)
165 osc_freq = clk_get_rate(clk_get_parent(clk));
166 break;
167 default:
168 name = NULL;
169 clk = NULL;
170 printf("WARNING: %s has encountered unknown clk type %d\n",
171 __func__, ti_clk_data->type);
172 }
173
174 if (clk && ti_clk_data->default_freq)
175 clk_set_rate(clk, ti_clk_data->default_freq);
176
177 if (clk && name) {
178 for (j = 0; j < pdata->soc_dev_clk_data_cnt; j++) {
179 if (!strcmp(name, pdata->soc_dev_clk_data[j].clk_name)) {
180 clk_add_map(data, clk, pdata->soc_dev_clk_data[j].dev_id,
181 pdata->soc_dev_clk_data[j].clk_id);
182 }
183 }
184 }
185 }
186
187 return 0;
188}
189
190static int _clk_cmp(u32 dev_id, u32 clk_id, const struct clk_map *map)
191{
192 if (map->dev_id == dev_id && map->clk_id == clk_id)
193 return 0;
194 if (map->dev_id > dev_id ||
195 (map->dev_id == dev_id && map->clk_id > clk_id))
196 return -1;
197 return 1;
198}
199
200static int bsearch(u32 dev_id, u32 clk_id, struct clk_map *map, int num)
201{
202 int result;
203 int idx;
204
205 for (idx = 0; idx < num; idx++) {
206 result = _clk_cmp(dev_id, clk_id, &map[idx]);
207
208 if (result == 0)
209 return idx;
210 }
211
212 return -ENOENT;
213}
214
215static int ti_clk_of_xlate(struct clk *clk,
216 struct ofnode_phandle_args *args)
217{
218 struct ti_clk_data *data = dev_get_priv(clk->dev);
219 int idx;
220
221 debug("%s(clk=%p, args_count=%d [0]=%d [1]=%d)\n", __func__, clk,
222 args->args_count, args->args[0], args->args[1]);
223
224 if (args->args_count != 2) {
225 debug("Invalid args_count: %d\n", args->args_count);
226 return -EINVAL;
227 }
228
229 if (!data->size)
230 return -EPROBE_DEFER;
231
232 idx = bsearch(args->args[0], args->args[1], data->map, data->size);
233 if (idx < 0)
234 return idx;
235
236 clk->id = idx;
237
238 return 0;
239}
240
241static ulong ti_clk_get_rate(struct clk *clk)
242{
243 struct ti_clk_data *data = dev_get_priv(clk->dev);
244 struct clk *clkp = data->map[clk->id].clk;
245
246 return clk_get_rate(clkp);
247}
248
249static ulong ti_clk_set_rate(struct clk *clk, ulong rate)
250{
251 struct ti_clk_data *data = dev_get_priv(clk->dev);
252 struct clk *clkp = data->map[clk->id].clk;
253 int div = 1;
254 ulong child_rate;
255 const struct clk_ops *ops;
256 ulong new_rate, rem;
257 ulong diff, new_diff;
Udit Kumarc648daa2023-09-21 22:30:38 +0530258 int freq_scale_up = rate >= ti_clk_get_rate(clk) ? 1 : 0;
Tero Kristo82ceb0d2021-06-11 11:45:14 +0300259
Udit Kumarc648daa2023-09-21 22:30:38 +0530260 if (IS_ENABLED(CONFIG_K3_AVS0) && freq_scale_up)
261 k3_avs_notify_freq(data->map[clk->id].dev_id,
262 data->map[clk->id].clk_id, rate);
Tero Kristo82ceb0d2021-06-11 11:45:14 +0300263 /*
264 * We must propagate rate change to parent if current clock type
265 * does not allow setting it.
266 */
267 while (clkp) {
268 ops = clkp->dev->driver->ops;
269 if (ops->set_rate)
270 break;
271
272 /*
273 * Store child rate so we can calculate the clock rate
274 * that must be passed to parent
275 */
276 child_rate = clk_get_rate(clkp);
277 clkp = clk_get_parent(clkp);
278 if (clkp) {
279 debug("%s: propagating rate change to parent %s, rate=%u.\n",
280 __func__, clkp->dev->name, (u32)rate / div);
281 div *= clk_get_rate(clkp) / child_rate;
282 }
283 }
284
285 if (!clkp)
286 return -ENOSYS;
287
288 child_rate = clk_get_rate(clkp);
289
290 new_rate = clk_set_rate(clkp, rate / div);
291
292 diff = abs(new_rate - rate / div);
293
294 debug("%s: clk=%s, div=%d, rate=%u, new_rate=%u, diff=%u\n", __func__,
295 clkp->dev->name, div, (u32)rate, (u32)new_rate, (u32)diff);
296
297 /*
298 * If the new rate differs by 50% of the target,
299 * modify parent. This handles typical cases where we have a hsdiv
300 * following directly a PLL
301 */
302
303 if (diff > rate / div / 2) {
304 ulong pll_tgt;
305 int pll_div = 0;
306
307 clk = clkp;
308
309 debug("%s: propagating rate change to parent, rate=%u.\n",
310 __func__, (u32)rate / div);
311
312 clkp = clk_get_parent(clkp);
313
314 if (rate > osc_freq) {
315 if (rate > PLL_MAX_FREQ / 2 && rate < PLL_MAX_FREQ) {
316 pll_tgt = rate;
317 pll_div = 1;
318 } else {
319 for (pll_div = 2; pll_div < PLL_MAX_DIV; pll_div++) {
320 pll_tgt = rate / div * pll_div;
321 if (pll_tgt >= PLL_MIN_FREQ && pll_tgt <= PLL_MAX_FREQ)
322 break;
323 }
324 }
325 } else {
326 pll_tgt = osc_freq;
327 pll_div = rate / div / osc_freq;
328 }
329
330 debug("%s: pll_tgt=%u, rate=%u, div=%u\n", __func__,
331 (u32)pll_tgt, (u32)rate, pll_div);
332
333 clk_set_rate(clkp, pll_tgt);
334
335 return clk_set_rate(clk, rate / div) * div;
336 }
337
338 /*
339 * If the new rate differs by at least 5% of the target,
340 * we must check for rounding error in a divider, so try
341 * set rate with rate + (parent_freq % rate).
342 */
343
344 if (diff > rate / div / 20) {
345 u64 parent_freq = clk_get_parent_rate(clkp);
346
347 rem = parent_freq % rate;
348 new_rate = clk_set_rate(clkp, (rate / div) + rem);
349 new_diff = abs(new_rate - rate / div);
350
351 if (new_diff > diff) {
352 new_rate = clk_set_rate(clkp, rate / div);
353 } else {
354 debug("%s: Using better rate %lu that gives diff %lu\n",
355 __func__, new_rate, new_diff);
356 }
357 }
358
Udit Kumarc648daa2023-09-21 22:30:38 +0530359 if (IS_ENABLED(CONFIG_K3_AVS0) && !freq_scale_up)
360 k3_avs_notify_freq(data->map[clk->id].dev_id,
361 data->map[clk->id].clk_id, rate);
362
Tero Kristo82ceb0d2021-06-11 11:45:14 +0300363 return new_rate;
364}
365
366static int ti_clk_set_parent(struct clk *clk, struct clk *parent)
367{
368 struct ti_clk_data *data = dev_get_priv(clk->dev);
369 struct clk *clkp = data->map[clk->id].clk;
370 struct clk *parentp = data->map[parent->id].clk;
371
372 return clk_set_parent(clkp, parentp);
373}
374
375static int ti_clk_enable(struct clk *clk)
376{
377 struct ti_clk_data *data = dev_get_priv(clk->dev);
378 struct clk *clkp = data->map[clk->id].clk;
379
380 return clk_enable(clkp);
381}
382
383static int ti_clk_disable(struct clk *clk)
384{
385 struct ti_clk_data *data = dev_get_priv(clk->dev);
386 struct clk *clkp = data->map[clk->id].clk;
387
388 return clk_disable(clkp);
389}
390
391static const struct udevice_id ti_clk_of_match[] = {
392 { .compatible = "ti,k2g-sci-clk" },
393 { /* sentinel */ },
394};
395
396static const struct clk_ops ti_clk_ops = {
397 .of_xlate = ti_clk_of_xlate,
398 .set_rate = ti_clk_set_rate,
399 .get_rate = ti_clk_get_rate,
400 .enable = ti_clk_enable,
401 .disable = ti_clk_disable,
402 .set_parent = ti_clk_set_parent,
403};
404
405U_BOOT_DRIVER(ti_clk) = {
406 .name = "ti-clk",
407 .id = UCLASS_CLK,
408 .of_match = ti_clk_of_match,
409 .probe = ti_clk_probe,
410 .priv_auto = sizeof(struct ti_clk_data),
411 .ops = &ti_clk_ops,
412};