blob: 35ee56d0693d6b1dc8c67d277887e1403b372c57 [file] [log] [blame]
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * (C) Copyright 2019 Xilinx, Inc.
 * Siva Durga Prasad Paladugu <siva.durga.prasad.paladugu@amd.com>
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +05305 */
6
Simon Glass0f2af882020-05-10 11:40:05 -06007#include <log.h>
Simon Glass274e0b02020-05-10 11:39:56 -06008#include <asm/cache.h>
Simon Glass6b9f0102020-05-10 11:40:06 -06009#include <asm/ptrace.h>
Simon Glass9bc15642020-02-03 07:36:16 -070010#include <dm/device_compat.h>
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +053011#include <linux/bitops.h>
12#include <linux/bitfield.h>
13#include <malloc.h>
14#include <clk-uclass.h>
15#include <clk.h>
16#include <dm.h>
17#include <asm/arch/sys_proto.h>
Michal Simeke50c1042019-10-04 15:25:18 +020018#include <zynqmp_firmware.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <linux/err.h>
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +053020
/* Limits of the in-memory clock model */
#define MAX_PARENT			100	/* max tracked parents per clock */
#define MAX_NODES			6	/* max topology nodes per clock */
#define MAX_NAME_LEN			50	/* clock name buffer size */

#define CLK_TYPE_SHIFT			2	/* external/output bit in attributes */

#define PM_API_PAYLOAD_LEN		3	/* data words per query response */

#define NA_PARENT			0xFFFFFFFF	/* end-of-parents marker */
#define DUMMY_PARENT			0xFFFFFFFE	/* unnamed placeholder parent */

#define CLK_TYPE_FIELD_LEN		4
#define CLK_TOPOLOGY_NODE_OFFSET	16
#define NODES_PER_RESP			3

/* Field layout of one topology word returned by firmware */
#define CLK_TYPE_FIELD_MASK		0xF
#define CLK_FLAG_FIELD_MASK		GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK	GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK	GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS		8

/* Field layout of one parent word returned by firmware */
#define CLK_PARENTS_ID_LEN		16
#define CLK_PARENTS_ID_MASK		0xFFFF

/* Sentinels signalled back by the chunk parsers */
#define END_OF_TOPOLOGY_NODE		1
#define END_OF_PARENTS			1

/* Clock attribute / node-id field layout (class:subclass:type:index) */
#define CLK_VALID_MASK			0x1
#define NODE_CLASS_SHIFT		26U
#define NODE_SUBCLASS_SHIFT		20U
#define NODE_TYPE_SHIFT			14U
#define NODE_INDEX_SHIFT		0U

/* Query response sizes (bytes for the name, 32-bit words otherwise) */
#define CLK_GET_NAME_RESP_LEN		16
#define CLK_GET_TOPOLOGY_RESP_WORDS	3
#define CLK_GET_PARENTS_RESP_WORDS	3
#define CLK_GET_ATTR_RESP_WORDS		1

#define NODE_SUBCLASS_CLOCK_PLL		1
#define NODE_SUBCLASS_CLOCK_OUT		2
#define NODE_SUBCLASS_CLOCK_REF		3

#define NODE_CLASS_CLOCK		2
#define NODE_CLASS_MASK			0x3F

/* Topology node types */
#define CLOCK_NODE_TYPE_MUX		1
#define CLOCK_NODE_TYPE_DIV		4
#define CLOCK_NODE_TYPE_GATE		6

/* Firmware node ids of the external reference clocks */
#define PM_CLK_REF_CLK			(0x830c06aU)
#define PM_CLK_PL_ALT_REF_CLK		(0x830c06bU)
#define PM_CLK_MUXED_IRO		(0x830c06cU)
#define PM_CLK_EMIO			(0x830c071U)

#define TOPOLOGY_TYPE_FIXEDFACTOR	0x3
/* Clock classification derived from the firmware attribute word */
enum clk_type {
	CLK_TYPE_OUTPUT,	/* clock generated by this controller */
	CLK_TYPE_EXTERNAL,	/* externally supplied reference clock */
};
81
/* One possible parent of a clock, from PM_QID_CLOCK_GET_PARENTS */
struct clock_parent {
	char name[MAX_NAME_LEN];	/* parent clock name */
	int id;				/* parent index into the clock table */
	u32 flag;			/* upper bits of the firmware parent word */
};
87
/* One internal node (mux/div/gate/...) of a clock's topology */
struct clock_topology {
	u32 type;	/* CLOCK_NODE_TYPE_* */
	u32 flag;	/* flags field extracted from the topology word */
	u32 type_flag;	/* type-specific flags from the topology word */
};
93
/* In-memory model of one firmware-managed clock */
struct versal_clock {
	char clk_name[MAX_NAME_LEN];	/* name queried from firmware */
	u32 valid;			/* CLK_VALID_MASK bit from attributes */
	enum clk_type type;		/* output vs external */
	struct clock_topology node[MAX_NODES];	/* topology nodes */
	u32 num_nodes;			/* populated entries in node[] */
	struct clock_parent parent[MAX_PARENT];	/* possible parents */
	u32 num_parents;		/* populated entries in parent[] */
	u32 clk_id;			/* canonical firmware clock id */
};
104
/* Per-device private data; points at the file-scope clock table */
struct versal_clk_priv {
	struct versal_clock *clk;
};
108
/* Reference clock rates in Hz, read from DT during probe */
static ulong pl_alt_ref_clk;
static ulong ref_clk;
111
/* Argument block for one PM_QUERY_DATA SMC call */
struct versal_pm_query_data {
	u32 qid;	/* PM_QID_* query identifier */
	u32 arg1;
	u32 arg2;
	u32 arg3;
};
118
/* Table of all firmware-enumerated clocks and its populated length */
static struct versal_clock *clock;
static unsigned int clock_max_idx;

/* EEMI API id of the PM_QUERY_DATA service */
#define PM_QUERY_DATA	35
123
124static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
125{
126 struct pt_regs regs;
127
128 regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
129 regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
130 regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;
131
132 smc_call(&regs);
133
134 if (ret_payload) {
135 ret_payload[0] = (u32)regs.regs[0];
136 ret_payload[1] = upper_32_bits(regs.regs[0]);
137 ret_payload[2] = (u32)regs.regs[1];
138 ret_payload[3] = upper_32_bits(regs.regs[1]);
139 ret_payload[4] = (u32)regs.regs[2];
140 }
141
142 return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
143}
144
145static inline int versal_is_valid_clock(u32 clk_id)
146{
147 if (clk_id >= clock_max_idx)
148 return -ENODEV;
149
150 return clock[clk_id].valid;
151}
152
153static int versal_get_clock_name(u32 clk_id, char *clk_name)
154{
155 int ret;
156
157 ret = versal_is_valid_clock(clk_id);
158 if (ret == 1) {
159 strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
160 return 0;
161 }
162
163 return ret == 0 ? -EINVAL : ret;
164}
165
166static int versal_get_clock_type(u32 clk_id, u32 *type)
167{
168 int ret;
169
170 ret = versal_is_valid_clock(clk_id);
171 if (ret == 1) {
172 *type = clock[clk_id].type;
173 return 0;
174 }
175
176 return ret == 0 ? -EINVAL : ret;
177}
178
179static int versal_pm_clock_get_num_clocks(u32 *nclocks)
180{
181 struct versal_pm_query_data qdata = {0};
182 u32 ret_payload[PAYLOAD_ARG_CNT];
183 int ret;
184
185 qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
186
187 ret = versal_pm_query(qdata, ret_payload);
188 *nclocks = ret_payload[1];
189
190 return ret;
191}
192
193static int versal_pm_clock_get_name(u32 clock_id, char *name)
194{
195 struct versal_pm_query_data qdata = {0};
196 u32 ret_payload[PAYLOAD_ARG_CNT];
197 int ret;
198
199 qdata.qid = PM_QID_CLOCK_GET_NAME;
200 qdata.arg1 = clock_id;
201
202 ret = versal_pm_query(qdata, ret_payload);
203 if (ret)
204 return ret;
205 memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);
206
207 return 0;
208}
209
210static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
211{
212 struct versal_pm_query_data qdata = {0};
213 u32 ret_payload[PAYLOAD_ARG_CNT];
214 int ret;
215
216 qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
217 qdata.arg1 = clock_id;
218 qdata.arg2 = index;
219
220 ret = versal_pm_query(qdata, ret_payload);
221 memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);
222
223 return ret;
224}
225
226static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
227{
228 struct versal_pm_query_data qdata = {0};
229 u32 ret_payload[PAYLOAD_ARG_CNT];
230 int ret;
231
232 qdata.qid = PM_QID_CLOCK_GET_PARENTS;
233 qdata.arg1 = clock_id;
234 qdata.arg2 = index;
235
236 ret = versal_pm_query(qdata, ret_payload);
237 memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);
238
239 return ret;
240}
241
242static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
243{
244 struct versal_pm_query_data qdata = {0};
245 u32 ret_payload[PAYLOAD_ARG_CNT];
246 int ret;
247
248 qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
249 qdata.arg1 = clock_id;
250
251 ret = versal_pm_query(qdata, ret_payload);
252 memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);
253
254 return ret;
255}
256
257static int __versal_clock_get_topology(struct clock_topology *topology,
258 u32 *data, u32 *nnodes)
259{
260 int i;
261
262 for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
263 if (!(data[i] & CLK_TYPE_FIELD_MASK))
264 return END_OF_TOPOLOGY_NODE;
265 topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
266 topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
267 data[i]);
268 topology[*nnodes].type_flag =
269 FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
270 topology[*nnodes].type_flag |=
271 FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
272 CLK_TYPE_FLAG_BITS;
273 debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
274 topology[*nnodes].type, topology[*nnodes].flag,
275 topology[*nnodes].type_flag);
276 (*nnodes)++;
277 }
278
279 return 0;
280}
281
282static int versal_clock_get_topology(u32 clk_id,
283 struct clock_topology *topology,
284 u32 *num_nodes)
285{
286 int j, ret;
287 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
288
289 *num_nodes = 0;
290 for (j = 0; j <= MAX_NODES; j += 3) {
291 ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
292 pm_resp);
293 if (ret)
294 return ret;
295 ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
296 if (ret == END_OF_TOPOLOGY_NODE)
297 return 0;
298 }
299
300 return 0;
301}
302
303static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
304 u32 *nparent)
305{
306 int i;
307 struct clock_parent *parent;
308
309 for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
310 if (data[i] == NA_PARENT)
311 return END_OF_PARENTS;
312
313 parent = &parents[i];
314 parent->id = data[i] & CLK_PARENTS_ID_MASK;
315 if (data[i] == DUMMY_PARENT) {
316 strcpy(parent->name, "dummy_name");
317 parent->flag = 0;
318 } else {
319 parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
320 if (versal_get_clock_name(parent->id, parent->name))
321 continue;
322 }
323 debug("parent name:%s\n", parent->name);
324 *nparent += 1;
325 }
326
327 return 0;
328}
329
330static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
331 u32 *num_parents)
332{
333 int j = 0, ret;
334 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
335
336 *num_parents = 0;
337 do {
338 /* Get parents from firmware */
339 ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
340 pm_resp);
341 if (ret)
342 return ret;
343
344 ret = __versal_clock_get_parents(&parents[j], pm_resp,
345 num_parents);
346 if (ret == END_OF_PARENTS)
347 return 0;
348 j += PM_API_PAYLOAD_LEN;
349 } while (*num_parents <= MAX_PARENT);
350
351 return 0;
352}
353
354static u32 versal_clock_get_div(u32 clk_id)
355{
356 u32 ret_payload[PAYLOAD_ARG_CNT];
357 u32 div;
358
Michal Simek142fb5b2019-10-04 15:52:43 +0200359 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530360 div = ret_payload[1];
361
362 return div;
363}
364
365static u32 versal_clock_set_div(u32 clk_id, u32 div)
366{
367 u32 ret_payload[PAYLOAD_ARG_CNT];
368
Michal Simek142fb5b2019-10-04 15:52:43 +0200369 xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530370
371 return div;
372}
373
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530374static u64 versal_clock_get_ref_rate(u32 clk_id)
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530375{
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530376 if (clk_id == PM_CLK_REF_CLK || clk_id == PM_CLK_MUXED_IRO || clk_id == PM_CLK_EMIO)
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530377 return ref_clk;
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530378 else if (clk_id == PM_CLK_PL_ALT_REF_CLK)
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530379 return pl_alt_ref_clk;
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530380 else
381 return 0;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530382}
383
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530384static int versal_clock_get_fixed_factor_rate(u32 clock_id, u32 parent_id)
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530385{
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530386 struct versal_pm_query_data qdata = {0};
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530387 u32 ret_payload[PAYLOAD_ARG_CNT];
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530388 u32 mult, div;
389 u32 parent_rate;
390 int ret;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530391
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530392 qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
393 qdata.arg1 = clock_id;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530394
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530395 ret = versal_pm_query(qdata, ret_payload);
396 if (ret)
397 return ret;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530398
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530399 mult = ret_payload[1];
400 div = ret_payload[2];
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530401
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530402 parent_rate = versal_clock_get_ref_rate(parent_id);
403 return parent_rate * mult / div;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530404
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530405}
406
407static u32 versal_clock_mux(u32 clk_id)
408{
409 int i;
410 u32 id = clk_id & 0xFFF;
411
412 for (i = 0; i < clock[id].num_nodes; i++)
413 if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
414 return 1;
415
416 return 0;
417}
418
419static u32 versal_clock_get_parentid(u32 clk_id)
420{
421 u32 parent_id = 0;
422 u32 ret_payload[PAYLOAD_ARG_CNT];
423 u32 id = clk_id & 0xFFF;
424
425 if (versal_clock_mux(clk_id)) {
Michal Simek142fb5b2019-10-04 15:52:43 +0200426 xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530427 ret_payload);
428 parent_id = ret_payload[1];
429 }
430
431 debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
432 return clock[clock[id].parent[parent_id].id].clk_id;
433}
434
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530435static u64 versal_clock_get_pll_rate(u32 clk_id)
436{
437 u32 ret_payload[PAYLOAD_ARG_CNT];
438 u32 fbdiv;
439 u32 res;
440 u32 frac;
441 u64 freq;
442 u32 parent_rate, parent_id, parent_ref_clk_id;
443 u32 id = clk_id & 0xFFF;
444
445 xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
446 res = ret_payload[1];
447 if (!res) {
448 printf("0%x PLL not enabled\n", clk_id);
449 return 0;
450 }
451
452 parent_id = clock[clock[id].parent[0].id].clk_id;
453 parent_ref_clk_id = versal_clock_get_parentid(parent_id);
454 parent_rate = versal_clock_get_ref_rate(parent_ref_clk_id);
455
456 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
457 fbdiv = ret_payload[1];
458 xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
459 frac = ret_payload[1];
460
461 freq = (fbdiv * parent_rate) >> (1 << frac);
462
463 return freq;
464}
465
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530466static u32 versal_clock_gate(u32 clk_id)
467{
468 u32 id = clk_id & 0xFFF;
469 int i;
470
471 for (i = 0; i < clock[id].num_nodes; i++)
472 if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
473 return 1;
474
475 return 0;
476}
477
478static u32 versal_clock_div(u32 clk_id)
479{
480 int i;
481 u32 id = clk_id & 0xFFF;
482
483 for (i = 0; i < clock[id].num_nodes; i++)
484 if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
485 return 1;
486
487 return 0;
488}
489
490static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
491{
492 if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
493 NODE_SUBCLASS_CLOCK_PLL &&
494 ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
495 NODE_CLASS_CLOCK) {
496 *clk_rate = versal_clock_get_pll_rate(clk_id);
497 return 1;
498 }
499
500 return 0;
501}
502
503static u64 versal_clock_calc(u32 clk_id)
504{
505 u32 parent_id;
506 u64 clk_rate;
507 u32 div;
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530508 struct clock_topology topology;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530509
510 if (versal_clock_pll(clk_id, &clk_rate))
511 return clk_rate;
512
513 parent_id = versal_clock_get_parentid(clk_id);
514 if (((parent_id >> NODE_SUBCLASS_SHIFT) &
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530515 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF) {
516 topology = clock[clk_id & 0x3FF].node[0];
517 if (topology.type == TOPOLOGY_TYPE_FIXEDFACTOR)
518 return versal_clock_get_fixed_factor_rate(clk_id, parent_id);
519 return versal_clock_get_ref_rate(parent_id);
520 }
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530521
T Karthik Reddyaa12d512020-04-08 21:34:54 -0600522 if (!parent_id)
523 return 0;
524
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530525 clk_rate = versal_clock_calc(parent_id);
526
527 if (versal_clock_div(clk_id)) {
528 div = versal_clock_get_div(clk_id);
529 clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
530 }
531
532 return clk_rate;
533}
534
535static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
536{
537 if (((clk_id >> NODE_SUBCLASS_SHIFT) &
538 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
Venkatesh Yadav Abbarapuc0f52c02023-09-12 09:00:55 +0530539 *clk_rate = versal_clock_get_ref_rate(clk_id);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530540
541 if (versal_clock_pll(clk_id, clk_rate))
542 return 0;
543
544 if (((clk_id >> NODE_SUBCLASS_SHIFT) &
545 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
546 ((clk_id >> NODE_CLASS_SHIFT) &
547 NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
T Karthik Reddyaa12d512020-04-08 21:34:54 -0600548 if (!versal_clock_gate(clk_id) && !versal_clock_mux(clk_id))
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530549 return -EINVAL;
550 *clk_rate = versal_clock_calc(clk_id);
551 return 0;
552 }
553
554 return 0;
555}
556
Igor Prusov1a3427b2023-11-09 13:55:15 +0300557#if IS_ENABLED(CONFIG_CMD_CLK)
558static void versal_clk_dump(struct udevice __always_unused *dev)
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530559{
560 u64 clk_rate = 0;
561 u32 type, ret, i = 0;
562
563 printf("\n ****** VERSAL CLOCKS *****\n");
564
Rajan Vaja98697a12020-05-04 22:53:56 -0700565 printf("pl_alt_ref_clk:%ld ref_clk:%ld\n", pl_alt_ref_clk, ref_clk);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530566 for (i = 0; i < clock_max_idx; i++) {
567 debug("%s\n", clock[i].clk_name);
568 ret = versal_get_clock_type(i, &type);
569 if (ret || type != CLK_TYPE_OUTPUT)
570 continue;
571
572 ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);
573
574 if (ret != -EINVAL)
575 printf("clk: %s freq:%lld\n",
576 clock[i].clk_name, clk_rate);
577 }
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530578}
Igor Prusov1a3427b2023-11-09 13:55:15 +0300579#endif
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530580
581static void versal_get_clock_info(void)
582{
583 int i, ret;
584 u32 attr, type = 0, nodetype, subclass, class;
585
586 for (i = 0; i < clock_max_idx; i++) {
587 ret = versal_pm_clock_get_attributes(i, &attr);
588 if (ret)
589 continue;
590
591 clock[i].valid = attr & CLK_VALID_MASK;
Rajan Vaja5986efc2020-01-16 03:55:05 -0800592
593 /* skip query for Invalid clock */
594 ret = versal_is_valid_clock(i);
595 if (ret != CLK_VALID_MASK)
596 continue;
597
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530598 clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
599 CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
600 nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
601 subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
602 class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;
603
604 clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
605 (subclass << NODE_SUBCLASS_SHIFT) |
606 (nodetype << NODE_TYPE_SHIFT) |
607 (i << NODE_INDEX_SHIFT);
608
609 ret = versal_pm_clock_get_name(clock[i].clk_id,
610 clock[i].clk_name);
611 if (ret)
612 continue;
613 debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
614 clock[i].clk_name, clock[i].valid,
615 clock[i].type, clock[i].clk_id);
616 }
617
618 /* Get topology of all clock */
619 for (i = 0; i < clock_max_idx; i++) {
620 ret = versal_get_clock_type(i, &type);
621 if (ret || type != CLK_TYPE_OUTPUT)
622 continue;
623 debug("clk name:%s\n", clock[i].clk_name);
624 ret = versal_clock_get_topology(i, clock[i].node,
625 &clock[i].num_nodes);
626 if (ret)
627 continue;
628
629 ret = versal_clock_get_parents(i, clock[i].parent,
630 &clock[i].num_parents);
631 if (ret)
632 continue;
633 }
634}
635
Venkatesh Yadav Abbarapue3184f82022-10-07 16:25:35 +0530636static int versal_clock_setup(void)
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530637{
638 int ret;
639
640 ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
641 if (ret)
642 return ret;
643
644 debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
645 clock = calloc(clock_max_idx, sizeof(*clock));
646 if (!clock)
647 return -ENOMEM;
648
649 versal_get_clock_info();
650
651 return 0;
652}
653
654static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
655 ulong *freq)
656{
657 struct clk clk;
658 int ret;
659
660 ret = clk_get_by_name(dev, name, &clk);
661 if (ret < 0) {
662 dev_err(dev, "failed to get %s\n", name);
663 return ret;
664 }
665
666 *freq = clk_get_rate(&clk);
667 if (IS_ERR_VALUE(*freq)) {
668 dev_err(dev, "failed to get rate %s\n", name);
669 return -EINVAL;
670 }
671
672 return 0;
673}
674
675static int versal_clk_probe(struct udevice *dev)
676{
677 int ret;
678 struct versal_clk_priv *priv = dev_get_priv(dev);
679
680 debug("%s\n", __func__);
681
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530682 ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
683 dev, &pl_alt_ref_clk);
684 if (ret < 0)
685 return -EINVAL;
686
687 ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
688 if (ret < 0)
689 return -EINVAL;
690
Jay Buddhabhattid628e672023-01-10 08:23:44 +0100691 ret = versal_clock_setup();
692 if (ret < 0)
693 return ret;
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530694
695 priv->clk = clock;
696
697 return ret;
698}
699
700static ulong versal_clk_get_rate(struct clk *clk)
701{
702 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
703 u32 id = clk->id;
704 u32 clk_id;
705 u64 clk_rate = 0;
706
707 debug("%s\n", __func__);
708
709 clk_id = priv->clk[id].clk_id;
710
711 versal_clock_get_rate(clk_id, &clk_rate);
712
713 return clk_rate;
714}
715
716static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
717{
718 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
719 u32 id = clk->id;
720 u32 clk_id;
721 u64 clk_rate = 0;
722 u32 div;
723 int ret;
724
725 debug("%s\n", __func__);
726
727 clk_id = priv->clk[id].clk_id;
728
729 ret = versal_clock_get_rate(clk_id, &clk_rate);
730 if (ret) {
731 printf("Clock is not a Gate:0x%x\n", clk_id);
732 return 0;
733 }
734
735 do {
736 if (versal_clock_div(clk_id)) {
737 div = versal_clock_get_div(clk_id);
738 clk_rate *= div;
739 div = DIV_ROUND_CLOSEST(clk_rate, rate);
740 versal_clock_set_div(clk_id, div);
741 debug("%s, div:%d, newrate:%lld\n", __func__,
742 div, DIV_ROUND_CLOSEST(clk_rate, div));
743 return DIV_ROUND_CLOSEST(clk_rate, div);
744 }
745 clk_id = versal_clock_get_parentid(clk_id);
746 } while (((clk_id >> NODE_SUBCLASS_SHIFT) &
747 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);
748
749 printf("Clock didn't has Divisors:0x%x\n", priv->clk[id].clk_id);
750
751 return clk_rate;
752}
753
T Karthik Reddy277300f2021-02-03 03:10:47 -0700754static int versal_clk_enable(struct clk *clk)
755{
756 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
757 u32 clk_id;
758
759 clk_id = priv->clk[clk->id].clk_id;
760
T Karthik Reddyb5ef0ba2021-09-28 11:30:27 +0530761 if (versal_clock_gate(clk_id))
762 return xilinx_pm_request(PM_CLOCK_ENABLE, clk_id, 0, 0, 0, NULL);
763
764 return 0;
T Karthik Reddy277300f2021-02-03 03:10:47 -0700765}
766
/* Clock uclass operations; dump is only built with the clk command */
static struct clk_ops versal_clk_ops = {
	.set_rate = versal_clk_set_rate,
	.get_rate = versal_clk_get_rate,
	.enable = versal_clk_enable,
#if IS_ENABLED(CONFIG_CMD_CLK)
	.dump = versal_clk_dump,
#endif
};
775
/* Device-tree match table */
static const struct udevice_id versal_clk_ids[] = {
	{ .compatible = "xlnx,versal-clk" },
	{ }
};
780
/* U-Boot driver registration for the Versal clock controller */
U_BOOT_DRIVER(versal_clk) = {
	.name = "versal-clk",
	.id = UCLASS_CLK,
	.of_match = versal_clk_ids,
	.probe = versal_clk_probe,
	.ops = &versal_clk_ops,
	.priv_auto = sizeof(struct versal_clk_priv),
};