// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2019 Xilinx, Inc.
 * Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
 */

#include <common.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <malloc.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <asm/arch/sys_proto.h>
#include <zynqmp_firmware.h>
#include <linux/err.h>

#define MAX_PARENT	100
#define MAX_NODES	6
#define MAX_NAME_LEN	50

#define CLK_TYPE_SHIFT	2

#define PM_API_PAYLOAD_LEN	3

#define NA_PARENT	0xFFFFFFFF
#define DUMMY_PARENT	0xFFFFFFFE

#define CLK_TYPE_FIELD_LEN	4
#define CLK_TOPOLOGY_NODE_OFFSET	16
#define NODES_PER_RESP	3

#define CLK_TYPE_FIELD_MASK	0xF
#define CLK_FLAG_FIELD_MASK	GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK	GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK	GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS	8

#define CLK_PARENTS_ID_LEN	16
#define CLK_PARENTS_ID_MASK	0xFFFF

#define END_OF_TOPOLOGY_NODE	1
#define END_OF_PARENTS	1

#define CLK_VALID_MASK	0x1
#define NODE_CLASS_SHIFT	26U
#define NODE_SUBCLASS_SHIFT	20U
#define NODE_TYPE_SHIFT	14U
#define NODE_INDEX_SHIFT	0U

#define CLK_GET_NAME_RESP_LEN	16
#define CLK_GET_TOPOLOGY_RESP_WORDS	3
#define CLK_GET_PARENTS_RESP_WORDS	3
#define CLK_GET_ATTR_RESP_WORDS	1

#define NODE_SUBCLASS_CLOCK_PLL	1
#define NODE_SUBCLASS_CLOCK_OUT	2
#define NODE_SUBCLASS_CLOCK_REF	3

#define NODE_CLASS_CLOCK	2
#define NODE_CLASS_MASK	0x3F

#define CLOCK_NODE_TYPE_MUX	1
#define CLOCK_NODE_TYPE_DIV	4
#define CLOCK_NODE_TYPE_GATE	6

enum clk_type {
	CLK_TYPE_OUTPUT,
	CLK_TYPE_EXTERNAL,
};

struct clock_parent {
	char name[MAX_NAME_LEN];
	int id;
	u32 flag;
};

struct clock_topology {
	u32 type;
	u32 flag;
	u32 type_flag;
};

struct versal_clock {
	char clk_name[MAX_NAME_LEN];
	u32 valid;
	enum clk_type type;
	struct clock_topology node[MAX_NODES];
	u32 num_nodes;
	struct clock_parent parent[MAX_PARENT];
	u32 num_parents;
	u32 clk_id;
};

struct versal_clk_priv {
	struct versal_clock *clk;
};

static ulong pl_alt_ref_clk;
static ulong ref_clk;

struct versal_pm_query_data {
	u32 qid;
	u32 arg1;
	u32 arg2;
	u32 arg3;
};

static struct versal_clock *clock;
static unsigned int clock_max_idx;

#define PM_QUERY_DATA	35

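/*
 * Issue a PM_QUERY_DATA request to the platform management firmware via an
 * SMC call. The query ID and arguments are packed two per 64-bit register,
 * and the three 64-bit result registers are unpacked into 32-bit payload
 * words. PM_QID_CLOCK_GET_NAME returns the name in the payload itself and
 * carries no status word, so it is always treated as success.
 */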
static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
{
	struct pt_regs regs;

	regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
	regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
	regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;

	smc_call(&regs);

	if (ret_payload) {
		ret_payload[0] = (u32)regs.regs[0];
		ret_payload[1] = upper_32_bits(regs.regs[0]);
		ret_payload[2] = (u32)regs.regs[1];
		ret_payload[3] = upper_32_bits(regs.regs[1]);
		ret_payload[4] = (u32)regs.regs[2];
	}

	return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
}

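/*
 * Return 1 if the clock is valid, 0 if it is marked invalid and -ENODEV if
 * the index is out of range.
 */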
static inline int versal_is_valid_clock(u32 clk_id)
{
	if (clk_id >= clock_max_idx)
		return -ENODEV;

	return clock[clk_id].valid;
}

static int versal_get_clock_name(u32 clk_id, char *clk_name)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_get_clock_type(u32 clk_id, u32 *type)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		*type = clock[clk_id].type;
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_pm_clock_get_num_clocks(u32 *nclocks)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;

	ret = versal_pm_query(qdata, ret_payload);
	*nclocks = ret_payload[1];

	return ret;
}

static int versal_pm_clock_get_name(u32 clock_id, char *name)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NAME;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	if (ret)
		return ret;
	memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);

	return 0;
}

static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_PARENTS;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);

	return ret;
}

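/*
 * Decode one topology response (up to three packed words) into topology
 * nodes. A word with a zero type field marks the end of the topology.
 */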
static int __versal_clock_get_topology(struct clock_topology *topology,
				       u32 *data, u32 *nnodes)
{
	int i;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (!(data[i] & CLK_TYPE_FIELD_MASK))
			return END_OF_TOPOLOGY_NODE;
		topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
		topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
						   data[i]);
		topology[*nnodes].type_flag =
			FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
		topology[*nnodes].type_flag |=
			FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
			CLK_TYPE_FLAG_BITS;
		debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
		      topology[*nnodes].type, topology[*nnodes].flag,
		      topology[*nnodes].type_flag);
		(*nnodes)++;
	}

	return 0;
}

static int versal_clock_get_topology(u32 clk_id,
				     struct clock_topology *topology,
				     u32 *num_nodes)
{
	int j, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_nodes = 0;
	for (j = 0; j <= MAX_NODES; j += 3) {
		ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
						   pm_resp);
		if (ret)
			return ret;
		ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
		if (ret == END_OF_TOPOLOGY_NODE)
			return 0;
	}

	return 0;
}

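/*
 * Decode one parents response (up to three packed words) into parent
 * entries. NA_PARENT terminates the list; DUMMY_PARENT entries get a
 * placeholder name.
 */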
static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
				      u32 *nparent)
{
	int i;
	struct clock_parent *parent;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (data[i] == NA_PARENT)
			return END_OF_PARENTS;

		parent = &parents[i];
		parent->id = data[i] & CLK_PARENTS_ID_MASK;
		if (data[i] == DUMMY_PARENT) {
			strcpy(parent->name, "dummy_name");
			parent->flag = 0;
		} else {
			parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
			if (versal_get_clock_name(parent->id, parent->name))
				continue;
		}
		debug("parent name:%s\n", parent->name);
		*nparent += 1;
	}

	return 0;
}

static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
				    u32 *num_parents)
{
	int j = 0, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_parents = 0;
	do {
		/* Get parents from firmware */
		ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
						  pm_resp);
		if (ret)
			return ret;

		ret = __versal_clock_get_parents(&parents[j], pm_resp,
						 num_parents);
		if (ret == END_OF_PARENTS)
			return 0;
		j += PM_API_PAYLOAD_LEN;
	} while (*num_parents <= MAX_PARENT);

	return 0;
}

static u32 versal_clock_get_div(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 div;

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	div = ret_payload[1];

	return div;
}

static u32 versal_clock_set_div(u32 clk_id, u32 div)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];

	xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);

	return div;
}

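/*
 * Pick the board reference rate for a clock: return ref_clk when bit 0 of
 * the parent index reported by the firmware is clear, pl_alt_ref_clk when
 * bit 1 is set, and 0 otherwise.
 */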
static u64 versal_clock_ref(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ref;

	xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0, ret_payload);
	ref = ret_payload[0];
	if (!(ref & 1))
		return ref_clk;
	if (ref & 2)
		return pl_alt_ref_clk;
	return 0;
}

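/*
 * Compute a PLL output rate from the firmware-reported feedback divider and
 * fractional parameter: rate = (fbdiv * parent_rate) >> (1 << frac), i.e.
 * divided by 2 in integer mode and by 4 in fractional mode. Returns 0 if
 * the PLL is not enabled.
 */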
static u64 versal_clock_get_pll_rate(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 fbdiv;
	u32 res;
	u32 frac;
	u64 freq;
	u32 parent_rate, parent_id;
	u32 id = clk_id & 0xFFF;

	xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
	res = ret_payload[1];
	if (!res) {
		printf("0x%x PLL not enabled\n", clk_id);
		return 0;
	}

	parent_id = clock[clock[id].parent[0].id].clk_id;
	parent_rate = versal_clock_ref(parent_id);

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	fbdiv = ret_payload[1];
	xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
	frac = ret_payload[1];

	freq = (fbdiv * parent_rate) >> (1 << frac);

	return freq;
}

static u32 versal_clock_mux(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
			return 1;

	return 0;
}

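/*
 * Resolve the active parent of a clock: for clocks with a mux node the
 * selected input is queried from the firmware, otherwise parent 0 is used.
 */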
static u32 versal_clock_get_parentid(u32 clk_id)
{
	u32 parent_id = 0;
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 id = clk_id & 0xFFF;

	if (versal_clock_mux(clk_id)) {
		xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
				  ret_payload);
		parent_id = ret_payload[1];
	}

	debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
	return clock[clock[id].parent[parent_id].id].clk_id;
}

static u32 versal_clock_gate(u32 clk_id)
{
	u32 id = clk_id & 0xFFF;
	int i;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
			return 1;

	return 0;
}

static u32 versal_clock_div(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
			return 1;

	return 0;
}

static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_SUBCLASS_CLOCK_PLL &&
	    ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_CLASS_CLOCK) {
		*clk_rate = versal_clock_get_pll_rate(clk_id);
		return 1;
	}

	return 0;
}

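/*
 * Recursively compute a clock's rate by walking up the parent chain.
 * PLL and reference-clock nodes terminate the recursion; the parent rate
 * is then scaled down by this clock's divider, if it has one.
 */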
static u64 versal_clock_calc(u32 clk_id)
{
	u32 parent_id;
	u64 clk_rate;
	u32 div;

	if (versal_clock_pll(clk_id, &clk_rate))
		return clk_rate;

	parent_id = versal_clock_get_parentid(clk_id);
	if (((parent_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		return versal_clock_ref(clk_id);

	if (!parent_id)
		return 0;

	clk_rate = versal_clock_calc(parent_id);

	if (versal_clock_div(clk_id)) {
		div = versal_clock_get_div(clk_id);
		clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
	}

	return clk_rate;
}

static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		*clk_rate = versal_clock_ref(clk_id);

	if (versal_clock_pll(clk_id, clk_rate))
		return 0;

	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
	    ((clk_id >> NODE_CLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
		if (!versal_clock_gate(clk_id) && !versal_clock_mux(clk_id))
			return -EINVAL;
		*clk_rate = versal_clock_calc(clk_id);
		return 0;
	}

	return 0;
}

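/* Print the rate of every valid output clock for the 'clk dump' command */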
int soc_clk_dump(void)
{
	u64 clk_rate = 0;
	u32 type, ret, i = 0;

	printf("\n ****** VERSAL CLOCKS ******\n");

	printf("pl_alt_ref_clk:%ld ref_clk:%ld\n", pl_alt_ref_clk, ref_clk);
	for (i = 0; i < clock_max_idx; i++) {
		debug("%s\n", clock[i].clk_name);
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;

		ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);

		if (ret != -EINVAL)
			printf("clk: %s freq:%lld\n",
			       clock[i].clk_name, clk_rate);
	}

	return 0;
}

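/*
 * Build the local clock table from the firmware: query each clock's
 * attributes to derive its full node ID and name, then fetch the topology
 * and parent list of every output clock.
 */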
static void versal_get_clock_info(void)
{
	int i, ret;
	u32 attr, type = 0, nodetype, subclass, class;

	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_pm_clock_get_attributes(i, &attr);
		if (ret)
			continue;

		clock[i].valid = attr & CLK_VALID_MASK;

		/* Skip querying invalid clocks */
		ret = versal_is_valid_clock(i);
		if (ret != CLK_VALID_MASK)
			continue;

		clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
				CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
		nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
		subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
		class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;

		clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
				  (subclass << NODE_SUBCLASS_SHIFT) |
				  (nodetype << NODE_TYPE_SHIFT) |
				  (i << NODE_INDEX_SHIFT);

		ret = versal_pm_clock_get_name(clock[i].clk_id,
					       clock[i].clk_name);
		if (ret)
			continue;
		debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
		      clock[i].clk_name, clock[i].valid,
		      clock[i].type, clock[i].clk_id);
	}

	/* Get the topology of all clocks */
	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;
		debug("clk name:%s\n", clock[i].clk_name);
		ret = versal_clock_get_topology(i, clock[i].node,
						&clock[i].num_nodes);
		if (ret)
			continue;

		ret = versal_clock_get_parents(i, clock[i].parent,
					       &clock[i].num_parents);
		if (ret)
			continue;
	}
}

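/* Query the number of clocks, allocate the clock table and populate it */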
int versal_clock_setup(void)
{
	int ret;

	ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
	if (ret)
		return ret;

	debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
	clock = calloc(clock_max_idx, sizeof(*clock));
	if (!clock)
		return -ENOMEM;

	versal_get_clock_info();

	return 0;
}

static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
					 ulong *freq)
{
	struct clk clk;
	int ret;

	ret = clk_get_by_name(dev, name, &clk);
	if (ret < 0) {
		dev_err(dev, "failed to get %s\n", name);
		return ret;
	}

	*freq = clk_get_rate(&clk);
	if (IS_ERR_VALUE(*freq)) {
		dev_err(dev, "failed to get rate %s\n", name);
		return -EINVAL;
	}

	return 0;
}

static int versal_clk_probe(struct udevice *dev)
{
	int ret;
	struct versal_clk_priv *priv = dev_get_priv(dev);

	debug("%s\n", __func__);

	ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
					    dev, &pl_alt_ref_clk);
	if (ret < 0)
		return -EINVAL;

	ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
	if (ret < 0)
		return -EINVAL;

	versal_clock_setup();

	priv->clk = clock;

	return ret;
}

static ulong versal_clk_get_rate(struct clk *clk)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	versal_clock_get_rate(clk_id, &clk_rate);

	return clk_rate;
}

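/*
 * Set a clock's rate by walking from the clock towards its reference,
 * programming the first divider node found with the ratio closest to the
 * requested rate. Returns the resulting rate.
 */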
static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;
	u32 div;
	int ret;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	ret = versal_clock_get_rate(clk_id, &clk_rate);
	if (ret) {
		printf("Clock is not a gate: 0x%x\n", clk_id);
		return 0;
	}

	do {
		if (versal_clock_div(clk_id)) {
			div = versal_clock_get_div(clk_id);
			clk_rate *= div;
			div = DIV_ROUND_CLOSEST(clk_rate, rate);
			versal_clock_set_div(clk_id, div);
			debug("%s, div:%d, newrate:%lld\n", __func__,
			      div, DIV_ROUND_CLOSEST(clk_rate, div));
			return DIV_ROUND_CLOSEST(clk_rate, div);
		}
		clk_id = versal_clock_get_parentid(clk_id);
	} while (((clk_id >> NODE_SUBCLASS_SHIFT) &
		 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);

	printf("Clock 0x%x has no divisors\n", priv->clk[id].clk_id);

	return clk_rate;
}

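/*
 * Only clocks whose topology contains a gate node need a firmware enable
 * request; all other clocks are treated as already running.
 */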
static int versal_clk_enable(struct clk *clk)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 clk_id;

	clk_id = priv->clk[clk->id].clk_id;

	if (versal_clock_gate(clk_id))
		return xilinx_pm_request(PM_CLOCK_ENABLE, clk_id, 0, 0, 0, NULL);

	return 0;
}

static struct clk_ops versal_clk_ops = {
	.set_rate = versal_clk_set_rate,
	.get_rate = versal_clk_get_rate,
	.enable = versal_clk_enable,
};

static const struct udevice_id versal_clk_ids[] = {
	{ .compatible = "xlnx,versal-clk" },
	{ }
};

U_BOOT_DRIVER(versal_clk) = {
	.name = "versal-clk",
	.id = UCLASS_CLK,
	.of_match = versal_clk_ids,
	.probe = versal_clk_probe,
	.ops = &versal_clk_ops,
	.priv_auto = sizeof(struct versal_clk_priv),
};