blob: 2fb3171d71fa76cbd0036582256b8bea88a4561f [file] [log] [blame]
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * (C) Copyright 2019 Xilinx, Inc.
4 * Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
5 */
6
7#include <common.h>
Simon Glass0f2af882020-05-10 11:40:05 -06008#include <log.h>
Simon Glass274e0b02020-05-10 11:39:56 -06009#include <asm/cache.h>
Simon Glass6b9f0102020-05-10 11:40:06 -060010#include <asm/ptrace.h>
Simon Glass9bc15642020-02-03 07:36:16 -070011#include <dm/device_compat.h>
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +053012#include <linux/bitops.h>
13#include <linux/bitfield.h>
14#include <malloc.h>
15#include <clk-uclass.h>
16#include <clk.h>
17#include <dm.h>
18#include <asm/arch/sys_proto.h>
Michal Simeke50c1042019-10-04 15:25:18 +020019#include <zynqmp_firmware.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070020#include <linux/err.h>
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +053021
/* Limits of the per-clock bookkeeping tables filled from firmware queries */
#define MAX_PARENT			100
#define MAX_NODES			6
#define MAX_NAME_LEN			50

/* Bit in the attribute word selecting external vs output clock type */
#define CLK_TYPE_SHIFT			2

/* Number of useful 32-bit data words in one QUERY_DATA response */
#define PM_API_PAYLOAD_LEN		3

/* Sentinel parent ids returned by firmware */
#define NA_PARENT			0xFFFFFFFF
#define DUMMY_PARENT			0xFFFFFFFE

/* Layout of one topology word returned by CLOCK_GET_TOPOLOGY */
#define CLK_TYPE_FIELD_LEN		4
#define CLK_TOPOLOGY_NODE_OFFSET	16
#define NODES_PER_RESP			3

#define CLK_TYPE_FIELD_MASK		0xF
#define CLK_FLAG_FIELD_MASK		GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK	GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK	GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS		8

/* Parent word: low 16 bits are the parent id, upper bits are flags */
#define CLK_PARENTS_ID_LEN		16
#define CLK_PARENTS_ID_MASK		0xFFFF

/* List terminators signalled by the response decoders */
#define END_OF_TOPOLOGY_NODE		1
#define END_OF_PARENTS			1

/* Composite firmware node-id field positions (class/subclass/type/index) */
#define CLK_VALID_MASK			0x1
#define NODE_CLASS_SHIFT		26U
#define NODE_SUBCLASS_SHIFT		20U
#define NODE_TYPE_SHIFT			14U
#define NODE_INDEX_SHIFT		0U

/* Response sizes: bytes for the name query, words for the others */
#define CLK_GET_NAME_RESP_LEN		16
#define CLK_GET_TOPOLOGY_RESP_WORDS	3
#define CLK_GET_PARENTS_RESP_WORDS	3
#define CLK_GET_ATTR_RESP_WORDS	1

/* Node subclass values within class "clock" */
#define NODE_SUBCLASS_CLOCK_PLL	1
#define NODE_SUBCLASS_CLOCK_OUT	2
#define NODE_SUBCLASS_CLOCK_REF	3

#define NODE_CLASS_CLOCK		2
#define NODE_CLASS_MASK		0x3F

/* Topology node types of interest to this driver */
#define CLOCK_NODE_TYPE_MUX		1
#define CLOCK_NODE_TYPE_DIV		4
#define CLOCK_NODE_TYPE_GATE		6
/*
 * Query identifiers understood by the PM firmware's QUERY_DATA call.
 * The numeric values are an ABI shared with the firmware - do not reorder.
 */
enum pm_query_id {
	PM_QID_INVALID,
	PM_QID_CLOCK_GET_NAME,
	PM_QID_CLOCK_GET_TOPOLOGY,
	PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
	PM_QID_CLOCK_GET_PARENTS,
	PM_QID_CLOCK_GET_ATTRIBUTES,
	PM_QID_PINCTRL_GET_NUM_PINS,
	PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
	PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_FUNCTION_NAME,
	PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_PIN_GROUPS,
	PM_QID_CLOCK_GET_NUM_CLOCKS,
	PM_QID_CLOCK_GET_MAX_DIVISOR,
};
87
/* Clock kind derived from the attribute word: driver-computed vs board input */
enum clk_type {
	CLK_TYPE_OUTPUT,
	CLK_TYPE_EXTERNAL,
};

/* One possible parent of a clock, decoded from a CLOCK_GET_PARENTS word */
struct clock_parent {
	char name[MAX_NAME_LEN];	/* resolved clock name or "dummy_name" */
	int id;				/* parent index (low 16 bits of the word) */
	u32 flag;			/* upper bits of the parent word */
};

/* One topology node (mux/div/gate/...) of a clock */
struct clock_topology {
	u32 type;		/* CLOCK_NODE_TYPE_* value */
	u32 flag;		/* bits 21:8 of the topology word */
	u32 type_flag;		/* bits 31:24 plus 7:4 of the topology word */
};

/* Everything this driver knows about one firmware clock index */
struct versal_clock {
	char clk_name[MAX_NAME_LEN];
	u32 valid;			/* CLK_VALID_MASK bit from attributes */
	enum clk_type type;
	struct clock_topology node[MAX_NODES];
	u32 num_nodes;
	struct clock_parent parent[MAX_PARENT];
	u32 num_parents;
	u32 clk_id;			/* composite firmware node id */
};
115
/* Driver private data: points at the shared clock[] table below */
struct versal_clk_priv {
	struct versal_clock *clk;
};

/* Reference input rates (Hz) read from the device tree at probe time */
static ulong alt_ref_clk;
static ulong pl_alt_ref_clk;
static ulong ref_clk;

/* Argument block for one QUERY_DATA firmware call */
struct versal_pm_query_data {
	u32 qid;	/* enum pm_query_id value */
	u32 arg1;
	u32 arg2;
	u32 arg3;
};

/* Global clock table built once by versal_clock_setup() */
static struct versal_clock *clock;
static unsigned int clock_max_idx;

/* EEMI API id of the QUERY_DATA call */
#define PM_QUERY_DATA	35
135
/*
 * versal_pm_query() - Issue a QUERY_DATA request to the PM firmware via SMC
 * @qdata:       query id plus up to three arguments
 * @ret_payload: optional buffer for five 32-bit response words (may be NULL)
 *
 * The four 32-bit query words are packed pairwise into 64-bit SMC
 * argument registers; the returned registers are unpacked in the same
 * fashion into @ret_payload.
 *
 * Return: the firmware status word (regs[0]), except for
 * PM_QID_CLOCK_GET_NAME whose whole payload is the name string with no
 * status word, so 0 is returned unconditionally for that query.
 */
static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
{
	struct pt_regs regs;

	regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
	/* qid/arg1 and arg2/arg3 are packed low/high into 64-bit registers */
	regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
	regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;

	smc_call(&regs);

	if (ret_payload) {
		ret_payload[0] = (u32)regs.regs[0];
		ret_payload[1] = upper_32_bits(regs.regs[0]);
		ret_payload[2] = (u32)regs.regs[1];
		ret_payload[3] = upper_32_bits(regs.regs[1]);
		ret_payload[4] = (u32)regs.regs[2];
	}

	return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
}
156
157static inline int versal_is_valid_clock(u32 clk_id)
158{
159 if (clk_id >= clock_max_idx)
160 return -ENODEV;
161
162 return clock[clk_id].valid;
163}
164
165static int versal_get_clock_name(u32 clk_id, char *clk_name)
166{
167 int ret;
168
169 ret = versal_is_valid_clock(clk_id);
170 if (ret == 1) {
171 strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
172 return 0;
173 }
174
175 return ret == 0 ? -EINVAL : ret;
176}
177
178static int versal_get_clock_type(u32 clk_id, u32 *type)
179{
180 int ret;
181
182 ret = versal_is_valid_clock(clk_id);
183 if (ret == 1) {
184 *type = clock[clk_id].type;
185 return 0;
186 }
187
188 return ret == 0 ? -EINVAL : ret;
189}
190
191static int versal_pm_clock_get_num_clocks(u32 *nclocks)
192{
193 struct versal_pm_query_data qdata = {0};
194 u32 ret_payload[PAYLOAD_ARG_CNT];
195 int ret;
196
197 qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
198
199 ret = versal_pm_query(qdata, ret_payload);
200 *nclocks = ret_payload[1];
201
202 return ret;
203}
204
205static int versal_pm_clock_get_name(u32 clock_id, char *name)
206{
207 struct versal_pm_query_data qdata = {0};
208 u32 ret_payload[PAYLOAD_ARG_CNT];
209 int ret;
210
211 qdata.qid = PM_QID_CLOCK_GET_NAME;
212 qdata.arg1 = clock_id;
213
214 ret = versal_pm_query(qdata, ret_payload);
215 if (ret)
216 return ret;
217 memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);
218
219 return 0;
220}
221
222static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
223{
224 struct versal_pm_query_data qdata = {0};
225 u32 ret_payload[PAYLOAD_ARG_CNT];
226 int ret;
227
228 qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
229 qdata.arg1 = clock_id;
230 qdata.arg2 = index;
231
232 ret = versal_pm_query(qdata, ret_payload);
233 memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);
234
235 return ret;
236}
237
238static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
239{
240 struct versal_pm_query_data qdata = {0};
241 u32 ret_payload[PAYLOAD_ARG_CNT];
242 int ret;
243
244 qdata.qid = PM_QID_CLOCK_GET_PARENTS;
245 qdata.arg1 = clock_id;
246 qdata.arg2 = index;
247
248 ret = versal_pm_query(qdata, ret_payload);
249 memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);
250
251 return ret;
252}
253
254static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
255{
256 struct versal_pm_query_data qdata = {0};
257 u32 ret_payload[PAYLOAD_ARG_CNT];
258 int ret;
259
260 qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
261 qdata.arg1 = clock_id;
262
263 ret = versal_pm_query(qdata, ret_payload);
264 memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);
265
266 return ret;
267}
268
/*
 * __versal_clock_get_topology() - Decode one response worth of topology nodes
 * @topology: node array of the clock being queried
 * @data:     PM_API_PAYLOAD_LEN topology words from firmware
 * @nnodes:   running node count, incremented per decoded node
 *
 * Each word encodes type (bits 3:0), flags (21:8) and type flags
 * (31:24, plus 7:4 shifted above them). A zero type terminates the list.
 *
 * Return: END_OF_TOPOLOGY_NODE when the terminator is seen, else 0.
 */
static int __versal_clock_get_topology(struct clock_topology *topology,
				       u32 *data, u32 *nnodes)
{
	int i;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (!(data[i] & CLK_TYPE_FIELD_MASK))
			return END_OF_TOPOLOGY_NODE;
		topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
		topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
						   data[i]);
		topology[*nnodes].type_flag =
				FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
		/* Second type-flag field is appended above the first one */
		topology[*nnodes].type_flag |=
				FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
				CLK_TYPE_FLAG_BITS;
		debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
		      topology[*nnodes].type, topology[*nnodes].flag,
		      topology[*nnodes].type_flag);
		(*nnodes)++;
	}

	return 0;
}
293
294static int versal_clock_get_topology(u32 clk_id,
295 struct clock_topology *topology,
296 u32 *num_nodes)
297{
298 int j, ret;
299 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
300
301 *num_nodes = 0;
302 for (j = 0; j <= MAX_NODES; j += 3) {
303 ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
304 pm_resp);
305 if (ret)
306 return ret;
307 ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
308 if (ret == END_OF_TOPOLOGY_NODE)
309 return 0;
310 }
311
312 return 0;
313}
314
/*
 * __versal_clock_get_parents() - Decode one response worth of parent words
 * @parents: slot array positioned at the current chunk's first slot
 * @data:    PM_API_PAYLOAD_LEN parent words from firmware
 * @nparent: running count of decoded parents, incremented here
 *
 * Return: END_OF_PARENTS when the NA_PARENT terminator is seen, else 0.
 *
 * NOTE(review): when a name lookup fails the slot index (i) keeps
 * advancing while *nparent does not, so the count and the occupied slot
 * positions can diverge - confirm callers tolerate the gap.
 */
static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
				      u32 *nparent)
{
	int i;
	struct clock_parent *parent;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (data[i] == NA_PARENT)
			return END_OF_PARENTS;

		parent = &parents[i];
		parent->id = data[i] & CLK_PARENTS_ID_MASK;
		if (data[i] == DUMMY_PARENT) {
			strcpy(parent->name, "dummy_name");
			parent->flag = 0;
		} else {
			parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
			/* Skip (but do not fail on) unresolvable parent names */
			if (versal_get_clock_name(parent->id, parent->name))
				continue;
		}
		debug("parent name:%s\n", parent->name);
		*nparent += 1;
	}

	return 0;
}
341
342static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
343 u32 *num_parents)
344{
345 int j = 0, ret;
346 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
347
348 *num_parents = 0;
349 do {
350 /* Get parents from firmware */
351 ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
352 pm_resp);
353 if (ret)
354 return ret;
355
356 ret = __versal_clock_get_parents(&parents[j], pm_resp,
357 num_parents);
358 if (ret == END_OF_PARENTS)
359 return 0;
360 j += PM_API_PAYLOAD_LEN;
361 } while (*num_parents <= MAX_PARENT);
362
363 return 0;
364}
365
366static u32 versal_clock_get_div(u32 clk_id)
367{
368 u32 ret_payload[PAYLOAD_ARG_CNT];
369 u32 div;
370
Michal Simek142fb5b2019-10-04 15:52:43 +0200371 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530372 div = ret_payload[1];
373
374 return div;
375}
376
377static u32 versal_clock_set_div(u32 clk_id, u32 div)
378{
379 u32 ret_payload[PAYLOAD_ARG_CNT];
380
Michal Simek142fb5b2019-10-04 15:52:43 +0200381 xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530382
383 return div;
384}
385
/*
 * versal_clock_ref() - Rate of the reference input feeding @clk_id
 *
 * Queries the firmware parent selection and maps it onto one of the
 * board reference rates read at probe time: bit0 clear -> ref_clk,
 * bit1 set -> pl_alt_ref_clk, otherwise 0 (unknown).
 *
 * NOTE(review): the selector is taken from ret_payload[0], which in
 * every other call here holds the SMC status word (data normally lands
 * in ret_payload[1]) - confirm against the firmware ABI.
 */
static u64 versal_clock_ref(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ref;

	xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0, ret_payload);
	ref = ret_payload[0];
	if (!(ref & 1))
		return ref_clk;
	if (ref & 2)
		return pl_alt_ref_clk;
	return 0;
}
399
400static u64 versal_clock_get_pll_rate(u32 clk_id)
401{
402 u32 ret_payload[PAYLOAD_ARG_CNT];
403 u32 fbdiv;
404 u32 res;
405 u32 frac;
406 u64 freq;
407 u32 parent_rate, parent_id;
408 u32 id = clk_id & 0xFFF;
409
Michal Simek142fb5b2019-10-04 15:52:43 +0200410 xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530411 res = ret_payload[1];
412 if (!res) {
413 printf("0%x PLL not enabled\n", clk_id);
414 return 0;
415 }
416
417 parent_id = clock[clock[id].parent[0].id].clk_id;
418 parent_rate = versal_clock_ref(parent_id);
419
Michal Simek142fb5b2019-10-04 15:52:43 +0200420 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530421 fbdiv = ret_payload[1];
Michal Simek142fb5b2019-10-04 15:52:43 +0200422 xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
Siva Durga Prasad Paladuguf7a71202019-06-23 12:24:57 +0530423 frac = ret_payload[1];
424
425 freq = (fbdiv * parent_rate) >> (1 << frac);
426
427 return freq;
428}
429
430static u32 versal_clock_mux(u32 clk_id)
431{
432 int i;
433 u32 id = clk_id & 0xFFF;
434
435 for (i = 0; i < clock[id].num_nodes; i++)
436 if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
437 return 1;
438
439 return 0;
440}
441
/*
 * versal_clock_get_parentid() - Firmware clock id of @clk_id's active parent
 *
 * For clocks with a mux node the selected parent index is queried from
 * firmware; clocks without a mux use parent slot 0.
 *
 * NOTE(review): the firmware-returned index is not bounds-checked
 * against num_parents before indexing parent[] - confirm trusted input.
 */
static u32 versal_clock_get_parentid(u32 clk_id)
{
	u32 parent_id = 0;
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 id = clk_id & 0xFFF;

	if (versal_clock_mux(clk_id)) {
		xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
				  ret_payload);
		parent_id = ret_payload[1];
	}

	debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
	return clock[clock[id].parent[parent_id].id].clk_id;
}
457
458static u32 versal_clock_gate(u32 clk_id)
459{
460 u32 id = clk_id & 0xFFF;
461 int i;
462
463 for (i = 0; i < clock[id].num_nodes; i++)
464 if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
465 return 1;
466
467 return 0;
468}
469
470static u32 versal_clock_div(u32 clk_id)
471{
472 int i;
473 u32 id = clk_id & 0xFFF;
474
475 for (i = 0; i < clock[id].num_nodes; i++)
476 if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
477 return 1;
478
479 return 0;
480}
481
482static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
483{
484 if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
485 NODE_SUBCLASS_CLOCK_PLL &&
486 ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
487 NODE_CLASS_CLOCK) {
488 *clk_rate = versal_clock_get_pll_rate(clk_id);
489 return 1;
490 }
491
492 return 0;
493}
494
/*
 * versal_clock_calc() - Recursively compute the rate of @clk_id
 *
 * PLL nodes are evaluated directly; a reference-class parent terminates
 * the recursion at a board input rate; otherwise the parent's rate is
 * computed first and divided by this clock's divider, if it has one.
 */
static u64 versal_clock_calc(u32 clk_id)
{
	u32 parent_id;
	u64 clk_rate;
	u32 div;

	/* PLL output: rate comes straight from the PLL parameters */
	if (versal_clock_pll(clk_id, &clk_rate))
		return clk_rate;

	parent_id = versal_clock_get_parentid(clk_id);
	/* Reference-class parent: use the fixed board input rate */
	if (((parent_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		return versal_clock_ref(clk_id);

	if (!parent_id)
		return 0;

	clk_rate = versal_clock_calc(parent_id);

	/* Apply this node's own divider, if present */
	if (versal_clock_div(clk_id)) {
		div = versal_clock_get_div(clk_id);
		clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
	}

	return clk_rate;
}
521
/*
 * versal_clock_get_rate() - Compute the current rate of @clk_id
 * @clk_id:   composite firmware clock id
 * @clk_rate: output; written for ref-, pll- and out-class clocks
 *
 * Return: 0 on success; -EINVAL for an output clock that has neither a
 * gate nor a mux node (its rate cannot be derived). Note that 0 is also
 * returned, with @clk_rate untouched, for ids matching no handled class.
 */
static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
{
	/* Reference clocks resolve directly to a board input rate */
	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		*clk_rate = versal_clock_ref(clk_id);

	if (versal_clock_pll(clk_id, clk_rate))
		return 0;

	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
	    ((clk_id >> NODE_CLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
		if (!versal_clock_gate(clk_id) && !versal_clock_mux(clk_id))
			return -EINVAL;
		*clk_rate = versal_clock_calc(clk_id);
		return 0;
	}

	return 0;
}
543
544int soc_clk_dump(void)
545{
546 u64 clk_rate = 0;
547 u32 type, ret, i = 0;
548
549 printf("\n ****** VERSAL CLOCKS *****\n");
550
551 printf("alt_ref_clk:%ld pl_alt_ref_clk:%ld ref_clk:%ld\n",
552 alt_ref_clk, pl_alt_ref_clk, ref_clk);
553 for (i = 0; i < clock_max_idx; i++) {
554 debug("%s\n", clock[i].clk_name);
555 ret = versal_get_clock_type(i, &type);
556 if (ret || type != CLK_TYPE_OUTPUT)
557 continue;
558
559 ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);
560
561 if (ret != -EINVAL)
562 printf("clk: %s freq:%lld\n",
563 clock[i].clk_name, clk_rate);
564 }
565
566 return 0;
567}
568
/*
 * versal_get_clock_info() - Populate the global clock[] table from firmware
 *
 * First pass: query the attribute word for every clock index and derive
 * validity, type (output/external), the composite firmware clk_id and
 * the clock name. Second pass: for output clocks only, fetch the
 * topology node list and the parent list. Failures on individual clocks
 * are skipped, leaving that entry partially populated.
 */
static void versal_get_clock_info(void)
{
	int i, ret;
	u32 attr, type = 0, nodetype, subclass, class;

	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_pm_clock_get_attributes(i, &attr);
		if (ret)
			continue;

		clock[i].valid = attr & CLK_VALID_MASK;

		/* skip query for Invalid clock */
		ret = versal_is_valid_clock(i);
		if (ret != CLK_VALID_MASK)
			continue;

		clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
				CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
		nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
		subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
		class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;

		/* Rebuild the composite firmware node id for this index */
		clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
				  (subclass << NODE_SUBCLASS_SHIFT) |
				  (nodetype << NODE_TYPE_SHIFT) |
				  (i << NODE_INDEX_SHIFT);

		ret = versal_pm_clock_get_name(clock[i].clk_id,
					       clock[i].clk_name);
		if (ret)
			continue;
		debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
		      clock[i].clk_name, clock[i].valid,
		      clock[i].type, clock[i].clk_id);
	}

	/* Get topology of all clock */
	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;
		debug("clk name:%s\n", clock[i].clk_name);
		ret = versal_clock_get_topology(i, clock[i].node,
						&clock[i].num_nodes);
		if (ret)
			continue;

		ret = versal_clock_get_parents(i, clock[i].parent,
					       &clock[i].num_parents);
		if (ret)
			continue;
	}
}
623
624int versal_clock_setup(void)
625{
626 int ret;
627
628 ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
629 if (ret)
630 return ret;
631
632 debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
633 clock = calloc(clock_max_idx, sizeof(*clock));
634 if (!clock)
635 return -ENOMEM;
636
637 versal_get_clock_info();
638
639 return 0;
640}
641
642static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
643 ulong *freq)
644{
645 struct clk clk;
646 int ret;
647
648 ret = clk_get_by_name(dev, name, &clk);
649 if (ret < 0) {
650 dev_err(dev, "failed to get %s\n", name);
651 return ret;
652 }
653
654 *freq = clk_get_rate(&clk);
655 if (IS_ERR_VALUE(*freq)) {
656 dev_err(dev, "failed to get rate %s\n", name);
657 return -EINVAL;
658 }
659
660 return 0;
661}
662
663static int versal_clk_probe(struct udevice *dev)
664{
665 int ret;
666 struct versal_clk_priv *priv = dev_get_priv(dev);
667
668 debug("%s\n", __func__);
669
670 ret = versal_clock_get_freq_by_name("alt_ref_clk", dev, &alt_ref_clk);
671 if (ret < 0)
672 return -EINVAL;
673
674 ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
675 dev, &pl_alt_ref_clk);
676 if (ret < 0)
677 return -EINVAL;
678
679 ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
680 if (ret < 0)
681 return -EINVAL;
682
683 versal_clock_setup();
684
685 priv->clk = clock;
686
687 return ret;
688}
689
690static ulong versal_clk_get_rate(struct clk *clk)
691{
692 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
693 u32 id = clk->id;
694 u32 clk_id;
695 u64 clk_rate = 0;
696
697 debug("%s\n", __func__);
698
699 clk_id = priv->clk[id].clk_id;
700
701 versal_clock_get_rate(clk_id, &clk_rate);
702
703 return clk_rate;
704}
705
/*
 * versal_clk_set_rate() - UCLASS_CLK set_rate hook
 *
 * Walks up the clock tree from @clk until a node with a divider is
 * found, then programs DIV_ROUND_CLOSEST(parent_rate, rate) into it.
 *
 * Return: the rate actually achieved; the unchanged current rate if no
 * divider exists before the reference clock; 0 if the current rate
 * could not be read.
 */
static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;
	u32 div;
	int ret;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	ret = versal_clock_get_rate(clk_id, &clk_rate);
	if (ret) {
		printf("Clock is not a Gate:0x%x\n", clk_id);
		return 0;
	}

	do {
		if (versal_clock_div(clk_id)) {
			div = versal_clock_get_div(clk_id);
			/* Undo the current divider to get the parent rate */
			clk_rate *= div;
			div = DIV_ROUND_CLOSEST(clk_rate, rate);
			versal_clock_set_div(clk_id, div);
			debug("%s, div:%d, newrate:%lld\n", __func__,
			      div, DIV_ROUND_CLOSEST(clk_rate, div));
			return DIV_ROUND_CLOSEST(clk_rate, div);
		}
		/* No divider at this node - move up to the parent and retry */
		clk_id = versal_clock_get_parentid(clk_id);
	} while (((clk_id >> NODE_SUBCLASS_SHIFT) &
		 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);

	printf("Clock didn't has Divisors:0x%x\n", priv->clk[id].clk_id);

	return clk_rate;
}
743
/* UCLASS_CLK operations: only rate get/set are implemented */
static struct clk_ops versal_clk_ops = {
	.set_rate = versal_clk_set_rate,
	.get_rate = versal_clk_get_rate,
};

/* Device-tree compatible match table */
static const struct udevice_id versal_clk_ids[] = {
	{ .compatible = "xlnx,versal-clk" },
	{ }
};

/* Driver-model registration for the Versal firmware clock controller */
U_BOOT_DRIVER(versal_clk) = {
	.name = "versal-clk",
	.id = UCLASS_CLK,
	.of_match = versal_clk_ids,
	.probe = versal_clk_probe,
	.ops = &versal_clk_ops,
	.priv_auto_alloc_size = sizeof(struct versal_clk_priv),
};