// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2019 Xilinx, Inc.
 * Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
 */

#include <common.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <malloc.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <asm/arch/sys_proto.h>
#include <zynqmp_firmware.h>
#include <linux/err.h>

#define MAX_PARENT			100
#define MAX_NODES			6
#define MAX_NAME_LEN			50

#define CLK_TYPE_SHIFT			2

#define PM_API_PAYLOAD_LEN		3

#define NA_PARENT			0xFFFFFFFF
#define DUMMY_PARENT			0xFFFFFFFE

#define CLK_TYPE_FIELD_LEN		4
#define CLK_TOPOLOGY_NODE_OFFSET	16
#define NODES_PER_RESP			3

#define CLK_TYPE_FIELD_MASK		0xF
#define CLK_FLAG_FIELD_MASK		GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK	GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK	GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS		8

#define CLK_PARENTS_ID_LEN		16
#define CLK_PARENTS_ID_MASK		0xFFFF

#define END_OF_TOPOLOGY_NODE		1
#define END_OF_PARENTS			1

#define CLK_VALID_MASK			0x1
#define NODE_CLASS_SHIFT		26U
#define NODE_SUBCLASS_SHIFT		20U
#define NODE_TYPE_SHIFT			14U
#define NODE_INDEX_SHIFT		0U

#define CLK_GET_NAME_RESP_LEN		16
#define CLK_GET_TOPOLOGY_RESP_WORDS	3
#define CLK_GET_PARENTS_RESP_WORDS	3
#define CLK_GET_ATTR_RESP_WORDS		1

#define NODE_SUBCLASS_CLOCK_PLL		1
#define NODE_SUBCLASS_CLOCK_OUT		2
#define NODE_SUBCLASS_CLOCK_REF		3

#define NODE_CLASS_CLOCK		2
#define NODE_CLASS_MASK			0x3F

#define CLOCK_NODE_TYPE_MUX		1
#define CLOCK_NODE_TYPE_DIV		4
#define CLOCK_NODE_TYPE_GATE		6

enum pm_query_id {
	PM_QID_INVALID,
	PM_QID_CLOCK_GET_NAME,
	PM_QID_CLOCK_GET_TOPOLOGY,
	PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
	PM_QID_CLOCK_GET_PARENTS,
	PM_QID_CLOCK_GET_ATTRIBUTES,
	PM_QID_PINCTRL_GET_NUM_PINS,
	PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
	PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_FUNCTION_NAME,
	PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_PIN_GROUPS,
	PM_QID_CLOCK_GET_NUM_CLOCKS,
	PM_QID_CLOCK_GET_MAX_DIVISOR,
};

enum clk_type {
	CLK_TYPE_OUTPUT,
	CLK_TYPE_EXTERNAL,
};

struct clock_parent {
	char name[MAX_NAME_LEN];
	int id;
	u32 flag;
};

struct clock_topology {
	u32 type;
	u32 flag;
	u32 type_flag;
};

struct versal_clock {
	char clk_name[MAX_NAME_LEN];
	u32 valid;
	enum clk_type type;
	struct clock_topology node[MAX_NODES];
	u32 num_nodes;
	struct clock_parent parent[MAX_PARENT];
	u32 num_parents;
	u32 clk_id;
};

struct versal_clk_priv {
	struct versal_clock *clk;
};

static ulong pl_alt_ref_clk;
static ulong ref_clk;

struct versal_pm_query_data {
	u32 qid;
	u32 arg1;
	u32 arg2;
	u32 arg3;
};

static struct versal_clock *clock;
static unsigned int clock_max_idx;

#define PM_QUERY_DATA	35

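/*
 * versal_pm_query() - issue a PM_QUERY_DATA EEMI call to the PMC firmware
 * over SMC and unpack the 64-bit return registers into 32-bit payload words.
 * PM_QID_CLOCK_GET_NAME returns only the name in the payload, with no status
 * word, so it is always treated as success.
 */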
static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
{
	struct pt_regs regs;

	regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
	regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
	regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;

	smc_call(&regs);

	if (ret_payload) {
		ret_payload[0] = (u32)regs.regs[0];
		ret_payload[1] = upper_32_bits(regs.regs[0]);
		ret_payload[2] = (u32)regs.regs[1];
		ret_payload[3] = upper_32_bits(regs.regs[1]);
		ret_payload[4] = (u32)regs.regs[2];
	}

	return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
}

static inline int versal_is_valid_clock(u32 clk_id)
{
	if (clk_id >= clock_max_idx)
		return -ENODEV;

	return clock[clk_id].valid;
}

static int versal_get_clock_name(u32 clk_id, char *clk_name)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_get_clock_type(u32 clk_id, u32 *type)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		*type = clock[clk_id].type;
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_pm_clock_get_num_clocks(u32 *nclocks)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;

	ret = versal_pm_query(qdata, ret_payload);
	*nclocks = ret_payload[1];

	return ret;
}

static int versal_pm_clock_get_name(u32 clock_id, char *name)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NAME;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	if (ret)
		return ret;
	memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);

	return 0;
}

static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_PARENTS;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);

	return ret;
}

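/*
 * __versal_clock_get_topology() - decode one PM_QID_CLOCK_GET_TOPOLOGY
 * response (three 32-bit words, one topology node each) into the topology
 * array. Returns END_OF_TOPOLOGY_NODE as soon as a word with an empty type
 * field is seen, i.e. the clock has no further nodes.
 */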
static int __versal_clock_get_topology(struct clock_topology *topology,
				       u32 *data, u32 *nnodes)
{
	int i;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (!(data[i] & CLK_TYPE_FIELD_MASK))
			return END_OF_TOPOLOGY_NODE;
		topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
		topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
						   data[i]);
		topology[*nnodes].type_flag =
				FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
		topology[*nnodes].type_flag |=
			FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
			CLK_TYPE_FLAG_BITS;
		debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
		      topology[*nnodes].type, topology[*nnodes].flag,
		      topology[*nnodes].type_flag);
		(*nnodes)++;
	}

	return 0;
}

static int versal_clock_get_topology(u32 clk_id,
				     struct clock_topology *topology,
				     u32 *num_nodes)
{
	int j, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_nodes = 0;
	for (j = 0; j <= MAX_NODES; j += 3) {
		ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
						   pm_resp);
		if (ret)
			return ret;
		ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
		if (ret == END_OF_TOPOLOGY_NODE)
			return 0;
	}

	return 0;
}

static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
				      u32 *nparent)
{
	int i;
	struct clock_parent *parent;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (data[i] == NA_PARENT)
			return END_OF_PARENTS;

		parent = &parents[i];
		parent->id = data[i] & CLK_PARENTS_ID_MASK;
		if (data[i] == DUMMY_PARENT) {
			strcpy(parent->name, "dummy_name");
			parent->flag = 0;
		} else {
			parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
			if (versal_get_clock_name(parent->id, parent->name))
				continue;
		}
		debug("parent name:%s\n", parent->name);
		*nparent += 1;
	}

	return 0;
}

static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
				    u32 *num_parents)
{
	int j = 0, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_parents = 0;
	do {
		/* Get parents from firmware */
		ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
						  pm_resp);
		if (ret)
			return ret;

		ret = __versal_clock_get_parents(&parents[j], pm_resp,
						 num_parents);
		if (ret == END_OF_PARENTS)
			return 0;
		j += PM_API_PAYLOAD_LEN;
	} while (*num_parents <= MAX_PARENT);

	return 0;
}

static u32 versal_clock_get_div(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 div;

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	div = ret_payload[1];

	return div;
}

static u32 versal_clock_set_div(u32 clk_id, u32 div)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];

	xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);

	return div;
}

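/*
 * versal_clock_ref() - pick the board reference rate for a clock. The low
 * bits of the PM_CLOCK_GETPARENT response select between ref_clk and
 * pl_alt_ref_clk, both read from the device tree at probe time.
 */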
static u64 versal_clock_ref(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ref;

	xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0, ret_payload);
	ref = ret_payload[0];
	if (!(ref & 1))
		return ref_clk;
	if (ref & 2)
		return pl_alt_ref_clk;
	return 0;
}

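/*
 * versal_clock_get_pll_rate() - compute a PLL output rate from firmware
 * data: the reference parent rate is multiplied by the feedback divider and
 * scaled down according to the fractional-mode parameter. Returns 0 when
 * the PLL is reported as disabled.
 */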
static u64 versal_clock_get_pll_rate(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 fbdiv;
	u32 res;
	u32 frac;
	u64 freq;
	u32 parent_rate, parent_id;
	u32 id = clk_id & 0xFFF;

	xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
	res = ret_payload[1];
	if (!res) {
		printf("0x%x PLL not enabled\n", clk_id);
		return 0;
	}

	parent_id = clock[clock[id].parent[0].id].clk_id;
	parent_rate = versal_clock_ref(parent_id);

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	fbdiv = ret_payload[1];
	xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
	frac = ret_payload[1];

	freq = (fbdiv * parent_rate) >> (1 << frac);

	return freq;
}

static u32 versal_clock_mux(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
			return 1;

	return 0;
}

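/*
 * versal_clock_get_parentid() - resolve the active parent of a clock. For
 * clocks with a mux node the parent index is read back from the firmware;
 * otherwise parent 0 is used. Returns the parent's full clock ID.
 */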
static u32 versal_clock_get_parentid(u32 clk_id)
{
	u32 parent_id = 0;
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 id = clk_id & 0xFFF;

	if (versal_clock_mux(clk_id)) {
		xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
				  ret_payload);
		parent_id = ret_payload[1];
	}

	debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
	return clock[clock[id].parent[parent_id].id].clk_id;
}

static u32 versal_clock_gate(u32 clk_id)
{
	u32 id = clk_id & 0xFFF;
	int i;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
			return 1;

	return 0;
}

static u32 versal_clock_div(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
			return 1;

	return 0;
}

static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_SUBCLASS_CLOCK_PLL &&
	    ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_CLASS_CLOCK) {
		*clk_rate = versal_clock_get_pll_rate(clk_id);
		return 1;
	}

	return 0;
}

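/*
 * versal_clock_calc() - recursively compute the rate of an output clock.
 * PLLs and reference clocks terminate the recursion; otherwise the rate of
 * the active parent is taken and divided by this clock's divider, if it
 * has one.
 */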
static u64 versal_clock_calc(u32 clk_id)
{
	u32 parent_id;
	u64 clk_rate;
	u32 div;

	if (versal_clock_pll(clk_id, &clk_rate))
		return clk_rate;

	parent_id = versal_clock_get_parentid(clk_id);
	if (((parent_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		return versal_clock_ref(clk_id);

	if (!parent_id)
		return 0;

	clk_rate = versal_clock_calc(parent_id);

	if (versal_clock_div(clk_id)) {
		div = versal_clock_get_div(clk_id);
		clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
	}

	return clk_rate;
}

static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		*clk_rate = versal_clock_ref(clk_id);

	if (versal_clock_pll(clk_id, clk_rate))
		return 0;

	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
	    ((clk_id >> NODE_CLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
		if (!versal_clock_gate(clk_id) && !versal_clock_mux(clk_id))
			return -EINVAL;
		*clk_rate = versal_clock_calc(clk_id);
		return 0;
	}

	return 0;
}

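/*
 * soc_clk_dump() - back the 'clk dump' command: print the two board
 * reference rates and the computed frequency of every valid output clock
 * known to the firmware.
 */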
int soc_clk_dump(void)
{
	u64 clk_rate = 0;
	u32 type, ret, i = 0;

	printf("\n ****** VERSAL CLOCKS *****\n");

	printf("pl_alt_ref_clk:%ld ref_clk:%ld\n", pl_alt_ref_clk, ref_clk);
	for (i = 0; i < clock_max_idx; i++) {
		debug("%s\n", clock[i].clk_name);
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;

		ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);

		if (ret != -EINVAL)
			printf("clk: %s  freq:%lld\n",
			       clock[i].clk_name, clk_rate);
	}

	return 0;
}

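/*
 * versal_get_clock_info() - build the local clock database: query the
 * firmware for each clock's attributes, name, topology nodes and parent
 * list, and cache them in the 'clock' array for later rate calculations.
 */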
static void versal_get_clock_info(void)
{
	int i, ret;
	u32 attr, type = 0, nodetype, subclass, class;

	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_pm_clock_get_attributes(i, &attr);
		if (ret)
			continue;

		clock[i].valid = attr & CLK_VALID_MASK;

		/* Skip queries for invalid clocks */
		ret = versal_is_valid_clock(i);
		if (ret != CLK_VALID_MASK)
			continue;

		clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
				CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
		nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
		subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
		class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;

		clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
				  (subclass << NODE_SUBCLASS_SHIFT) |
				  (nodetype << NODE_TYPE_SHIFT) |
				  (i << NODE_INDEX_SHIFT);

		ret = versal_pm_clock_get_name(clock[i].clk_id,
					       clock[i].clk_name);
		if (ret)
			continue;
		debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
		      clock[i].clk_name, clock[i].valid,
		      clock[i].type, clock[i].clk_id);
	}

	/* Get the topology of all clocks */
	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;
		debug("clk name:%s\n", clock[i].clk_name);
		ret = versal_clock_get_topology(i, clock[i].node,
						&clock[i].num_nodes);
		if (ret)
			continue;

		ret = versal_clock_get_parents(i, clock[i].parent,
					       &clock[i].num_parents);
		if (ret)
			continue;
	}
}

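/*
 * versal_clock_setup() - ask the firmware how many clocks exist, allocate
 * the clock database and populate it via versal_get_clock_info().
 */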
int versal_clock_setup(void)
{
	int ret;

	ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
	if (ret)
		return ret;

	debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
	clock = calloc(clock_max_idx, sizeof(*clock));
	if (!clock)
		return -ENOMEM;

	versal_get_clock_info();

	return 0;
}

static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
					 ulong *freq)
{
	struct clk clk;
	int ret;

	ret = clk_get_by_name(dev, name, &clk);
	if (ret < 0) {
		dev_err(dev, "failed to get %s\n", name);
		return ret;
	}

	*freq = clk_get_rate(&clk);
	if (IS_ERR_VALUE(*freq)) {
		dev_err(dev, "failed to get rate %s\n", name);
		return -EINVAL;
	}

	return 0;
}

static int versal_clk_probe(struct udevice *dev)
{
	int ret;
	struct versal_clk_priv *priv = dev_get_priv(dev);

	debug("%s\n", __func__);

	ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
					    dev, &pl_alt_ref_clk);
	if (ret < 0)
		return -EINVAL;

	ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
	if (ret < 0)
		return -EINVAL;

	versal_clock_setup();

	priv->clk = clock;

	return ret;
}

static ulong versal_clk_get_rate(struct clk *clk)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	versal_clock_get_rate(clk_id, &clk_rate);

	return clk_rate;
}

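/*
 * versal_clk_set_rate() - walk from the requested clock towards its
 * reference clock until a node with a divider is found, then program that
 * divider so the resulting rate is as close as possible to the requested
 * one. Clocks without any divider in their chain are left unchanged.
 */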
static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;
	u32 div;
	int ret;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	ret = versal_clock_get_rate(clk_id, &clk_rate);
	if (ret) {
		printf("Clock is not a gate:0x%x\n", clk_id);
		return 0;
	}

	do {
		if (versal_clock_div(clk_id)) {
			div = versal_clock_get_div(clk_id);
			clk_rate *= div;
			div = DIV_ROUND_CLOSEST(clk_rate, rate);
			versal_clock_set_div(clk_id, div);
			debug("%s, div:%d, newrate:%lld\n", __func__,
			      div, DIV_ROUND_CLOSEST(clk_rate, div));
			return DIV_ROUND_CLOSEST(clk_rate, div);
		}
		clk_id = versal_clock_get_parentid(clk_id);
	} while (((clk_id >> NODE_SUBCLASS_SHIFT) &
		 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);

	printf("Clock does not have a divisor:0x%x\n", priv->clk[id].clk_id);

	return clk_rate;
}

static struct clk_ops versal_clk_ops = {
	.set_rate = versal_clk_set_rate,
	.get_rate = versal_clk_get_rate,
};

static const struct udevice_id versal_clk_ids[] = {
	{ .compatible = "xlnx,versal-clk" },
	{ }
};

U_BOOT_DRIVER(versal_clk) = {
	.name = "versal-clk",
	.id = UCLASS_CLK,
	.of_match = versal_clk_ids,
	.probe = versal_clk_probe,
	.ops = &versal_clk_ops,
	.priv_auto_alloc_size = sizeof(struct versal_clk_priv),
};