// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2019 Xilinx, Inc.
 * Siva Durga Prasad Paladugu <siva.durga.prasad.paladugu@amd.com>
 */

#include <common.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <malloc.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <asm/arch/sys_proto.h>
#include <zynqmp_firmware.h>
#include <linux/err.h>

#define MAX_PARENT 100
#define MAX_NODES 6
#define MAX_NAME_LEN 50

#define CLK_TYPE_SHIFT 2

#define PM_API_PAYLOAD_LEN 3

#define NA_PARENT 0xFFFFFFFF
#define DUMMY_PARENT 0xFFFFFFFE

#define CLK_TYPE_FIELD_LEN 4
#define CLK_TOPOLOGY_NODE_OFFSET 16
#define NODES_PER_RESP 3

#define CLK_TYPE_FIELD_MASK 0xF
#define CLK_FLAG_FIELD_MASK GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS 8

#define CLK_PARENTS_ID_LEN 16
#define CLK_PARENTS_ID_MASK 0xFFFF

#define END_OF_TOPOLOGY_NODE 1
#define END_OF_PARENTS 1

#define CLK_VALID_MASK 0x1
#define NODE_CLASS_SHIFT 26U
#define NODE_SUBCLASS_SHIFT 20U
#define NODE_TYPE_SHIFT 14U
#define NODE_INDEX_SHIFT 0U

#define CLK_GET_NAME_RESP_LEN 16
#define CLK_GET_TOPOLOGY_RESP_WORDS 3
#define CLK_GET_PARENTS_RESP_WORDS 3
#define CLK_GET_ATTR_RESP_WORDS 1

#define NODE_SUBCLASS_CLOCK_PLL 1
#define NODE_SUBCLASS_CLOCK_OUT 2
#define NODE_SUBCLASS_CLOCK_REF 3

#define NODE_CLASS_CLOCK 2
#define NODE_CLASS_MASK 0x3F

#define CLOCK_NODE_TYPE_MUX 1
#define CLOCK_NODE_TYPE_DIV 4
#define CLOCK_NODE_TYPE_GATE 6

#define PM_CLK_REF_CLK (0x830c06aU)
#define PM_CLK_PL_ALT_REF_CLK (0x830c06bU)
#define PM_CLK_MUXED_IRO (0x830c06cU)
#define PM_CLK_EMIO (0x830c071U)

#define TOPOLOGY_TYPE_FIXEDFACTOR 0x3

enum clk_type {
	CLK_TYPE_OUTPUT,
	CLK_TYPE_EXTERNAL,
};

struct clock_parent {
	char name[MAX_NAME_LEN];
	int id;
	u32 flag;
};

struct clock_topology {
	u32 type;
	u32 flag;
	u32 type_flag;
};

struct versal_clock {
	char clk_name[MAX_NAME_LEN];
	u32 valid;
	enum clk_type type;
	struct clock_topology node[MAX_NODES];
	u32 num_nodes;
	struct clock_parent parent[MAX_PARENT];
	u32 num_parents;
	u32 clk_id;
};

struct versal_clk_priv {
	struct versal_clock *clk;
};

static ulong pl_alt_ref_clk;
static ulong ref_clk;

struct versal_pm_query_data {
	u32 qid;
	u32 arg1;
	u32 arg2;
	u32 arg3;
};

static struct versal_clock *clock;
static unsigned int clock_max_idx;

#define PM_QUERY_DATA 35

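/*
 * versal_pm_query() - Issue a PM_QUERY_DATA request to the firmware
 *
 * The query ID and its three arguments are packed two-per-register into the
 * SMC argument registers, and up to five 32-bit response words are unpacked
 * from the returned registers. PM_QID_CLOCK_GET_NAME carries the name in the
 * payload instead of a status word, so it is always treated as success.
 */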
static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
{
	struct pt_regs regs;

	regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
	regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
	regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;

	smc_call(&regs);

	if (ret_payload) {
		ret_payload[0] = (u32)regs.regs[0];
		ret_payload[1] = upper_32_bits(regs.regs[0]);
		ret_payload[2] = (u32)regs.regs[1];
		ret_payload[3] = upper_32_bits(regs.regs[1]);
		ret_payload[4] = (u32)regs.regs[2];
	}

	return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
}

static inline int versal_is_valid_clock(u32 clk_id)
{
	if (clk_id >= clock_max_idx)
		return -ENODEV;

	return clock[clk_id].valid;
}

static int versal_get_clock_name(u32 clk_id, char *clk_name)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_get_clock_type(u32 clk_id, u32 *type)
{
	int ret;

	ret = versal_is_valid_clock(clk_id);
	if (ret == 1) {
		*type = clock[clk_id].type;
		return 0;
	}

	return ret == 0 ? -EINVAL : ret;
}

static int versal_pm_clock_get_num_clocks(u32 *nclocks)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;

	ret = versal_pm_query(qdata, ret_payload);
	*nclocks = ret_payload[1];

	return ret;
}

static int versal_pm_clock_get_name(u32 clock_id, char *name)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_NAME;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	if (ret)
		return ret;
	memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);

	return 0;
}

static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_PARENTS;
	qdata.arg1 = clock_id;
	qdata.arg2 = index;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);

	return ret;
}

static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);

	return ret;
}

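/*
 * __versal_clock_get_topology() - Decode one firmware response into
 * topology nodes
 *
 * Each PM_QID_CLOCK_GET_TOPOLOGY response carries up to PM_API_PAYLOAD_LEN
 * packed words; a zero type field marks the end of the node list.
 */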
static int __versal_clock_get_topology(struct clock_topology *topology,
				       u32 *data, u32 *nnodes)
{
	int i;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (!(data[i] & CLK_TYPE_FIELD_MASK))
			return END_OF_TOPOLOGY_NODE;
		topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
		topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
						   data[i]);
		topology[*nnodes].type_flag =
			FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
		topology[*nnodes].type_flag |=
			FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
			CLK_TYPE_FLAG_BITS;
		debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
		      topology[*nnodes].type, topology[*nnodes].flag,
		      topology[*nnodes].type_flag);
		(*nnodes)++;
	}

	return 0;
}

static int versal_clock_get_topology(u32 clk_id,
				     struct clock_topology *topology,
				     u32 *num_nodes)
{
	int j, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_nodes = 0;
	for (j = 0; j <= MAX_NODES; j += 3) {
		ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
						   pm_resp);
		if (ret)
			return ret;
		ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
		if (ret == END_OF_TOPOLOGY_NODE)
			return 0;
	}

	return 0;
}

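/*
 * __versal_clock_get_parents() - Decode one firmware response into parent
 * entries
 *
 * NA_PARENT terminates the parent list and DUMMY_PARENT marks a placeholder
 * entry; the lower 16 bits of each word carry the parent clock index and the
 * upper 16 bits the parent flags.
 */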
static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
				      u32 *nparent)
{
	int i;
	struct clock_parent *parent;

	for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
		if (data[i] == NA_PARENT)
			return END_OF_PARENTS;

		parent = &parents[i];
		parent->id = data[i] & CLK_PARENTS_ID_MASK;
		if (data[i] == DUMMY_PARENT) {
			strcpy(parent->name, "dummy_name");
			parent->flag = 0;
		} else {
			parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
			if (versal_get_clock_name(parent->id, parent->name))
				continue;
		}
		debug("parent name:%s\n", parent->name);
		*nparent += 1;
	}

	return 0;
}

static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
				    u32 *num_parents)
{
	int j = 0, ret;
	u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};

	*num_parents = 0;
	do {
		/* Get parents from firmware */
		ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
						  pm_resp);
		if (ret)
			return ret;

		ret = __versal_clock_get_parents(&parents[j], pm_resp,
						 num_parents);
		if (ret == END_OF_PARENTS)
			return 0;
		j += PM_API_PAYLOAD_LEN;
	} while (*num_parents <= MAX_PARENT);

	return 0;
}

static u32 versal_clock_get_div(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 div;

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	div = ret_payload[1];

	return div;
}

static u32 versal_clock_set_div(u32 clk_id, u32 div)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];

	xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);

	return div;
}

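/*
 * versal_clock_get_ref_rate() - Rate of an external reference clock
 *
 * Maps the firmware clock ID of an external input onto the ref_clk or
 * pl_alt_ref_clk rate read from the device tree during probe.
 */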
static u64 versal_clock_get_ref_rate(u32 clk_id)
{
	if (clk_id == PM_CLK_REF_CLK || clk_id == PM_CLK_MUXED_IRO || clk_id == PM_CLK_EMIO)
		return ref_clk;
	else if (clk_id == PM_CLK_PL_ALT_REF_CLK)
		return pl_alt_ref_clk;
	else
		return 0;
}

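/*
 * versal_clock_get_fixed_factor_rate() - Rate of a fixed-factor clock
 *
 * Queries PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS for the multiplier/divisor
 * pair and scales the parent reference rate with it.
 */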
static int versal_clock_get_fixed_factor_rate(u32 clock_id, u32 parent_id)
{
	struct versal_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 mult, div;
	u32 parent_rate;
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
	qdata.arg1 = clock_id;

	ret = versal_pm_query(qdata, ret_payload);
	if (ret)
		return ret;

	mult = ret_payload[1];
	div = ret_payload[2];

	parent_rate = versal_clock_get_ref_rate(parent_id);
	return parent_rate * mult / div;
}

static u32 versal_clock_mux(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
			return 1;

	return 0;
}

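/*
 * versal_clock_get_parentid() - Resolve the active parent of a clock
 *
 * For clocks with a mux node the selected parent index is read from the
 * firmware via PM_CLOCK_GETPARENT; otherwise parent 0 is used.
 */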
static u32 versal_clock_get_parentid(u32 clk_id)
{
	u32 parent_id = 0;
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 id = clk_id & 0xFFF;

	if (versal_clock_mux(clk_id)) {
		xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
				  ret_payload);
		parent_id = ret_payload[1];
	}

	debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
	return clock[clock[id].parent[parent_id].id].clk_id;
}

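/*
 * versal_clock_get_pll_rate() - Compute a PLL output rate
 *
 * Reads the feedback divider and the fractional-mode parameter from the
 * firmware and scales the grandparent reference rate accordingly; a
 * disabled PLL reports a rate of 0.
 */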
static u64 versal_clock_get_pll_rate(u32 clk_id)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 fbdiv;
	u32 res;
	u32 frac;
	u64 freq;
	u32 parent_rate, parent_id, parent_ref_clk_id;
	u32 id = clk_id & 0xFFF;

	xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
	res = ret_payload[1];
	if (!res) {
		printf("0x%x PLL not enabled\n", clk_id);
		return 0;
	}

	parent_id = clock[clock[id].parent[0].id].clk_id;
	parent_ref_clk_id = versal_clock_get_parentid(parent_id);
	parent_rate = versal_clock_get_ref_rate(parent_ref_clk_id);

	xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
	fbdiv = ret_payload[1];
	xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
	frac = ret_payload[1];

	freq = (fbdiv * parent_rate) >> (1 << frac);

	return freq;
}

static u32 versal_clock_gate(u32 clk_id)
{
	u32 id = clk_id & 0xFFF;
	int i;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
			return 1;

	return 0;
}

static u32 versal_clock_div(u32 clk_id)
{
	int i;
	u32 id = clk_id & 0xFFF;

	for (i = 0; i < clock[id].num_nodes; i++)
		if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
			return 1;

	return 0;
}

static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_SUBCLASS_CLOCK_PLL &&
	    ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
	    NODE_CLASS_CLOCK) {
		*clk_rate = versal_clock_get_pll_rate(clk_id);
		return 1;
	}

	return 0;
}

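/*
 * versal_clock_calc() - Compute the rate of an output clock
 *
 * Walks up the parent chain recursively: PLLs and reference or fixed-factor
 * parents terminate the recursion, and a divider node on the clock itself
 * scales the computed parent rate.
 */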
static u64 versal_clock_calc(u32 clk_id)
{
	u32 parent_id;
	u64 clk_rate;
	u32 div;
	struct clock_topology topology;

	if (versal_clock_pll(clk_id, &clk_rate))
		return clk_rate;

	parent_id = versal_clock_get_parentid(clk_id);
	if (((parent_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF) {
		topology = clock[clk_id & 0x3FF].node[0];
		if (topology.type == TOPOLOGY_TYPE_FIXEDFACTOR)
			return versal_clock_get_fixed_factor_rate(clk_id, parent_id);
		return versal_clock_get_ref_rate(parent_id);
	}

	if (!parent_id)
		return 0;

	clk_rate = versal_clock_calc(parent_id);

	if (versal_clock_div(clk_id)) {
		div = versal_clock_get_div(clk_id);
		clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
	}

	return clk_rate;
}

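/*
 * versal_clock_get_rate() - Query the rate of a clock by firmware ID
 *
 * Handles reference clocks, PLLs and gated/muxed output clocks; output
 * clocks without a gate or a mux node are rejected with -EINVAL.
 */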
static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
{
	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
		*clk_rate = versal_clock_get_ref_rate(clk_id);

	if (versal_clock_pll(clk_id, clk_rate))
		return 0;

	if (((clk_id >> NODE_SUBCLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
	    ((clk_id >> NODE_CLASS_SHIFT) &
	     NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
		if (!versal_clock_gate(clk_id) && !versal_clock_mux(clk_id))
			return -EINVAL;
		*clk_rate = versal_clock_calc(clk_id);
		return 0;
	}

	return 0;
}

int soc_clk_dump(void)
{
	u64 clk_rate = 0;
	u32 type, ret, i = 0;

	printf("\n ****** VERSAL CLOCKS *****\n");

	printf("pl_alt_ref_clk:%ld ref_clk:%ld\n", pl_alt_ref_clk, ref_clk);
	for (i = 0; i < clock_max_idx; i++) {
		debug("%s\n", clock[i].clk_name);
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;

		ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);

		if (ret != -EINVAL)
			printf("clk: %s freq:%lld\n",
			       clock[i].clk_name, clk_rate);
	}

	return 0;
}

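/*
 * versal_get_clock_info() - Build the local clock database
 *
 * For every clock reported by the firmware, cache its attributes, name and
 * composed clock ID, then record the topology nodes and parent list of each
 * output clock for later rate queries.
 */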
static void versal_get_clock_info(void)
{
	int i, ret;
	u32 attr, type = 0, nodetype, subclass, class;

	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_pm_clock_get_attributes(i, &attr);
		if (ret)
			continue;

		clock[i].valid = attr & CLK_VALID_MASK;

		/* Skip queries for invalid clocks */
		ret = versal_is_valid_clock(i);
		if (ret != CLK_VALID_MASK)
			continue;

		clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
				CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
		nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
		subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
		class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;

		clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
				  (subclass << NODE_SUBCLASS_SHIFT) |
				  (nodetype << NODE_TYPE_SHIFT) |
				  (i << NODE_INDEX_SHIFT);

		ret = versal_pm_clock_get_name(clock[i].clk_id,
					       clock[i].clk_name);
		if (ret)
			continue;
		debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
		      clock[i].clk_name, clock[i].valid,
		      clock[i].type, clock[i].clk_id);
	}

	/* Get topology of all clocks */
	for (i = 0; i < clock_max_idx; i++) {
		ret = versal_get_clock_type(i, &type);
		if (ret || type != CLK_TYPE_OUTPUT)
			continue;
		debug("clk name:%s\n", clock[i].clk_name);
		ret = versal_clock_get_topology(i, clock[i].node,
						&clock[i].num_nodes);
		if (ret)
			continue;

		ret = versal_clock_get_parents(i, clock[i].parent,
					       &clock[i].num_parents);
		if (ret)
			continue;
	}
}

static int versal_clock_setup(void)
{
	int ret;

	ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
	if (ret)
		return ret;

	debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
	clock = calloc(clock_max_idx, sizeof(*clock));
	if (!clock)
		return -ENOMEM;

	versal_get_clock_info();

	return 0;
}

static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
					 ulong *freq)
{
	struct clk clk;
	int ret;

	ret = clk_get_by_name(dev, name, &clk);
	if (ret < 0) {
		dev_err(dev, "failed to get %s\n", name);
		return ret;
	}

	*freq = clk_get_rate(&clk);
	if (IS_ERR_VALUE(*freq)) {
		dev_err(dev, "failed to get rate %s\n", name);
		return -EINVAL;
	}

	return 0;
}

static int versal_clk_probe(struct udevice *dev)
{
	int ret;
	struct versal_clk_priv *priv = dev_get_priv(dev);

	debug("%s\n", __func__);

	ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
					    dev, &pl_alt_ref_clk);
	if (ret < 0)
		return -EINVAL;

	ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
	if (ret < 0)
		return -EINVAL;

	ret = versal_clock_setup();
	if (ret < 0)
		return ret;

	priv->clk = clock;

	return ret;
}

static ulong versal_clk_get_rate(struct clk *clk)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	versal_clock_get_rate(clk_id, &clk_rate);

	return clk_rate;
}

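/*
 * versal_clk_set_rate() - Set the rate of a clock
 *
 * Walks from the clock towards its reference parent until a divider node is
 * found, then programs that divider to approximate the requested rate and
 * returns the rate actually achieved.
 */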
static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 id = clk->id;
	u32 clk_id;
	u64 clk_rate = 0;
	u32 div;
	int ret;

	debug("%s\n", __func__);

	clk_id = priv->clk[id].clk_id;

	ret = versal_clock_get_rate(clk_id, &clk_rate);
	if (ret) {
		printf("Clock is not a Gate:0x%x\n", clk_id);
		return 0;
	}

	do {
		if (versal_clock_div(clk_id)) {
			div = versal_clock_get_div(clk_id);
			clk_rate *= div;
			div = DIV_ROUND_CLOSEST(clk_rate, rate);
			versal_clock_set_div(clk_id, div);
			debug("%s, div:%d, newrate:%lld\n", __func__,
			      div, DIV_ROUND_CLOSEST(clk_rate, div));
			return DIV_ROUND_CLOSEST(clk_rate, div);
		}
		clk_id = versal_clock_get_parentid(clk_id);
	} while (((clk_id >> NODE_SUBCLASS_SHIFT) &
		  NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);

	printf("Clock doesn't have a divisor:0x%x\n", priv->clk[id].clk_id);

	return clk_rate;
}

static int versal_clk_enable(struct clk *clk)
{
	struct versal_clk_priv *priv = dev_get_priv(clk->dev);
	u32 clk_id;

	clk_id = priv->clk[clk->id].clk_id;

	if (versal_clock_gate(clk_id))
		return xilinx_pm_request(PM_CLOCK_ENABLE, clk_id, 0, 0, 0, NULL);

	return 0;
}

static struct clk_ops versal_clk_ops = {
	.set_rate = versal_clk_set_rate,
	.get_rate = versal_clk_get_rate,
	.enable = versal_clk_enable,
};

static const struct udevice_id versal_clk_ids[] = {
	{ .compatible = "xlnx,versal-clk" },
	{ .compatible = "xlnx,versal-net-clk" },
	{ }
};

U_BOOT_DRIVER(versal_clk) = {
	.name = "versal-clk",
	.id = UCLASS_CLK,
	.of_match = versal_clk_ids,
	.probe = versal_clk_probe,
	.ops = &versal_clk_ops,
	.priv_auto = sizeof(struct versal_clk_priv),
};