// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
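/*
 * clk_get_by_driver_info() resolves a clock reference that the OF_PLATDATA
 * tooling (dt-structs) converted into a struct phandle_1_arg entry, so no
 * device tree is parsed at run time: the provider device is found via its
 * driver-info index and the single specifier cell becomes clk->id.
 */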
int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells,
			   struct clk *clk)
{
	int ret;

	ret = device_get_by_driver_info_idx(cells->idx, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells->arg[0];

	return 0;
}
# else
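/*
 * Default #clock-cells translation, used when a clock provider does not
 * supply its own of_xlate() op: at most one specifier cell is accepted and,
 * if present, it becomes clk->id; clk->data is always cleared.
 */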
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
55 debug("Invaild args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

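/*
 * Common tail shared by the clk_get_by_index*() variants: given the result
 * of the phandle lookup it binds the provider device, translates the
 * specifier cells into clk->id/clk->data (via of_xlate or the default above)
 * and finally calls clk_request() on the provider.
 */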
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
84 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}

static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
123 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

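/*
 * Illustrative use of clk_get_by_index() from a peripheral driver. The node
 * and numbers below are made up for the example, not taken from a real board:
 *
 *	uart0: serial@1000 {
 *		clocks = <&ccu 23>;
 *	};
 *
 *	struct clk clk;
 *	int ret;
 *
 *	ret = clk_get_by_index(dev, 0, &clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(&clk);
 */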
int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

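/*
 * Fetch every entry of the "clocks" property into bulk->clks, which is
 * devm-allocated against @dev. On failure the clocks obtained so far are
 * released again via clk_release_all() and the first error is returned.
 */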
int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
184 debug("%s: could release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

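/*
 * With CLK_CCF enabled, clock manipulation has to happen on the struct clk
 * registered by the provider itself, so look that instance up by its id;
 * otherwise the caller's local copy is used as-is.
 */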
static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
	struct clk *c = clk;

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		int ret = clk_get_by_id(clk->id, &c);

		if (ret) {
			debug("%s(): could not get parent clock pointer, id %lu\n",
			      __func__, clk->id);
			return ERR_PTR(ret);
		}
	}

	return c;
}

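/*
 * Apply the "assigned-clock-parents" property of @dev: each entry names the
 * new parent for the clock at the same index in "assigned-clocks". See
 * clk_set_defaults() below for the meaning of @stage.
 */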
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is the clk provider device trying to reparent itself.
		 * It cannot be done right now; it has to wait until after the
		 * device is probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

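/*
 * Apply the "assigned-clock-rates" property of @dev: each non-zero entry is
 * the rate to program on the clock at the same index in "assigned-clocks".
 * See clk_set_defaults() below for the meaning of @stage.
 */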
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk, *c;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is the clk provider device trying to program itself.
		 * It cannot be done right now; it has to wait until after the
		 * device is probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the clock rates twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_rate(c, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

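/*
 * Configure the default parents and rates requested by the device tree
 * properties "assigned-clocks", "assigned-clock-parents" and
 * "assigned-clock-rates". @stage 1 is passed from clk_uclass_post_probe()
 * below, once a clock provider has been probed; assignments a provider makes
 * to its own clocks are deferred to that stage, everything else is handled
 * in the earlier stage-0 call.
 *
 * Illustrative binding (made-up node and values, not from a real board):
 *
 *	mmc@0 {
 *		clocks = <&ccu CLK_MMC0>;
 *		assigned-clocks = <&ccu CLK_MMC0>;
 *		assigned-clock-parents = <&ccu CLK_PLL_PERIPH>;
 *		assigned-clock-rates = <52000000>;
 *	};
 */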
int clk_set_defaults(struct udevice *dev, int stage)
{
	int ret;

	if (!dev_has_ofnode(dev))
		return 0;

	/* If not in SPL and not yet relocated, don't take any action */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

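/*
 * Look up a clock by its connection name: the index of @name in the
 * "clock-names" property selects the matching entry of "clocks".
 */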
int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
Simon Glassb0ea7402016-10-02 17:59:28 -0600377 debug("fdt_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
# endif /* OF_PLATDATA */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

389 debug("%s(node=%p, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
395 debug("fdt_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret;

	ret = clk_get_by_name_nodev(node, name, clk);
	if (ret == -ENODATA)
		return 0;

	return ret;
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

#endif /* OF_CONTROL */

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set, or if the nocache flag is set */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_round_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;

	ops = clk_dev_ops(clk->dev);
	if (!ops->round_rate)
		return -ENOSYS;

	return ops->round_rate(clk, rate);
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	ret = ops->set_parent(clk, parent);
	if (ret)
		return ret;

	if (CONFIG_IS_ENABLED(CLK_CCF))
		ret = device_reparent(clk->dev, parent->dev);

	return ret;
}

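/*
 * Enable a clock. With CLK_CCF enabled this also keeps a per-clock
 * enable_count so that shared clocks are only switched on once, and the
 * registered parent clock is enabled first; without CCF the request is
 * passed straight to the provider's enable() op.
 */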
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Treat id 0 as an invalid clk, such as a dummy clock */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

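/*
 * Disable a clock. With CLK_CCF enabled the enable_count is decremented and
 * the clock (and then its parent) is only switched off once the count drops
 * to zero; clocks marked CLK_IS_CRITICAL are never disabled. Without CCF the
 * request is passed straight to the provider's disable() op.
 */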
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

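/*
 * Find the registered struct clk with the given id by scanning every device
 * in UCLASS_CLK. Used by the CCF handling above to map a clock id back to
 * the provider's own clk instance.
 */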
int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

Jean-Jacques Hiblot718039b2019-10-22 14:00:03 +0200731 /* trivial case #2: on the clk pointer is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

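/*
 * devres-managed variant of clk_get_by_name(): the struct clk is allocated
 * with devres_alloc() and released automatically (via clk_free()) when the
 * device goes away, unless it is dropped earlier with devm_clk_put().
 */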
struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENODATA)
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Called when a clock provider has been probed. Run clk_set_defaults()
	 * again at this point so that cases where the DT uses assigned-clocks
	 * to set up default parents and rates on the provider's own clocks
	 * are handled once the provider is usable.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};