// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#define LOG_CATEGORY UCLASS_CLK

#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}

ulong clk_get_id(const struct clk *clk)
{
	return (ulong)(clk->id & CLK_ID_MSK);
}

#if CONFIG_IS_ENABLED(OF_PLATDATA)
int clk_get_by_phandle(struct udevice *dev, const struct phandle_1_arg *cells,
		       struct clk *clk)
{
	int ret;

	ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
	if (ret)
		return ret;
	clk->id = CLK_ID(dev, cells->arg[0]);

	return 0;
}
#endif

#if CONFIG_IS_ENABLED(OF_REAL)
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = CLK_ID(clk->dev, args->args[0]);
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
93 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
94 __func__, ret);
		return log_msg_ret("get", ret);
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return log_msg_ret("xlate", ret);
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);

	return log_msg_ret("prop", ret);
}

static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
133 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
134 __func__, ret);
		return log_ret(ret);
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	return clk_get_by_index_nodev(dev_ofnode(dev), index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}
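
/*
 * Usage sketch (illustrative, not part of this driver): a consumer fetching
 * and enabling the first entry of its "clocks" property during probe. The
 * driver and function names here are hypothetical.
 *
 *	static int my_dev_probe(struct udevice *dev)
 *	{
 *		struct clk clk;
 *		int ret;
 *
 *		ret = clk_get_by_index(dev, 0, &clk);
 *		if (ret)
 *			return ret;
 *
 *		return clk_enable(&clk);
 *	}
 */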

int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}
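
/*
 * Usage sketch (illustrative): grabbing and enabling every "clocks" entry of
 * a device in one go, as an alternative to per-index lookups. The variable
 * names are hypothetical.
 *
 *	struct clk_bulk bulk;
 *	int ret;
 *
 *	ret = clk_get_bulk(dev, &bulk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable_bulk(&bulk);
 *	if (ret)
 *		return ret;
 */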

static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
	struct clk *c = clk;

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		int ret = clk_get_by_id(clk->id, &c);

		if (ret) {
			debug("%s(): could not get parent clock pointer, id %lu\n",
			      __func__, clk->id);
			return ERR_PTR(ret);
		}
	}

	return c;
}

static int clk_set_default_parents(struct udevice *dev,
				   enum clk_defaults_stage stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * It cannot be done right now; it needs to wait until after
		 * the device is probed.
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

static int clk_set_default_rates(struct udevice *dev,
				 enum clk_defaults_stage stage)
{
	struct clk clk, *c;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			dev_dbg(dev,
				"could not get assigned clock %d (err = %d)\n",
				index, ret);
			/* Skip if it is empty */
			if (ret == -ENOENT) {
				ret = 0;
				continue;
			}

			return ret;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * It cannot be done right now; it needs to wait until after
		 * the device is probed.
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_rate(c, rates[index]);

		if (ret < 0) {
			dev_warn(dev,
				 "failed to set rate on clock index %d (%ld) (error = %d)\n",
				 index, clk.id, ret);
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
{
	int ret;

	if (!dev_has_ofnode(dev))
		return 0;

	/*
	 * To avoid setting defaults twice, don't set them before relocation.
	 * However, still set them for SPL. And still set them if explicitly
	 * asked.
	 */
	if (!(IS_ENABLED(CONFIG_XPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		if (stage != CLK_DEFAULTS_POST_FORCE)
			return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}
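
/*
 * Illustrative device tree fragment (assumed, not taken from any real
 * board) showing the properties this function consumes: each
 * "assigned-clocks" entry may be given a parent via
 * "assigned-clock-parents" and/or a rate via "assigned-clock-rates"
 * (a rate of 0 is a no-op entry).
 *
 *	uart0: serial@f0000000 {
 *		clocks = <&clkc CLK_UART0>;
 *		assigned-clocks = <&clkc CLK_UART0>;
 *		assigned-clock-parents = <&clkc CLK_PLL1>;
 *		assigned-clock-rates = <48000000>;
 *	};
 */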

int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	return clk_get_by_name_nodev(dev_ofnode(dev), name, clk);
}
#endif /* OF_REAL */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index = 0;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	if (name) {
		index = ofnode_stringlist_search(node, "clock-names", name);
		if (index < 0) {
			debug("ofnode_stringlist_search() failed: %d\n", index);
			return index;
		}
	}

	return clk_get_by_index_nodev(node, index, clk);
}
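
/*
 * Usage sketch (illustrative): looking a clock up by its "clock-names" entry
 * rather than by index. The "baud" name is hypothetical and assumes a node
 * with clock-names = "core", "baud";
 *
 *	struct clk baud_clk;
 *	int ret;
 *
 *	ret = clk_get_by_name(dev, "baud", &baud_clk);
 *	if (ret)
 *		return ret;
 */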

const char *
clk_resolve_parent_clk(struct udevice *dev, const char *name)
{
	struct udevice *parent;
	struct clk clk;
	int ret;

	ret = uclass_get_device_by_name(UCLASS_CLK, name, &parent);
	if (!ret)
		return name;

	ret = clk_get_by_name(dev, name, &clk);
	if (!clk.dev)
		return name;

	return clk.dev->name;
}

int clk_release_all(struct clk *clk, unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%u]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	if (!pdev)
		return ERR_PTR(-ENODEV);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

ulong clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set, or if the proper flag is set */
535 if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
Lukasz Majewski53155da2019-06-24 15:50:43 +0200536 pclk->rate = clk_get_rate(pclk);
537
538 return pclk->rate;
539}
540
Dario Binacchib7f85892020-12-30 00:06:31 +0100541ulong clk_round_rate(struct clk *clk, ulong rate)
542{
543 const struct clk_ops *ops;
544
545 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
546 if (!clk_valid(clk))
547 return 0;
548
549 ops = clk_dev_ops(clk->dev);
550 if (!ops->round_rate)
551 return -ENOSYS;
552
553 return ops->round_rate(clk, rate);
554}
555
Patrick Delaunayd867a2872022-06-20 15:37:25 +0200556static void clk_get_priv(struct clk *clk, struct clk **clkp)
557{
558 *clkp = clk;
559
	/* get the private clock struct associated with the provided clock */
	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id)
			clk_get_by_id(clk->id, clkp);
	}
}

/* clean the cache; called with the private clock struct */
static void clk_clean_rate_cache(struct clk *clk)
{
	struct udevice *child_dev;
	struct clk *clkp;

	if (!clk)
		return;

	clk->rate = 0;

	list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
		if (device_get_uclass_id(child_dev) != UCLASS_CLK)
			continue;

		clkp = dev_get_clk_ptr(child_dev);
		clk_clean_rate_cache(clkp);
	}
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;
	struct clk *clkp;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	/* Try to find a parent which can set the rate */
599 while (!ops->set_rate) {
600 struct clk *parent;
601
602 if (!(clk->flags & CLK_SET_RATE_PARENT))
603 return -ENOSYS;
604
605 parent = clk_get_parent(clk);
606 if (IS_ERR_OR_NULL(parent) || !clk_valid(parent))
607 return -ENODEV;
608
609 clk = parent;
610 ops = clk_dev_ops(clk->dev);
611 }
Stephen Warrena9622432016-06-17 09:44:00 -0600612
Patrick Delaunayd867a2872022-06-20 15:37:25 +0200613 /* get private clock struct used for cache */
614 clk_get_priv(clk, &clkp);
Tero Kristo9ab78c12021-06-11 11:45:12 +0300615 /* Clean up cached rates for us and all child clocks */
Patrick Delaunayd867a2872022-06-20 15:37:25 +0200616 clk_clean_rate_cache(clkp);
Tero Kristo9ab78c12021-06-11 11:45:12 +0300617
Stephen Warrena9622432016-06-17 09:44:00 -0600618 return ops->set_rate(clk, rate);
619}
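
/*
 * Usage sketch (illustrative): requesting a new rate and then reading back
 * the rate actually achieved, since a driver may round the requested value.
 *
 *	ulong rate;
 *
 *	rate = clk_set_rate(&clk, 100000000);
 *	if (IS_ERR_VALUE(rate))
 *		return rate;
 *	rate = clk_get_rate(&clk);
 */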

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	ret = clk_enable(parent);
	if (ret && ret != -ENOSYS) {
		printf("Cannot enable parent %s\n", parent->dev->name);
		return ret;
	}

	ret = ops->set_parent(clk, parent);
	if (ret) {
		clk_disable(parent);
		return ret;
	}

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		ret = device_reparent(clk->dev, parent->dev);
		if (ret) {
			clk_disable(parent);
			return ret;
		}
	}

	return 0;
}

int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p name=%s)\n", __func__, clk, clk->dev->name);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			ops = clk_dev_ops(clkp->dev);
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clkp ? clkp : clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p name=%s)\n", __func__, clk, clk->dev->name);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			ops = clk_dev_ops(clkp->dev);
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clkp ? clkp : clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	return clk;
}
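
/*
 * Usage sketch (illustrative): the managed variant ties the allocation to
 * the device, so no explicit free is needed. The "bus" clock name is
 * hypothetical. Note the ERR_PTR-style return convention.
 *
 *	struct clk *clk;
 *
 *	clk = devm_clk_get(dev, "bus");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	return clk_enable(clk);
 */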

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * When a clock provider is probed, call clk_set_defaults() again
	 * after the device is probed. This takes care of cases where the
	 * DT is used to set up default parents and rates using
	 * assigned-clocks.
	 */
	clk_set_defaults(dev, CLK_DEFAULTS_POST);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};