// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#define LOG_CATEGORY UCLASS_RESET

#include <dm.h>
#include <fdtdec.h>
#include <log.h>
#include <malloc.h>
#include <reset.h>
#include <reset-uclass.h>
#include <dm/devres.h>
#include <dm/lists.h>
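
/*
 * Typical consumer usage, shown only as an illustrative sketch (the "ahb"
 * reset name and the error handling belong to a hypothetical consuming
 * driver, not to this uclass):
 *
 *	struct reset_ctl rst;
 *	int ret;
 *
 *	ret = reset_get_by_name(dev, "ahb", &rst);
 *	if (ret)
 *		return ret;
 *	reset_assert(&rst);
 *	reset_deassert(&rst);
 *	reset_free(&rst);
 */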

static inline struct reset_ops *reset_dev_ops(struct udevice *dev)
{
	return (struct reset_ops *)dev->driver->ops;
}

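/*
 * Default specifier translation: a single cell in the "resets" phandle
 * specifier is interpreted directly as the controller-relative reset ID.
 * Controllers with more complex bindings override this via ops->of_xlate.
 */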
static int reset_of_xlate_default(struct reset_ctl *reset_ctl,
				  struct ofnode_phandle_args *args)
{
	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	if (args->args_count != 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	reset_ctl->id = args->args[0];

	return 0;
}

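/*
 * Common tail of reset_get_by_index() and reset_get_by_index_nodev(): take
 * the result of the "resets" phandle lookup, resolve the reset controller
 * device, translate the specifier into @reset_ctl and ask the controller's
 * ops->request() (if implemented) to claim it.
 */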
static int reset_get_by_index_tail(int ret, ofnode node,
				   struct ofnode_phandle_args *args,
				   const char *list_name, int index,
				   struct reset_ctl *reset_ctl)
{
	struct udevice *dev_reset;
	struct reset_ops *ops;

	assert(reset_ctl);
	reset_ctl->dev = NULL;
	if (ret)
		return ret;

	ret = uclass_get_device_by_ofnode(UCLASS_RESET, args->node,
					  &dev_reset);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode() failed: %d\n",
		      __func__, ret);
		debug("%s %d\n", ofnode_get_name(args->node), args->args[0]);
		return ret;
	}
	ops = reset_dev_ops(dev_reset);

	reset_ctl->dev = dev_reset;
	if (ops->of_xlate)
		ret = ops->of_xlate(reset_ctl, args);
	else
		ret = reset_of_xlate_default(reset_ctl, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	ret = ops->request ? ops->request(reset_ctl) : 0;
	if (ret) {
		debug("ops->request() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

int reset_get_by_index(struct udevice *dev, int index,
		       struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "resets", "#reset-cells", 0,
					 index, &args);

	return reset_get_by_index_tail(ret, dev_ofnode(dev), &args, "resets",
				       index > 0, reset_ctl);
}

int reset_get_by_index_nodev(ofnode node, int index,
			     struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "resets", "#reset-cells", 0,
					     index, &args);

	return reset_get_by_index_tail(ret, node, &args, "resets",
				       index > 0, reset_ctl);
}

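/*
 * Fetch and request every reset listed in the "resets" property of @node.
 * If any individual request fails, all resets claimed so far are released
 * again before the error is returned.
 */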
static int __reset_get_bulk(struct udevice *dev, ofnode node,
			    struct reset_ctl_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = ofnode_count_phandle_with_args(node, "resets", "#reset-cells",
					       0);
	if (count < 1)
		return count;

	bulk->resets = devm_kcalloc(dev, count, sizeof(struct reset_ctl),
				    GFP_KERNEL);
	if (!bulk->resets)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = reset_get_by_index_nodev(node, i, &bulk->resets[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = reset_release_all(bulk->resets, bulk->count);
	if (err)
		debug("%s: could not release all resets for %p\n",
		      __func__, dev);

	return ret;
}

int reset_get_bulk(struct udevice *dev, struct reset_ctl_bulk *bulk)
{
	return __reset_get_bulk(dev, dev_ofnode(dev), bulk);
}

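/*
 * Look up @name in the "reset-names" property and fetch the reset at the
 * matching index; a NULL @name selects index 0 of the "resets" list.
 */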
int reset_get_by_name(struct udevice *dev, const char *name,
		      struct reset_ctl *reset_ctl)
{
	int index = 0;

	debug("%s(dev=%p, name=%s, reset_ctl=%p)\n", __func__, dev, name,
	      reset_ctl);
	reset_ctl->dev = NULL;

	if (name) {
		index = dev_read_stringlist_search(dev, "reset-names", name);
		if (index < 0) {
			debug("dev_read_stringlist_search() failed: %d\n",
			      index);
			return index;
		}
	}

	return reset_get_by_index(dev, index, reset_ctl);
}

int reset_request(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->request ? ops->request(reset_ctl) : 0;
}

int reset_free(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rfree ? ops->rfree(reset_ctl) : 0;
}

int reset_assert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_assert ? ops->rst_assert(reset_ctl) : 0;
}

int reset_assert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_assert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_deassert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_deassert ? ops->rst_deassert(reset_ctl) : 0;
}

int reset_deassert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_deassert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_status(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_status ? ops->rst_status(reset_ctl) : 0;
}

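/*
 * Assert and free each reset in the @reset_ctl array; entries whose ->dev is
 * still NULL were never successfully requested and are skipped.
 */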
int reset_release_all(struct reset_ctl *reset_ctl, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(reset_ctl[%d]=%p)\n", __func__, i, &reset_ctl[i]);

		/* check if reset has been previously requested */
		if (!reset_ctl[i].dev)
			continue;

		ret = reset_assert(&reset_ctl[i]);
		if (ret)
			return ret;

		ret = reset_free(&reset_ctl[i]);
		if (ret)
			return ret;
	}

	return 0;
}

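/*
 * devres destructor: releases a single devm-managed reset control when the
 * owning device is removed or unbound.
 */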
static void devm_reset_release(struct udevice *dev, void *res)
{
	reset_free(res);
}

struct reset_ctl *devm_reset_control_get_by_index(struct udevice *dev,
						  int index)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_index(dev, index, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get(struct udevice *dev, const char *id)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_name(dev, id, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

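/*
 * Optional variant of devm_reset_control_get(): any failure (including a
 * missing reset) is mapped to NULL so callers can treat it as a no-op.
 */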
struct reset_ctl *devm_reset_control_get_optional(struct udevice *dev,
						  const char *id)
{
	struct reset_ctl *r = devm_reset_control_get(dev, id);

	if (IS_ERR(r))
		return NULL;

	return r;
}

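/*
 * devres destructor: asserts and frees every reset in a devm-managed bulk
 * when the owning device is removed.
 */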
static void devm_reset_bulk_release(struct udevice *dev, void *res)
{
	struct reset_ctl_bulk *bulk = res;

	reset_release_all(bulk->resets, bulk->count);
}

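/*
 * Devres-managed bulk get: every reset listed under @node is requested and
 * automatically released again when @dev goes away.
 */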
struct reset_ctl_bulk *devm_reset_bulk_get_by_node(struct udevice *dev,
						   ofnode node)
{
	int rc;
	struct reset_ctl_bulk *bulk;

	bulk = devres_alloc(devm_reset_bulk_release,
			    sizeof(struct reset_ctl_bulk),
			    __GFP_ZERO);

	/* this looks like a leak, but devres takes care of it */
	if (unlikely(!bulk))
		return ERR_PTR(-ENOMEM);

	rc = __reset_get_bulk(dev, node, bulk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, bulk);
	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional_by_node(struct udevice *dev,
							     ofnode node)
{
	struct reset_ctl_bulk *bulk;

	bulk = devm_reset_bulk_get_by_node(dev, node);

	if (IS_ERR(bulk))
		return NULL;

	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get(struct udevice *dev)
{
	return devm_reset_bulk_get_by_node(dev, dev_ofnode(dev));
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional(struct udevice *dev)
{
	return devm_reset_bulk_get_optional_by_node(dev, dev_ofnode(dev));
}

UCLASS_DRIVER(reset) = {
	.id	= UCLASS_RESET,
	.name	= "reset",
};