// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
 * Written by Jean-Jacques Hiblot <jjhiblot@ti.com>
 */

#define LOG_CATEGORY UCLASS_PHY

#include <common.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <generic-phy.h>
#include <linux/list.h>

/**
 * struct phy_counts - Init and power-on counts of a single PHY port
 *
 * This structure is used to keep track of PHY initialization and power
 * state change requests, so that we don't power off and deinitialize a
 * PHY instance until all of its users want it done. Otherwise, multiple
 * consumers using the same PHY port can cause problems (e.g. one might
 * call power_off() after another's exit() and hang indefinitely).
 *
 * @id: The PHY ID within a PHY provider
 * @power_on_count: Times generic_phy_power_on() was called for this ID
 *                  without a matching generic_phy_power_off() afterwards
 * @init_count: Times generic_phy_init() was called for this ID
 *              without a matching generic_phy_exit() afterwards
 * @list: Handle for a linked list of these structures corresponding to
 *        ports of the same PHY provider
 */
struct phy_counts {
	unsigned long id;
	int power_on_count;
	int init_count;
	struct list_head list;
};
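
/*
 * Usage sketch (illustrative only, not part of the driver): the counts are
 * keyed on the provider device and @id, so repeated init/power-on requests
 * for the same port, even from different consumer handles, reach the PHY
 * driver only once, and only the final exit/power-off call is passed on:
 *
 *	generic_phy_init(&phy);		// init_count 0 -> 1, ops->init() runs
 *	generic_phy_init(&phy);		// init_count 1 -> 2, no ops call
 *	generic_phy_exit(&phy);		// init_count 2 -> 1, no ops call
 *	generic_phy_exit(&phy);		// init_count 1 -> 0, ops->exit() runs
 */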

static inline struct phy_ops *phy_dev_ops(struct udevice *dev)
{
	return (struct phy_ops *)dev->driver->ops;
}

static struct phy_counts *phy_get_counts(struct phy *phy)
{
	struct list_head *uc_priv;
	struct phy_counts *counts;

	if (!generic_phy_valid(phy))
		return NULL;

	uc_priv = dev_get_uclass_priv(phy->dev);
	list_for_each_entry(counts, uc_priv, list)
		if (counts->id == phy->id)
			return counts;

	return NULL;
}

static int phy_alloc_counts(struct phy *phy)
{
	struct list_head *uc_priv;
	struct phy_counts *counts;

	if (!generic_phy_valid(phy))
		return 0;
	if (phy_get_counts(phy))
		return 0;

	uc_priv = dev_get_uclass_priv(phy->dev);
	counts = kzalloc(sizeof(*counts), GFP_KERNEL);
	if (!counts)
		return -ENOMEM;

	counts->id = phy->id;
	counts->power_on_count = 0;
	counts->init_count = 0;
	list_add(&counts->list, uc_priv);

	return 0;
}

static int phy_uclass_pre_probe(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);

	INIT_LIST_HEAD(uc_priv);

	return 0;
}

static int phy_uclass_pre_remove(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);
	struct phy_counts *counts, *next;

	list_for_each_entry_safe(counts, next, uc_priv, list)
		kfree(counts);

	return 0;
}

static int generic_phy_xlate_offs_flags(struct phy *phy,
					struct ofnode_phandle_args *args)
{
	debug("%s(phy=%p)\n", __func__, phy);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		phy->id = args->args[0];
	else
		phy->id = 0;

	return 0;
}
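
/*
 * Example device tree fragment handled by the default xlate above (node and
 * property names are illustrative assumptions, not taken from this file):
 *
 *	usb_phy: phy@4100000 {
 *		#phy-cells = <1>;
 *	};
 *
 *	usb@4000000 {
 *		phys = <&usb_phy 2>;
 *	};
 *
 * One argument cell sets phy->id (2 here); with "#phy-cells = <0>" the id
 * defaults to 0; more than one cell is rejected with -EINVAL unless the
 * provider implements its own of_xlate() hook.
 */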

int generic_phy_get_by_index_nodev(ofnode node, int index, struct phy *phy)
{
	struct ofnode_phandle_args args;
	struct phy_ops *ops;
	struct udevice *phydev;
	int i, ret;

	debug("%s(node=%s, index=%d, phy=%p)\n",
	      __func__, ofnode_get_name(node), index, phy);

	assert(phy);
	phy->dev = NULL;
	ret = ofnode_parse_phandle_with_args(node, "phys", "#phy-cells", 0,
					     index, &args);
	if (ret) {
		debug("%s: ofnode_parse_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_PHY, args.node, &phydev);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);

		/* Check if args.node's parent is a PHY provider */
		ret = uclass_get_device_by_ofnode(UCLASS_PHY,
						  ofnode_get_parent(args.node),
						  &phydev);
		if (ret)
			return ret;

		/* insert phy idx at first position into args array */
		for (i = args.args_count; i >= 1; i--)
			args.args[i] = args.args[i - 1];

		args.args_count++;
		args.args[0] = ofnode_read_u32_default(args.node, "reg", -1);
	}

	phy->dev = phydev;

	ops = phy_dev_ops(phydev);

	if (ops->of_xlate)
		ret = ops->of_xlate(phy, &args);
	else
		ret = generic_phy_xlate_offs_flags(phy, &args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		goto err;
	}

	ret = phy_alloc_counts(phy);
	if (ret) {
		debug("phy_alloc_counts() failed: %d\n", ret);
		goto err;
	}

	return 0;

err:
	return ret;
}

int generic_phy_get_by_index(struct udevice *dev, int index,
			     struct phy *phy)
{
	return generic_phy_get_by_index_nodev(dev_ofnode(dev), index, phy);
}

int generic_phy_get_by_name(struct udevice *dev, const char *phy_name,
			    struct phy *phy)
{
	int index;

	debug("%s(dev=%p, name=%s, phy=%p)\n", __func__, dev, phy_name, phy);

	index = dev_read_stringlist_search(dev, "phy-names", phy_name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return generic_phy_get_by_index(dev, index, phy);
}
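
/*
 * Minimal consumer sketch (assumed driver code; the "usb2-phy" name is
 * hypothetical): look the port up by name, then bring it up in order.
 *
 *	struct phy phy;
 *	int ret;
 *
 *	ret = generic_phy_get_by_name(dev, "usb2-phy", &phy);
 *	if (ret)
 *		return ret;
 *	ret = generic_phy_init(&phy);
 *	if (ret)
 *		return ret;
 *	ret = generic_phy_power_on(&phy);
 *	if (ret)
 *		generic_phy_exit(&phy);
 *	return ret;
 *
 * Teardown mirrors this order: generic_phy_power_off() followed by
 * generic_phy_exit().
 */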

int generic_phy_init(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->init)
		return 0;

	counts = phy_get_counts(phy);
	if (counts->init_count > 0) {
		counts->init_count++;
		return 0;
	}

	ret = ops->init(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to init %s: %d.\n",
			phy->dev->name, ret);
	else
		counts->init_count = 1;

	return ret;
}

int generic_phy_reset(struct phy *phy)
{
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->reset)
		return 0;
	ret = ops->reset(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to reset %s: %d.\n",
			phy->dev->name, ret);

	return ret;
}

int generic_phy_exit(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->exit)
		return 0;

	counts = phy_get_counts(phy);
	if (counts->init_count == 0)
		return 0;
	if (counts->init_count > 1) {
		counts->init_count--;
		return 0;
	}

	ret = ops->exit(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to exit %s: %d.\n",
			phy->dev->name, ret);
	else
		counts->init_count = 0;

	return ret;
}

int generic_phy_power_on(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->power_on)
		return 0;

	counts = phy_get_counts(phy);
	if (counts->power_on_count > 0) {
		counts->power_on_count++;
		return 0;
	}

	ret = ops->power_on(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to power on %s: %d.\n",
			phy->dev->name, ret);
	else
		counts->power_on_count = 1;

	return ret;
}

int generic_phy_power_off(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->power_off)
		return 0;

	counts = phy_get_counts(phy);
	if (counts->power_on_count == 0)
		return 0;
	if (counts->power_on_count > 1) {
		counts->power_on_count--;
		return 0;
	}

	ret = ops->power_off(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to power off %s: %d.\n",
			phy->dev->name, ret);
	else
		counts->power_on_count = 0;

	return ret;
}

int generic_phy_configure(struct phy *phy, void *params)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->configure ? ops->configure(phy, params) : 0;
}

int generic_phy_get_bulk(struct udevice *dev, struct phy_bulk *bulk)
{
	int i, ret, count;

	bulk->count = 0;

	/* Return if no phy declared */
	if (!dev_read_prop(dev, "phys", NULL))
		return 0;

	count = dev_count_phandle_with_args(dev, "phys", "#phy-cells", 0);
	if (count < 1)
		return count;

	bulk->phys = devm_kcalloc(dev, count, sizeof(struct phy), GFP_KERNEL);
	if (!bulk->phys)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = generic_phy_get_by_index(dev, i, &bulk->phys[i]);
		if (ret) {
			pr_err("Failed to get PHY%d for %s\n", i, dev->name);
			return ret;
		}
		bulk->count++;
	}

	return 0;
}

int generic_phy_init_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = generic_phy_init(&phys[i]);
		if (ret) {
			pr_err("Can't init PHY%d\n", i);
			goto phys_init_err;
		}
	}

	return 0;

phys_init_err:
	for (; i > 0; i--)
		generic_phy_exit(&phys[i - 1]);

	return ret;
}

int generic_phy_exit_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret = 0;

	for (i = 0; i < bulk->count; i++)
		ret |= generic_phy_exit(&phys[i]);

	return ret;
}

int generic_phy_power_on_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = generic_phy_power_on(&phys[i]);
		if (ret) {
			pr_err("Can't power on PHY%d\n", i);
			goto phys_poweron_err;
		}
	}

	return 0;

phys_poweron_err:
	for (; i > 0; i--)
		generic_phy_power_off(&phys[i - 1]);

	return ret;
}

int generic_phy_power_off_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret = 0;

	for (i = 0; i < bulk->count; i++)
		ret |= generic_phy_power_off(&phys[i]);

	return ret;
}
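
/*
 * Bulk usage sketch (assumed consumer code): fetch every "phys" entry at
 * once and bring them up together. generic_phy_init_bulk() and
 * generic_phy_power_on_bulk() already unwind their own partial progress
 * on failure, so the caller only undoes the stages that completed.
 *
 *	struct phy_bulk phys;
 *	int ret;
 *
 *	ret = generic_phy_get_bulk(dev, &phys);
 *	if (ret)
 *		return ret;
 *	ret = generic_phy_init_bulk(&phys);
 *	if (ret)
 *		return ret;
 *	ret = generic_phy_power_on_bulk(&phys);
 *	if (ret)
 *		generic_phy_exit_bulk(&phys);
 *	return ret;
 */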

UCLASS_DRIVER(phy) = {
	.id		= UCLASS_PHY,
	.name		= "phy",
	.pre_probe	= phy_uclass_pre_probe,
	.pre_remove	= phy_uclass_pre_remove,
	.per_device_auto	= sizeof(struct list_head),
};