// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
 * Written by Jean-Jacques Hiblot <jjhiblot@ti.com>
 */

#define LOG_CATEGORY UCLASS_PHY

#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <generic-phy.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <power/regulator.h>

/**
 * struct phy_counts - Init and power-on counts of a single PHY port
 *
 * This structure is used to keep track of PHY initialization and power
 * state change requests, so that we don't power off and deinitialize a
 * PHY instance until all of its users want it done. Otherwise, multiple
 * consumers using the same PHY port can cause problems (e.g. one might
 * call power_off() after another's exit() and hang indefinitely).
 *
 * @id: The PHY ID within a PHY provider
 * @power_on_count: Times generic_phy_power_on() was called for this ID
 *                  without a matching generic_phy_power_off() afterwards
 * @init_count: Times generic_phy_init() was called for this ID
 *              without a matching generic_phy_exit() afterwards
 * @list: Handle for a linked list of these structures corresponding to
 *        ports of the same PHY provider
 * @supply: Handle to a phy-supply device
 */
struct phy_counts {
	unsigned long id;
	int power_on_count;
	int init_count;
	struct list_head list;
	struct udevice *supply;
};
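
/*
 * Illustrative sketch (not part of the upstream file): how the counts above
 * behave when one PHY port has several users. The device and PHY names are
 * hypothetical and error handling is omitted for brevity.
 *
 *	struct phy phy;
 *
 *	generic_phy_get_by_name(dev, "usb2-phy", &phy);
 *	generic_phy_init(&phy);	// init_count 0 -> 1, ops->init() runs
 *	generic_phy_init(&phy);	// init_count 1 -> 2, hardware untouched
 *	generic_phy_exit(&phy);	// init_count 2 -> 1, hardware untouched
 *	generic_phy_exit(&phy);	// init_count 1 -> 0, ops->exit() runs
 *
 * generic_phy_power_on()/generic_phy_power_off() follow the same pattern
 * via @power_on_count, and the optional phy-supply regulator is only
 * toggled on the 0 <-> 1 transitions.
 */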

static inline struct phy_ops *phy_dev_ops(struct udevice *dev)
{
	return (struct phy_ops *)dev->driver->ops;
}

static struct phy_counts *phy_get_counts(struct phy *phy)
{
	struct list_head *uc_priv;
	struct phy_counts *counts;

	if (!generic_phy_valid(phy))
		return NULL;

	uc_priv = dev_get_uclass_priv(phy->dev);
	list_for_each_entry(counts, uc_priv, list)
		if (counts->id == phy->id)
			return counts;

	return NULL;
}

static int phy_alloc_counts(struct phy *phy, struct udevice *supply)
{
	struct list_head *uc_priv;
	struct phy_counts *counts;

	if (!generic_phy_valid(phy))
		return 0;
	if (phy_get_counts(phy))
		return 0;

	uc_priv = dev_get_uclass_priv(phy->dev);
	counts = kzalloc(sizeof(*counts), GFP_KERNEL);
	if (!counts)
		return -ENOMEM;

	counts->id = phy->id;
	counts->power_on_count = 0;
	counts->init_count = 0;
	counts->supply = supply;
	list_add(&counts->list, uc_priv);

	return 0;
}

static int phy_uclass_pre_probe(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);

	INIT_LIST_HEAD(uc_priv);

	return 0;
}

static int phy_uclass_pre_remove(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);
	struct phy_counts *counts, *next;

	list_for_each_entry_safe(counts, next, uc_priv, list)
		kfree(counts);

	return 0;
}

static int generic_phy_xlate_offs_flags(struct phy *phy,
					struct ofnode_phandle_args *args)
{
	debug("%s(phy=%p)\n", __func__, phy);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		phy->id = args->args[0];
	else
		phy->id = 0;

	return 0;
}

int generic_phy_get_by_index_nodev(ofnode node, int index, struct phy *phy)
{
	struct ofnode_phandle_args args;
	struct phy_ops *ops;
	struct udevice *phydev, *supply = NULL;
	int i, ret;

	debug("%s(node=%s, index=%d, phy=%p)\n",
	      __func__, ofnode_get_name(node), index, phy);

	assert(phy);
	phy->dev = NULL;
	ret = ofnode_parse_phandle_with_args(node, "phys", "#phy-cells", 0,
					     index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_PHY, args.node, &phydev);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);

		/* Check if args.node's parent is a PHY provider */
		ret = uclass_get_device_by_ofnode(UCLASS_PHY,
						  ofnode_get_parent(args.node),
						  &phydev);
		if (ret)
			return ret;

		/* insert phy idx at first position into args array */
		for (i = args.args_count; i >= 1; i--)
			args.args[i] = args.args[i - 1];

		args.args_count++;
		args.args[0] = ofnode_read_u32_default(args.node, "reg", -1);
	}

	phy->dev = phydev;

	ops = phy_dev_ops(phydev);

	if (ops->of_xlate)
		ret = ops->of_xlate(phy, &args);
	else
		ret = generic_phy_xlate_offs_flags(phy, &args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		goto err;
	}

	if (CONFIG_IS_ENABLED(DM_REGULATOR)) {
		ret = device_get_supply_regulator(phydev, "phy-supply",
						  &supply);
		if (ret && ret != -ENOENT) {
			debug("%s: device_get_supply_regulator failed: %d\n",
			      __func__, ret);
			goto err;
		}
	}

	ret = phy_alloc_counts(phy, supply);
	if (ret) {
		debug("phy_alloc_counts() failed: %d\n", ret);
		goto err;
	}

	return 0;

err:
	phy->dev = NULL;
	return ret;
}

int generic_phy_get_by_index(struct udevice *dev, int index,
			     struct phy *phy)
{
	return generic_phy_get_by_index_nodev(dev_ofnode(dev), index, phy);
}

int generic_phy_get_by_name(struct udevice *dev, const char *phy_name,
			    struct phy *phy)
{
	int index;

	debug("%s(dev=%p, name=%s, phy=%p)\n", __func__, dev, phy_name, phy);

	assert(phy);
	phy->dev = NULL;

	index = dev_read_stringlist_search(dev, "phy-names", phy_name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return generic_phy_get_by_index(dev, index, phy);
}

int generic_phy_init(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->init_count > 0) {
		counts->init_count++;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->init) {
		ret = ops->init(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to init %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->init_count = 1;

	return 0;
}

int generic_phy_reset(struct phy *phy)
{
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->reset)
		return 0;
	ret = ops->reset(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to reset %s: %d.\n",
			phy->dev->name, ret);

	return ret;
}

int generic_phy_exit(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->init_count == 0)
		return 0;
	if (counts->init_count > 1) {
		counts->init_count--;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->exit) {
		ret = ops->exit(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to exit %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->init_count = 0;

	return 0;
}

int generic_phy_power_on(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->power_on_count > 0) {
		counts->power_on_count++;
		return 0;
	}

	ret = regulator_set_enable_if_allowed(counts->supply, true);
	if (ret && ret != -ENOSYS) {
		dev_err(phy->dev, "PHY: Failed to enable regulator %s: %d.\n",
			counts->supply->name, ret);
		return ret;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->power_on) {
		ret = ops->power_on(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to power on %s: %d.\n",
				phy->dev->name, ret);
			regulator_set_enable_if_allowed(counts->supply, false);
			return ret;
		}
	}
	counts->power_on_count = 1;

	return 0;
}

int generic_phy_power_off(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->power_on_count == 0)
		return 0;
	if (counts->power_on_count > 1) {
		counts->power_on_count--;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->power_off) {
		ret = ops->power_off(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to power off %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->power_on_count = 0;

	ret = regulator_set_enable_if_allowed(counts->supply, false);
	if (ret && ret != -ENOSYS)
		dev_err(phy->dev, "PHY: Failed to disable regulator %s: %d.\n",
			counts->supply->name, ret);

	return 0;
}

int generic_phy_configure(struct phy *phy, void *params)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->configure ? ops->configure(phy, params) : 0;
}

int generic_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->set_mode ? ops->set_mode(phy, mode, submode) : 0;
}

int generic_phy_set_speed(struct phy *phy, int speed)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->set_speed ? ops->set_speed(phy, speed) : 0;
}

int generic_phy_get_bulk(struct udevice *dev, struct phy_bulk *bulk)
{
	int i, ret, count;
	struct udevice *phydev = dev;

	bulk->count = 0;

	/* Return if no phy declared */
	if (!dev_read_prop(dev, "phys", NULL)) {
		phydev = dev->parent;
		if (!dev_read_prop(phydev, "phys", NULL)) {
			pr_err("%s : no phys property\n", __func__);
			return 0;
		}
	}

	count = dev_count_phandle_with_args(phydev, "phys", "#phy-cells", 0);
	if (count < 1) {
		pr_err("%s : no phys found %d\n", __func__, count);
		return count;
	}

	bulk->phys = devm_kcalloc(phydev, count, sizeof(struct phy), GFP_KERNEL);
	if (!bulk->phys)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = generic_phy_get_by_index(phydev, i, &bulk->phys[i]);
		if (ret) {
			pr_err("Failed to get PHY%d for %s\n", i, dev->name);
			return ret;
		}
		bulk->count++;
	}

	return 0;
}

int generic_phy_init_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = generic_phy_init(&phys[i]);
		if (ret) {
			pr_err("Can't init PHY%d\n", i);
			goto phys_init_err;
		}
	}

	return 0;

phys_init_err:
	for (; i > 0; i--)
		generic_phy_exit(&phys[i - 1]);

	return ret;
}

int generic_phy_exit_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret = 0;

	for (i = 0; i < bulk->count; i++)
		ret |= generic_phy_exit(&phys[i]);

	return ret;
}

int generic_phy_power_on_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = generic_phy_power_on(&phys[i]);
		if (ret) {
			pr_err("Can't power on PHY%d\n", i);
			goto phys_poweron_err;
		}
	}

	return 0;

phys_poweron_err:
	for (; i > 0; i--)
		generic_phy_power_off(&phys[i - 1]);

	return ret;
}

int generic_phy_power_off_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret = 0;

	for (i = 0; i < bulk->count; i++)
		ret |= generic_phy_power_off(&phys[i]);

	return ret;
}

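/*
 * Usage sketch for the *_bulk() helpers above (illustrative only, not part
 * of the upstream file): a driver whose node lists several "phys" phandles
 * can bring them all up and tear them down together. The caller's variable
 * names are hypothetical and error handling is abbreviated.
 *
 *	struct phy_bulk phys;
 *	int ret;
 *
 *	ret = generic_phy_get_bulk(dev, &phys);
 *	if (!ret)
 *		ret = generic_phy_init_bulk(&phys);
 *	if (!ret)
 *		ret = generic_phy_power_on_bulk(&phys);
 *
 *	// ... use the device, then on removal:
 *	generic_phy_power_off_bulk(&phys);
 *	generic_phy_exit_bulk(&phys);
 */
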
int generic_setup_phy(struct udevice *dev, struct phy *phy, int index,
		      enum phy_mode mode, int submode)
{
	int ret;

	ret = generic_phy_get_by_index(dev, index, phy);
	if (ret)
		return ret == -ENOENT ? 0 : ret;

	ret = generic_phy_init(phy);
	if (ret)
		return ret;

	ret = generic_phy_set_mode(phy, mode, submode);
	if (ret)
		goto phys_mode_err;

	ret = generic_phy_power_on(phy);
	if (ret)
		goto phys_mode_err;

	return 0;

phys_mode_err:
	generic_phy_exit(phy);
	return ret;
}

int generic_shutdown_phy(struct phy *phy)
{
	int ret;

	if (!generic_phy_valid(phy))
		return 0;

	ret = generic_phy_power_off(phy);
	if (ret)
		return ret;

	return generic_phy_exit(phy);
}

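/*
 * Typical consumer flow (illustrative only): generic_setup_phy() combines
 * get-by-index, init, set_mode and power_on with rollback on failure, and
 * generic_shutdown_phy() undoes it. A host controller driver might do
 * something like the following; priv->phy and the mode value are shown as
 * examples, not taken from this file.
 *
 *	ret = generic_setup_phy(dev, &priv->phy, 0, PHY_MODE_USB_HOST, 0);
 *	if (ret)
 *		return ret;
 *
 *	// ... on removal:
 *	generic_shutdown_phy(&priv->phy);
 */
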
UCLASS_DRIVER(phy) = {
	.id		= UCLASS_PHY,
	.name		= "phy",
	.pre_probe	= phy_uclass_pre_probe,
	.pre_remove	= phy_uclass_pre_remove,
	.per_device_auto	= sizeof(struct list_head),
};