blob: 0dcfe258bc44f004ba271e3c03b06091b2b9479e [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +02002/*
Nishanth Menoneaa39c62023-11-01 15:56:03 -05003 * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +02004 * Written by Jean-Jacques Hiblot <jjhiblot@ti.com>
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +02005 */
6
Patrick Delaunay81313352021-04-27 11:02:19 +02007#define LOG_CATEGORY UCLASS_PHY
8
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +02009#include <common.h>
10#include <dm.h>
Sean Anderson03036a22020-10-04 21:39:47 -040011#include <dm/device_compat.h>
developer272bde62020-05-02 11:35:11 +020012#include <dm/devres.h>
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +020013#include <generic-phy.h>
Alper Nebi Yasakbe4e5e12021-12-30 22:36:51 +030014#include <linux/list.h>
Simon Glassbdd5f812023-09-14 18:21:46 -060015#include <linux/printk.h>
Eugen Hristev24ab68d2023-05-15 12:59:47 +030016#include <power/regulator.h>
Alper Nebi Yasakbe4e5e12021-12-30 22:36:51 +030017
/**
 * struct phy_counts - Init and power-on counts of a single PHY port
 *
 * This structure is used to keep track of PHY initialization and power
 * state change requests, so that we don't power off and deinitialize a
 * PHY instance until all of its users want it done. Otherwise, multiple
 * consumers using the same PHY port can cause problems (e.g. one might
 * call power_off() after another's exit() and hang indefinitely).
 *
 * @id: The PHY ID within a PHY provider
 * @power_on_count: Times generic_phy_power_on() was called for this ID
 *                  without a matching generic_phy_power_off() afterwards
 * @init_count: Times generic_phy_init() was called for this ID
 *              without a matching generic_phy_exit() afterwards
 * @list: Handle for a linked list of these structures corresponding to
 *        ports of the same PHY provider
 * @supply: Handle to a phy-supply device, or NULL when there is none
 */
struct phy_counts {
	unsigned long id;
	int power_on_count;
	int init_count;
	struct list_head list;
	struct udevice *supply;
};
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +020043
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +020044static inline struct phy_ops *phy_dev_ops(struct udevice *dev)
45{
46 return (struct phy_ops *)dev->driver->ops;
47}
48
Alper Nebi Yasakbe4e5e12021-12-30 22:36:51 +030049static struct phy_counts *phy_get_counts(struct phy *phy)
50{
51 struct list_head *uc_priv;
52 struct phy_counts *counts;
53
54 if (!generic_phy_valid(phy))
55 return NULL;
56
57 uc_priv = dev_get_uclass_priv(phy->dev);
58 list_for_each_entry(counts, uc_priv, list)
59 if (counts->id == phy->id)
60 return counts;
61
62 return NULL;
63}
64
Eugen Hristev24ab68d2023-05-15 12:59:47 +030065static int phy_alloc_counts(struct phy *phy, struct udevice *supply)
Alper Nebi Yasakbe4e5e12021-12-30 22:36:51 +030066{
67 struct list_head *uc_priv;
68 struct phy_counts *counts;
69
70 if (!generic_phy_valid(phy))
71 return 0;
72 if (phy_get_counts(phy))
73 return 0;
74
75 uc_priv = dev_get_uclass_priv(phy->dev);
76 counts = kzalloc(sizeof(*counts), GFP_KERNEL);
77 if (!counts)
78 return -ENOMEM;
79
80 counts->id = phy->id;
81 counts->power_on_count = 0;
82 counts->init_count = 0;
Eugen Hristev24ab68d2023-05-15 12:59:47 +030083 counts->supply = supply;
Alper Nebi Yasakbe4e5e12021-12-30 22:36:51 +030084 list_add(&counts->list, uc_priv);
85
86 return 0;
87}
88
/* Prepare the provider's per-device list of phy_counts entries */
static int phy_uclass_pre_probe(struct udevice *dev)
{
	struct list_head *head = dev_get_uclass_priv(dev);

	INIT_LIST_HEAD(head);

	return 0;
}
97
98static int phy_uclass_pre_remove(struct udevice *dev)
99{
100 struct list_head *uc_priv = dev_get_uclass_priv(dev);
101 struct phy_counts *counts, *next;
102
103 list_for_each_entry_safe(counts, next, uc_priv, list)
104 kfree(counts);
105
106 return 0;
107}
108
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200109static int generic_phy_xlate_offs_flags(struct phy *phy,
Simon Glass81955512017-05-18 20:09:47 -0600110 struct ofnode_phandle_args *args)
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200111{
112 debug("%s(phy=%p)\n", __func__, phy);
113
114 if (args->args_count > 1) {
Sean Andersona1b654b2021-12-01 14:26:53 -0500115 debug("Invalid args_count: %d\n", args->args_count);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200116 return -EINVAL;
117 }
118
119 if (args->args_count)
120 phy->id = args->args[0];
121 else
122 phy->id = 0;
123
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200124 return 0;
125}
126
Jagan Tekia4e8eee2020-05-01 23:44:18 +0530127int generic_phy_get_by_index_nodev(ofnode node, int index, struct phy *phy)
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200128{
Simon Glass81955512017-05-18 20:09:47 -0600129 struct ofnode_phandle_args args;
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200130 struct phy_ops *ops;
Eugen Hristev24ab68d2023-05-15 12:59:47 +0300131 struct udevice *phydev, *supply = NULL;
Patrice Chotardcf65fa42018-06-27 11:55:42 +0200132 int i, ret;
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200133
Neil Armstrong8ab77e32020-03-30 11:27:23 +0200134 debug("%s(node=%s, index=%d, phy=%p)\n",
135 __func__, ofnode_get_name(node), index, phy);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200136
137 assert(phy);
Patrice Chotard956b7ad2017-07-18 11:38:42 +0200138 phy->dev = NULL;
Neil Armstrong8ab77e32020-03-30 11:27:23 +0200139 ret = ofnode_parse_phandle_with_args(node, "phys", "#phy-cells", 0,
140 index, &args);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200141 if (ret) {
Simon Glass81955512017-05-18 20:09:47 -0600142 debug("%s: dev_read_phandle_with_args failed: err=%d\n",
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200143 __func__, ret);
144 return ret;
145 }
146
Simon Glass81955512017-05-18 20:09:47 -0600147 ret = uclass_get_device_by_ofnode(UCLASS_PHY, args.node, &phydev);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200148 if (ret) {
Simon Glass81955512017-05-18 20:09:47 -0600149 debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200150 __func__, ret);
Patrice Chotardcf65fa42018-06-27 11:55:42 +0200151
152 /* Check if args.node's parent is a PHY provider */
153 ret = uclass_get_device_by_ofnode(UCLASS_PHY,
154 ofnode_get_parent(args.node),
155 &phydev);
156 if (ret)
157 return ret;
158
159 /* insert phy idx at first position into args array */
Marek Vasut61b17ed2018-08-07 12:24:35 +0200160 for (i = args.args_count; i >= 1 ; i--)
Patrice Chotardcf65fa42018-06-27 11:55:42 +0200161 args.args[i] = args.args[i - 1];
162
163 args.args_count++;
164 args.args[0] = ofnode_read_u32_default(args.node, "reg", -1);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200165 }
166
167 phy->dev = phydev;
168
169 ops = phy_dev_ops(phydev);
170
171 if (ops->of_xlate)
172 ret = ops->of_xlate(phy, &args);
173 else
174 ret = generic_phy_xlate_offs_flags(phy, &args);
175 if (ret) {
176 debug("of_xlate() failed: %d\n", ret);
177 goto err;
178 }
179
Eugen Hristev24ab68d2023-05-15 12:59:47 +0300180 if (CONFIG_IS_ENABLED(DM_REGULATOR)) {
181 ret = device_get_supply_regulator(phydev, "phy-supply",
182 &supply);
183 if (ret && ret != -ENOENT) {
184 debug("%s: device_get_supply_regulator failed: %d\n",
185 __func__, ret);
186 goto err;
187 }
188 }
189
190 ret = phy_alloc_counts(phy, supply);
Alper Nebi Yasakbe4e5e12021-12-30 22:36:51 +0300191 if (ret) {
192 debug("phy_alloc_counts() failed: %d\n", ret);
193 goto err;
194 }
195
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200196 return 0;
197
198err:
Jonas Karlman9f89e682023-08-31 22:16:35 +0000199 phy->dev = NULL;
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200200 return ret;
201}
202
/*
 * Convenience wrapper: parse the "phys" list of @dev's own device tree
 * node (see generic_phy_get_by_index_nodev() for the full semantics).
 */
int generic_phy_get_by_index(struct udevice *dev, int index,
			     struct phy *phy)
{
	return generic_phy_get_by_index_nodev(dev_ofnode(dev), index, phy);
}
208
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200209int generic_phy_get_by_name(struct udevice *dev, const char *phy_name,
210 struct phy *phy)
211{
212 int index;
213
214 debug("%s(dev=%p, name=%s, phy=%p)\n", __func__, dev, phy_name, phy);
215
Jonas Karlmanffe06b42023-08-31 22:16:33 +0000216 assert(phy);
217 phy->dev = NULL;
218
Simon Glass81955512017-05-18 20:09:47 -0600219 index = dev_read_stringlist_search(dev, "phy-names", phy_name);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200220 if (index < 0) {
Simon Glass81955512017-05-18 20:09:47 -0600221 debug("dev_read_stringlist_search() failed: %d\n", index);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200222 return index;
223 }
224
225 return generic_phy_get_by_index(dev, index, phy);
226}
227
/**
 * generic_phy_init() - Initialize a PHY port, refcounted across users
 *
 * Only the first consumer of a port actually invokes the provider's
 * init op; subsequent calls just increment the shared init_count so
 * generic_phy_exit() tears down only when the last user is done.
 *
 * @phy: PHY handle (an invalid handle is a successful no-op)
 * Return: 0 on success or no-op, negative errno from the init op
 */
int generic_phy_init(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	/*
	 * NOTE(review): counts is dereferenced unchecked; this relies on
	 * phy_alloc_counts() having run for this port (done in
	 * generic_phy_get_by_index_nodev()) -- confirm for phy handles
	 * obtained through any other path.
	 */
	counts = phy_get_counts(phy);
	if (counts->init_count > 0) {
		/* Port already initialized by another user: just refcount */
		counts->init_count++;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->init) {
		ret = ops->init(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to init %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	/* Count the port as initialized only after the op succeeded */
	counts->init_count = 1;

	return 0;
}
255
256int generic_phy_reset(struct phy *phy)
257{
Jean-Jacques Hibloteae1eeb2019-10-01 14:03:26 +0200258 struct phy_ops const *ops;
Patrick Delaunayc2e5efd2020-07-03 17:36:40 +0200259 int ret;
Jean-Jacques Hibloteae1eeb2019-10-01 14:03:26 +0200260
Vignesh Raghavendra62bd5b12020-05-20 22:35:41 +0530261 if (!generic_phy_valid(phy))
Jean-Jacques Hibloteae1eeb2019-10-01 14:03:26 +0200262 return 0;
263 ops = phy_dev_ops(phy->dev);
Patrick Delaunayc2e5efd2020-07-03 17:36:40 +0200264 if (!ops->reset)
265 return 0;
266 ret = ops->reset(phy);
267 if (ret)
268 dev_err(phy->dev, "PHY: Failed to reset %s: %d.\n",
269 phy->dev->name, ret);
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200270
Patrick Delaunayc2e5efd2020-07-03 17:36:40 +0200271 return ret;
Jean-Jacques Hiblot48447782017-04-24 11:51:27 +0200272}
273
/**
 * generic_phy_exit() - Deinitialize a PHY port, refcounted across users
 *
 * The provider's exit op runs only when the last user of the port calls
 * this; earlier calls simply drop the shared init_count. Calling exit
 * on a port that was never initialized is a harmless no-op.
 *
 * @phy: PHY handle (an invalid handle is a successful no-op)
 * Return: 0 on success or no-op, negative errno from the exit op
 */
int generic_phy_exit(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	/*
	 * NOTE(review): counts assumed non-NULL, as in generic_phy_init()
	 * -- relies on phy_alloc_counts() having run for this port.
	 */
	counts = phy_get_counts(phy);
	if (counts->init_count == 0)
		return 0;
	if (counts->init_count > 1) {
		/* Other users remain: just drop the refcount */
		counts->init_count--;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->exit) {
		ret = ops->exit(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to exit %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	/* Mark deinitialized only after the op succeeded */
	counts->init_count = 0;

	return 0;
}
303
/**
 * generic_phy_power_on() - Power on a PHY port, refcounted across users
 *
 * The first user enables the optional phy-supply regulator and then the
 * PHY itself; later users only bump the shared power_on_count. If the
 * provider's power_on op fails, the regulator enable is rolled back.
 *
 * @phy: PHY handle (an invalid handle is a successful no-op)
 * Return: 0 on success or no-op, negative errno otherwise
 */
int generic_phy_power_on(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	/*
	 * NOTE(review): counts assumed non-NULL, as in generic_phy_init()
	 * -- relies on phy_alloc_counts() having run for this port.
	 */
	counts = phy_get_counts(phy);
	if (counts->power_on_count > 0) {
		/* Already powered by another user: just refcount */
		counts->power_on_count++;
		return 0;
	}

	/*
	 * Enable the supply before the PHY. -ENOSYS means regulator
	 * support is absent (or counts->supply is NULL) and is ignored.
	 */
	ret = regulator_set_enable_if_allowed(counts->supply, true);
	if (ret && ret != -ENOSYS) {
		dev_err(phy->dev, "PHY: Failed to enable regulator %s: %d.\n",
			counts->supply->name, ret);
		return ret;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->power_on) {
		ret = ops->power_on(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to power on %s: %d.\n",
				phy->dev->name, ret);
			/* Undo the supply enable; best-effort rollback */
			regulator_set_enable_if_allowed(counts->supply, false);
			return ret;
		}
	}
	/* Count the port as powered only after the op succeeded */
	counts->power_on_count = 1;

	return 0;
}
339
/**
 * generic_phy_power_off() - Power off a PHY port, refcounted across users
 *
 * The PHY is powered off and its phy-supply regulator disabled only when
 * the last user calls this; earlier calls just drop power_on_count.
 * A regulator disable failure is reported but deliberately not returned:
 * the PHY itself was already powered off successfully.
 *
 * @phy: PHY handle (an invalid handle is a successful no-op)
 * Return: 0 on success or no-op, negative errno from the power_off op
 */
int generic_phy_power_off(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	/*
	 * NOTE(review): counts assumed non-NULL, as in generic_phy_init()
	 * -- relies on phy_alloc_counts() having run for this port.
	 */
	counts = phy_get_counts(phy);
	if (counts->power_on_count == 0)
		return 0;
	if (counts->power_on_count > 1) {
		/* Other users remain: just drop the refcount */
		counts->power_on_count--;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->power_off) {
		ret = ops->power_off(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to power off %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->power_on_count = 0;

	/* Disable the supply after the PHY; -ENOSYS means no regulator */
	ret = regulator_set_enable_if_allowed(counts->supply, false);
	if (ret && ret != -ENOSYS)
		dev_err(phy->dev, "PHY: Failed to disable regulator %s: %d.\n",
			counts->supply->name, ret);

	return 0;
}
374
Neil Armstrong963ae6c2020-12-29 14:58:59 +0100375int generic_phy_configure(struct phy *phy, void *params)
376{
377 struct phy_ops const *ops;
378
379 if (!generic_phy_valid(phy))
380 return 0;
381 ops = phy_dev_ops(phy->dev);
382
383 return ops->configure ? ops->configure(phy, params) : 0;
384}
385
Marek Vasut4332c092023-03-19 18:09:42 +0100386int generic_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
387{
388 struct phy_ops const *ops;
389
390 if (!generic_phy_valid(phy))
391 return 0;
392 ops = phy_dev_ops(phy->dev);
393
394 return ops->set_mode ? ops->set_mode(phy, mode, submode) : 0;
395}
396
397int generic_phy_set_speed(struct phy *phy, int speed)
398{
399 struct phy_ops const *ops;
400
401 if (!generic_phy_valid(phy))
402 return 0;
403 ops = phy_dev_ops(phy->dev);
404
405 return ops->set_speed ? ops->set_speed(phy, speed) : 0;
406}
407
developer272bde62020-05-02 11:35:11 +0200408int generic_phy_get_bulk(struct udevice *dev, struct phy_bulk *bulk)
409{
410 int i, ret, count;
Angus Ainslie3365fd32022-02-03 10:08:38 -0800411 struct udevice *phydev = dev;
developer272bde62020-05-02 11:35:11 +0200412
413 bulk->count = 0;
414
415 /* Return if no phy declared */
Angus Ainslie3365fd32022-02-03 10:08:38 -0800416 if (!dev_read_prop(dev, "phys", NULL)) {
417 phydev = dev->parent;
418 if (!dev_read_prop(phydev, "phys", NULL)) {
419 pr_err("%s : no phys property\n", __func__);
420 return 0;
421 }
422 }
developer272bde62020-05-02 11:35:11 +0200423
Angus Ainslie3365fd32022-02-03 10:08:38 -0800424 count = dev_count_phandle_with_args(phydev, "phys", "#phy-cells", 0);
425 if (count < 1) {
426 pr_err("%s : no phys found %d\n", __func__, count);
developer272bde62020-05-02 11:35:11 +0200427 return count;
Angus Ainslie3365fd32022-02-03 10:08:38 -0800428 }
developer272bde62020-05-02 11:35:11 +0200429
Angus Ainslie3365fd32022-02-03 10:08:38 -0800430 bulk->phys = devm_kcalloc(phydev, count, sizeof(struct phy), GFP_KERNEL);
developer272bde62020-05-02 11:35:11 +0200431 if (!bulk->phys)
432 return -ENOMEM;
433
434 for (i = 0; i < count; i++) {
Angus Ainslie3365fd32022-02-03 10:08:38 -0800435 ret = generic_phy_get_by_index(phydev, i, &bulk->phys[i]);
developer272bde62020-05-02 11:35:11 +0200436 if (ret) {
437 pr_err("Failed to get PHY%d for %s\n", i, dev->name);
438 return ret;
439 }
440 bulk->count++;
441 }
442
443 return 0;
444}
445
446int generic_phy_init_bulk(struct phy_bulk *bulk)
447{
448 struct phy *phys = bulk->phys;
449 int i, ret;
450
451 for (i = 0; i < bulk->count; i++) {
452 ret = generic_phy_init(&phys[i]);
453 if (ret) {
454 pr_err("Can't init PHY%d\n", i);
455 goto phys_init_err;
456 }
457 }
458
459 return 0;
460
461phys_init_err:
462 for (; i > 0; i--)
463 generic_phy_exit(&phys[i - 1]);
464
465 return ret;
466}
467
468int generic_phy_exit_bulk(struct phy_bulk *bulk)
469{
470 struct phy *phys = bulk->phys;
471 int i, ret = 0;
472
473 for (i = 0; i < bulk->count; i++)
474 ret |= generic_phy_exit(&phys[i]);
475
476 return ret;
477}
478
479int generic_phy_power_on_bulk(struct phy_bulk *bulk)
480{
481 struct phy *phys = bulk->phys;
482 int i, ret;
483
484 for (i = 0; i < bulk->count; i++) {
485 ret = generic_phy_power_on(&phys[i]);
486 if (ret) {
487 pr_err("Can't power on PHY%d\n", i);
488 goto phys_poweron_err;
489 }
490 }
491
492 return 0;
493
494phys_poweron_err:
495 for (; i > 0; i--)
496 generic_phy_power_off(&phys[i - 1]);
497
498 return ret;
499}
500
501int generic_phy_power_off_bulk(struct phy_bulk *bulk)
502{
503 struct phy *phys = bulk->phys;
504 int i, ret = 0;
505
506 for (i = 0; i < bulk->count; i++)
507 ret |= generic_phy_power_off(&phys[i]);
508
509 return ret;
510}
511
Patrice Chotardea7c49e2022-09-06 08:15:26 +0200512int generic_setup_phy(struct udevice *dev, struct phy *phy, int index)
513{
Jonas Karlmanc539e3e2023-08-31 23:07:11 +0000514 int ret;
Patrice Chotardea7c49e2022-09-06 08:15:26 +0200515
516 ret = generic_phy_get_by_index(dev, index, phy);
Jonas Karlmanc539e3e2023-08-31 23:07:11 +0000517 if (ret)
518 return ret == -ENOENT ? 0 : ret;
Patrice Chotardea7c49e2022-09-06 08:15:26 +0200519
Jonas Karlmanc539e3e2023-08-31 23:07:11 +0000520 ret = generic_phy_init(phy);
521 if (ret)
522 return ret;
523
524 ret = generic_phy_power_on(phy);
525 if (ret)
526 generic_phy_exit(phy);
Patrice Chotardea7c49e2022-09-06 08:15:26 +0200527
528 return ret;
529}
530
/*
 * Reverse of generic_setup_phy(): power off, then exit the PHY.
 * A power-off failure aborts before the exit step.
 */
int generic_shutdown_phy(struct phy *phy)
{
	int ret = 0;

	if (generic_phy_valid(phy)) {
		ret = generic_phy_power_off(phy);
		if (!ret)
			ret = generic_phy_exit(phy);
	}

	return ret;
}
544
/*
 * Generic PHY uclass: the per-device auto-allocated storage holds the
 * list head for this provider's phy_counts entries, set up in pre_probe
 * and torn down in pre_remove.
 */
UCLASS_DRIVER(phy) = {
	.id		= UCLASS_PHY,
	.name		= "phy",
	.pre_probe	= phy_uclass_pre_probe,
	.pre_remove	= phy_uclass_pre_remove,
	.per_device_auto	= sizeof(struct list_head),
};