1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * phy-core.c -- Generic Phy framework.
4 *
5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Author: Kishon Vijay Abraham I <kishon@ti.com>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/export.h>
12 #include <linux/module.h>
13 #include <linux/err.h>
14 #include <linux/debugfs.h>
15 #include <linux/device.h>
16 #include <linux/slab.h>
17 #include <linux/of.h>
18 #include <linux/phy/phy.h>
19 #include <linux/idr.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/regulator/consumer.h>
22
23 static void phy_release(struct device *dev);
24 static const struct class phy_class = {
25 .name = "phy",
26 .dev_release = phy_release,
27 };
28
29 static struct dentry *phy_debugfs_root;
30 static DEFINE_MUTEX(phy_provider_mutex);
31 static LIST_HEAD(phy_provider_list);
32 static LIST_HEAD(phys);
33 static DEFINE_IDA(phy_ida);
34
devm_phy_release(struct device * dev,void * res)35 static void devm_phy_release(struct device *dev, void *res)
36 {
37 struct phy *phy = *(struct phy **)res;
38
39 phy_put(dev, phy);
40 }
41
devm_phy_provider_release(struct device * dev,void * res)42 static void devm_phy_provider_release(struct device *dev, void *res)
43 {
44 struct phy_provider *phy_provider = *(struct phy_provider **)res;
45
46 of_phy_provider_unregister(phy_provider);
47 }
48
devm_phy_consume(struct device * dev,void * res)49 static void devm_phy_consume(struct device *dev, void *res)
50 {
51 struct phy *phy = *(struct phy **)res;
52
53 phy_destroy(phy);
54 }
55
devm_phy_match(struct device * dev,void * res,void * match_data)56 static int devm_phy_match(struct device *dev, void *res, void *match_data)
57 {
58 struct phy **phy = res;
59
60 return *phy == match_data;
61 }
62
63 /**
64 * phy_create_lookup() - allocate and register PHY/device association
65 * @phy: the phy of the association
66 * @con_id: connection ID string on device
67 * @dev_id: the device of the association
68 *
69 * Creates and registers phy_lookup entry.
70 */
phy_create_lookup(struct phy * phy,const char * con_id,const char * dev_id)71 int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
72 {
73 struct phy_lookup *pl;
74
75 if (!phy || !dev_id || !con_id)
76 return -EINVAL;
77
78 pl = kzalloc(sizeof(*pl), GFP_KERNEL);
79 if (!pl)
80 return -ENOMEM;
81
82 pl->dev_id = dev_id;
83 pl->con_id = con_id;
84 pl->phy = phy;
85
86 mutex_lock(&phy_provider_mutex);
87 list_add_tail(&pl->node, &phys);
88 mutex_unlock(&phy_provider_mutex);
89
90 return 0;
91 }
92 EXPORT_SYMBOL_GPL(phy_create_lookup);
93
94 /**
95 * phy_remove_lookup() - find and remove PHY/device association
96 * @phy: the phy of the association
97 * @con_id: connection ID string on device
98 * @dev_id: the device of the association
99 *
100 * Finds and unregisters phy_lookup entry that was created with
101 * phy_create_lookup().
102 */
phy_remove_lookup(struct phy * phy,const char * con_id,const char * dev_id)103 void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
104 {
105 struct phy_lookup *pl;
106
107 if (!phy || !dev_id || !con_id)
108 return;
109
110 mutex_lock(&phy_provider_mutex);
111 list_for_each_entry(pl, &phys, node)
112 if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
113 !strcmp(pl->con_id, con_id)) {
114 list_del(&pl->node);
115 kfree(pl);
116 break;
117 }
118 mutex_unlock(&phy_provider_mutex);
119 }
120 EXPORT_SYMBOL_GPL(phy_remove_lookup);
121
phy_find(struct device * dev,const char * con_id)122 static struct phy *phy_find(struct device *dev, const char *con_id)
123 {
124 const char *dev_id = dev_name(dev);
125 struct phy_lookup *p, *pl = NULL;
126
127 mutex_lock(&phy_provider_mutex);
128 list_for_each_entry(p, &phys, node)
129 if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
130 pl = p;
131 break;
132 }
133 mutex_unlock(&phy_provider_mutex);
134
135 return pl ? pl->phy : ERR_PTR(-ENODEV);
136 }
137
of_phy_provider_lookup(struct device_node * node)138 static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
139 {
140 struct phy_provider *phy_provider;
141 struct device_node *child;
142
143 list_for_each_entry(phy_provider, &phy_provider_list, list) {
144 if (phy_provider->dev->of_node == node)
145 return phy_provider;
146
147 for_each_child_of_node(phy_provider->children, child)
148 if (child == node) {
149 of_node_put(child);
150 return phy_provider;
151 }
152 }
153
154 return ERR_PTR(-EPROBE_DEFER);
155 }
156
phy_pm_runtime_get(struct phy * phy)157 int phy_pm_runtime_get(struct phy *phy)
158 {
159 int ret;
160
161 if (!phy)
162 return 0;
163
164 if (!pm_runtime_enabled(&phy->dev))
165 return -ENOTSUPP;
166
167 ret = pm_runtime_get(&phy->dev);
168 if (ret < 0 && ret != -EINPROGRESS)
169 pm_runtime_put_noidle(&phy->dev);
170
171 return ret;
172 }
173 EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
174
phy_pm_runtime_get_sync(struct phy * phy)175 int phy_pm_runtime_get_sync(struct phy *phy)
176 {
177 int ret;
178
179 if (!phy)
180 return 0;
181
182 if (!pm_runtime_enabled(&phy->dev))
183 return -ENOTSUPP;
184
185 ret = pm_runtime_get_sync(&phy->dev);
186 if (ret < 0)
187 pm_runtime_put_sync(&phy->dev);
188
189 return ret;
190 }
191 EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
192
phy_pm_runtime_put(struct phy * phy)193 int phy_pm_runtime_put(struct phy *phy)
194 {
195 if (!phy)
196 return 0;
197
198 if (!pm_runtime_enabled(&phy->dev))
199 return -ENOTSUPP;
200
201 return pm_runtime_put(&phy->dev);
202 }
203 EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
204
phy_pm_runtime_put_sync(struct phy * phy)205 int phy_pm_runtime_put_sync(struct phy *phy)
206 {
207 if (!phy)
208 return 0;
209
210 if (!pm_runtime_enabled(&phy->dev))
211 return -ENOTSUPP;
212
213 return pm_runtime_put_sync(&phy->dev);
214 }
215 EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
216
phy_pm_runtime_allow(struct phy * phy)217 void phy_pm_runtime_allow(struct phy *phy)
218 {
219 if (!phy)
220 return;
221
222 if (!pm_runtime_enabled(&phy->dev))
223 return;
224
225 pm_runtime_allow(&phy->dev);
226 }
227 EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
228
phy_pm_runtime_forbid(struct phy * phy)229 void phy_pm_runtime_forbid(struct phy *phy)
230 {
231 if (!phy)
232 return;
233
234 if (!pm_runtime_enabled(&phy->dev))
235 return;
236
237 pm_runtime_forbid(&phy->dev);
238 }
239 EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
240
241 /**
242 * phy_init - phy internal initialization before phy operation
243 * @phy: the phy returned by phy_get()
244 *
245 * Used to allow phy's driver to perform phy internal initialization,
246 * such as PLL block powering, clock initialization or anything that's
247 * is required by the phy to perform the start of operation.
248 * Must be called before phy_power_on().
249 *
250 * Return: %0 if successful, a negative error code otherwise
251 */
phy_init(struct phy * phy)252 int phy_init(struct phy *phy)
253 {
254 int ret;
255
256 if (!phy)
257 return 0;
258
259 ret = phy_pm_runtime_get_sync(phy);
260 if (ret < 0 && ret != -ENOTSUPP)
261 return ret;
262 ret = 0; /* Override possible ret == -ENOTSUPP */
263
264 mutex_lock(&phy->mutex);
265 if (phy->power_count > phy->init_count)
266 dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");
267
268 if (phy->init_count == 0 && phy->ops->init) {
269 ret = phy->ops->init(phy);
270 if (ret < 0) {
271 dev_err(&phy->dev, "phy init failed --> %d\n", ret);
272 goto out;
273 }
274 }
275 ++phy->init_count;
276
277 out:
278 mutex_unlock(&phy->mutex);
279 phy_pm_runtime_put(phy);
280 return ret;
281 }
282 EXPORT_SYMBOL_GPL(phy_init);
283
284 /**
285 * phy_exit - Phy internal un-initialization
286 * @phy: the phy returned by phy_get()
287 *
288 * Must be called after phy_power_off().
289 *
290 * Return: %0 if successful, a negative error code otherwise
291 */
phy_exit(struct phy * phy)292 int phy_exit(struct phy *phy)
293 {
294 int ret;
295
296 if (!phy)
297 return 0;
298
299 ret = phy_pm_runtime_get_sync(phy);
300 if (ret < 0 && ret != -ENOTSUPP)
301 return ret;
302 ret = 0; /* Override possible ret == -ENOTSUPP */
303
304 mutex_lock(&phy->mutex);
305 if (phy->init_count == 1 && phy->ops->exit) {
306 ret = phy->ops->exit(phy);
307 if (ret < 0) {
308 dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
309 goto out;
310 }
311 }
312 --phy->init_count;
313
314 out:
315 mutex_unlock(&phy->mutex);
316 phy_pm_runtime_put(phy);
317 return ret;
318 }
319 EXPORT_SYMBOL_GPL(phy_exit);
320
321 /**
322 * phy_power_on - Enable the phy and enter proper operation
323 * @phy: the phy returned by phy_get()
324 *
325 * Must be called after phy_init().
326 *
327 * Return: %0 if successful, a negative error code otherwise
328 */
phy_power_on(struct phy * phy)329 int phy_power_on(struct phy *phy)
330 {
331 int ret = 0;
332
333 if (!phy)
334 goto out;
335
336 if (phy->pwr) {
337 ret = regulator_enable(phy->pwr);
338 if (ret)
339 goto out;
340 }
341
342 ret = phy_pm_runtime_get_sync(phy);
343 if (ret < 0 && ret != -ENOTSUPP)
344 goto err_pm_sync;
345
346 ret = 0; /* Override possible ret == -ENOTSUPP */
347
348 mutex_lock(&phy->mutex);
349 if (phy->power_count == 0 && phy->ops->power_on) {
350 ret = phy->ops->power_on(phy);
351 if (ret < 0) {
352 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
353 goto err_pwr_on;
354 }
355 }
356 ++phy->power_count;
357 mutex_unlock(&phy->mutex);
358 return 0;
359
360 err_pwr_on:
361 mutex_unlock(&phy->mutex);
362 phy_pm_runtime_put_sync(phy);
363 err_pm_sync:
364 if (phy->pwr)
365 regulator_disable(phy->pwr);
366 out:
367 return ret;
368 }
369 EXPORT_SYMBOL_GPL(phy_power_on);
370
371 /**
372 * phy_power_off - Disable the phy.
373 * @phy: the phy returned by phy_get()
374 *
375 * Must be called before phy_exit().
376 *
377 * Return: %0 if successful, a negative error code otherwise
378 */
phy_power_off(struct phy * phy)379 int phy_power_off(struct phy *phy)
380 {
381 int ret;
382
383 if (!phy)
384 return 0;
385
386 mutex_lock(&phy->mutex);
387 if (phy->power_count == 1 && phy->ops->power_off) {
388 ret = phy->ops->power_off(phy);
389 if (ret < 0) {
390 dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
391 mutex_unlock(&phy->mutex);
392 return ret;
393 }
394 }
395 --phy->power_count;
396 mutex_unlock(&phy->mutex);
397 phy_pm_runtime_put(phy);
398
399 if (phy->pwr)
400 regulator_disable(phy->pwr);
401
402 return 0;
403 }
404 EXPORT_SYMBOL_GPL(phy_power_off);
405
phy_set_mode_ext(struct phy * phy,enum phy_mode mode,int submode)406 int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
407 {
408 int ret;
409
410 if (!phy || !phy->ops->set_mode)
411 return 0;
412
413 mutex_lock(&phy->mutex);
414 ret = phy->ops->set_mode(phy, mode, submode);
415 if (!ret)
416 phy->attrs.mode = mode;
417 mutex_unlock(&phy->mutex);
418
419 return ret;
420 }
421 EXPORT_SYMBOL_GPL(phy_set_mode_ext);
422
phy_set_media(struct phy * phy,enum phy_media media)423 int phy_set_media(struct phy *phy, enum phy_media media)
424 {
425 int ret;
426
427 if (!phy || !phy->ops->set_media)
428 return 0;
429
430 mutex_lock(&phy->mutex);
431 ret = phy->ops->set_media(phy, media);
432 mutex_unlock(&phy->mutex);
433
434 return ret;
435 }
436 EXPORT_SYMBOL_GPL(phy_set_media);
437
phy_set_speed(struct phy * phy,int speed)438 int phy_set_speed(struct phy *phy, int speed)
439 {
440 int ret;
441
442 if (!phy || !phy->ops->set_speed)
443 return 0;
444
445 mutex_lock(&phy->mutex);
446 ret = phy->ops->set_speed(phy, speed);
447 mutex_unlock(&phy->mutex);
448
449 return ret;
450 }
451 EXPORT_SYMBOL_GPL(phy_set_speed);
452
phy_reset(struct phy * phy)453 int phy_reset(struct phy *phy)
454 {
455 int ret;
456
457 if (!phy || !phy->ops->reset)
458 return 0;
459
460 ret = phy_pm_runtime_get_sync(phy);
461 if (ret < 0 && ret != -ENOTSUPP)
462 return ret;
463
464 mutex_lock(&phy->mutex);
465 ret = phy->ops->reset(phy);
466 mutex_unlock(&phy->mutex);
467
468 phy_pm_runtime_put(phy);
469
470 return ret;
471 }
472 EXPORT_SYMBOL_GPL(phy_reset);
473
474 /**
475 * phy_calibrate() - Tunes the phy hw parameters for current configuration
476 * @phy: the phy returned by phy_get()
477 *
478 * Used to calibrate phy hardware, typically by adjusting some parameters in
479 * runtime, which are otherwise lost after host controller reset and cannot
480 * be applied in phy_init() or phy_power_on().
481 *
482 * Return: %0 if successful, a negative error code otherwise
483 */
phy_calibrate(struct phy * phy)484 int phy_calibrate(struct phy *phy)
485 {
486 int ret;
487
488 if (!phy || !phy->ops->calibrate)
489 return 0;
490
491 mutex_lock(&phy->mutex);
492 ret = phy->ops->calibrate(phy);
493 mutex_unlock(&phy->mutex);
494
495 return ret;
496 }
497 EXPORT_SYMBOL_GPL(phy_calibrate);
498
499 /**
500 * phy_notify_connect() - phy connect notification
501 * @phy: the phy returned by phy_get()
502 * @port: the port index for connect
503 *
504 * If the phy needs to get connection status, the callback can be used.
505 * Returns: %0 if successful, a negative error code otherwise
506 */
phy_notify_connect(struct phy * phy,int port)507 int phy_notify_connect(struct phy *phy, int port)
508 {
509 int ret;
510
511 if (!phy || !phy->ops->connect)
512 return 0;
513
514 mutex_lock(&phy->mutex);
515 ret = phy->ops->connect(phy, port);
516 mutex_unlock(&phy->mutex);
517
518 return ret;
519 }
520 EXPORT_SYMBOL_GPL(phy_notify_connect);
521
522 /**
523 * phy_notify_disconnect() - phy disconnect notification
524 * @phy: the phy returned by phy_get()
525 * @port: the port index for disconnect
526 *
527 * If the phy needs to get connection status, the callback can be used.
528 *
529 * Returns: %0 if successful, a negative error code otherwise
530 */
phy_notify_disconnect(struct phy * phy,int port)531 int phy_notify_disconnect(struct phy *phy, int port)
532 {
533 int ret;
534
535 if (!phy || !phy->ops->disconnect)
536 return 0;
537
538 mutex_lock(&phy->mutex);
539 ret = phy->ops->disconnect(phy, port);
540 mutex_unlock(&phy->mutex);
541
542 return ret;
543 }
544 EXPORT_SYMBOL_GPL(phy_notify_disconnect);
545
546 /**
547 * phy_configure() - Changes the phy parameters
548 * @phy: the phy returned by phy_get()
549 * @opts: New configuration to apply
550 *
551 * Used to change the PHY parameters. phy_init() must have been called
552 * on the phy. The configuration will be applied on the current phy
553 * mode, that can be changed using phy_set_mode().
554 *
555 * Return: %0 if successful, a negative error code otherwise
556 */
phy_configure(struct phy * phy,union phy_configure_opts * opts)557 int phy_configure(struct phy *phy, union phy_configure_opts *opts)
558 {
559 int ret;
560
561 if (!phy)
562 return -EINVAL;
563
564 if (!phy->ops->configure)
565 return -EOPNOTSUPP;
566
567 mutex_lock(&phy->mutex);
568 ret = phy->ops->configure(phy, opts);
569 mutex_unlock(&phy->mutex);
570
571 return ret;
572 }
573 EXPORT_SYMBOL_GPL(phy_configure);
574
575 /**
576 * phy_validate() - Checks the phy parameters
577 * @phy: the phy returned by phy_get()
578 * @mode: phy_mode the configuration is applicable to.
579 * @submode: PHY submode the configuration is applicable to.
580 * @opts: Configuration to check
581 *
582 * Used to check that the current set of parameters can be handled by
583 * the phy. Implementations are free to tune the parameters passed as
584 * arguments if needed by some implementation detail or
585 * constraints. It will not change any actual configuration of the
586 * PHY, so calling it as many times as deemed fit will have no side
587 * effect.
588 *
589 * Return: %0 if successful, a negative error code otherwise
590 */
phy_validate(struct phy * phy,enum phy_mode mode,int submode,union phy_configure_opts * opts)591 int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
592 union phy_configure_opts *opts)
593 {
594 int ret;
595
596 if (!phy)
597 return -EINVAL;
598
599 if (!phy->ops->validate)
600 return -EOPNOTSUPP;
601
602 mutex_lock(&phy->mutex);
603 ret = phy->ops->validate(phy, mode, submode, opts);
604 mutex_unlock(&phy->mutex);
605
606 return ret;
607 }
608 EXPORT_SYMBOL_GPL(phy_validate);
609
610 /**
611 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
612 * @np: device_node for which to get the phy
613 * @index: the index of the phy
614 *
615 * Returns the phy associated with the given phandle value,
616 * after getting a refcount to it or -ENODEV if there is no such phy or
617 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
618 * not yet loaded. This function uses of_xlate call back function provided
619 * while registering the phy_provider to find the phy instance.
620 */
_of_phy_get(struct device_node * np,int index)621 static struct phy *_of_phy_get(struct device_node *np, int index)
622 {
623 int ret;
624 struct phy_provider *phy_provider;
625 struct phy *phy = NULL;
626 struct of_phandle_args args;
627
628 ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
629 index, &args);
630 if (ret)
631 return ERR_PTR(-ENODEV);
632
633 /* This phy type handled by the usb-phy subsystem for now */
634 if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
635 phy = ERR_PTR(-ENODEV);
636 goto out_put_node;
637 }
638
639 mutex_lock(&phy_provider_mutex);
640 phy_provider = of_phy_provider_lookup(args.np);
641 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
642 phy = ERR_PTR(-EPROBE_DEFER);
643 goto out_unlock;
644 }
645
646 if (!of_device_is_available(args.np)) {
647 dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
648 phy = ERR_PTR(-ENODEV);
649 goto out_put_module;
650 }
651
652 phy = phy_provider->of_xlate(phy_provider->dev, &args);
653
654 out_put_module:
655 module_put(phy_provider->owner);
656
657 out_unlock:
658 mutex_unlock(&phy_provider_mutex);
659 out_put_node:
660 of_node_put(args.np);
661
662 return phy;
663 }
664
665 /**
666 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
667 * @np: device_node for which to get the phy
668 * @con_id: name of the phy from device's point of view
669 *
670 * Returns the phy driver, after getting a refcount to it; or
671 * -ENODEV if there is no such phy. The caller is responsible for
672 * calling of_phy_put() to release that count.
673 */
of_phy_get(struct device_node * np,const char * con_id)674 struct phy *of_phy_get(struct device_node *np, const char *con_id)
675 {
676 struct phy *phy = NULL;
677 int index = 0;
678
679 if (con_id)
680 index = of_property_match_string(np, "phy-names", con_id);
681
682 phy = _of_phy_get(np, index);
683 if (IS_ERR(phy))
684 return phy;
685
686 if (!try_module_get(phy->ops->owner))
687 return ERR_PTR(-EPROBE_DEFER);
688
689 get_device(&phy->dev);
690
691 return phy;
692 }
693 EXPORT_SYMBOL_GPL(of_phy_get);
694
695 /**
696 * of_phy_put() - release the PHY
697 * @phy: the phy returned by of_phy_get()
698 *
699 * Releases a refcount the caller received from of_phy_get().
700 */
of_phy_put(struct phy * phy)701 void of_phy_put(struct phy *phy)
702 {
703 if (!phy || IS_ERR(phy))
704 return;
705
706 mutex_lock(&phy->mutex);
707 if (phy->ops->release)
708 phy->ops->release(phy);
709 mutex_unlock(&phy->mutex);
710
711 module_put(phy->ops->owner);
712 put_device(&phy->dev);
713 }
714 EXPORT_SYMBOL_GPL(of_phy_put);
715
716 /**
717 * phy_put() - release the PHY
718 * @dev: device that wants to release this phy
719 * @phy: the phy returned by phy_get()
720 *
721 * Releases a refcount the caller received from phy_get().
722 */
phy_put(struct device * dev,struct phy * phy)723 void phy_put(struct device *dev, struct phy *phy)
724 {
725 device_link_remove(dev, &phy->dev);
726 of_phy_put(phy);
727 }
728 EXPORT_SYMBOL_GPL(phy_put);
729
730 /**
731 * devm_phy_put() - release the PHY
732 * @dev: device that wants to release this phy
733 * @phy: the phy returned by devm_phy_get()
734 *
735 * destroys the devres associated with this phy and invokes phy_put
736 * to release the phy.
737 */
devm_phy_put(struct device * dev,struct phy * phy)738 void devm_phy_put(struct device *dev, struct phy *phy)
739 {
740 int r;
741
742 if (!phy)
743 return;
744
745 r = devres_release(dev, devm_phy_release, devm_phy_match, phy);
746 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
747 }
748 EXPORT_SYMBOL_GPL(devm_phy_put);
749
750 /**
751 * of_phy_simple_xlate() - returns the phy instance from phy provider
752 * @dev: the PHY provider device (not used here)
753 * @args: of_phandle_args
754 *
755 * Intended to be used by phy provider for the common case where #phy-cells is
756 * 0. For other cases where #phy-cells is greater than '0', the phy provider
757 * should provide a custom of_xlate function that reads the *args* and returns
758 * the appropriate phy.
759 */
of_phy_simple_xlate(struct device * dev,const struct of_phandle_args * args)760 struct phy *of_phy_simple_xlate(struct device *dev,
761 const struct of_phandle_args *args)
762 {
763 struct device *target_dev;
764
765 target_dev = class_find_device_by_of_node(&phy_class, args->np);
766 if (!target_dev)
767 return ERR_PTR(-ENODEV);
768
769 put_device(target_dev);
770 return to_phy(target_dev);
771 }
772 EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
773
774 /**
775 * phy_get() - lookup and obtain a reference to a phy.
776 * @dev: device that requests this phy
777 * @string: the phy name as given in the dt data or the name of the controller
778 * port for non-dt case
779 *
780 * Returns the phy driver, after getting a refcount to it; or
781 * -ENODEV if there is no such phy. The caller is responsible for
782 * calling phy_put() to release that count.
783 */
phy_get(struct device * dev,const char * string)784 struct phy *phy_get(struct device *dev, const char *string)
785 {
786 int index = 0;
787 struct phy *phy;
788 struct device_link *link;
789
790 if (dev->of_node) {
791 if (string)
792 index = of_property_match_string(dev->of_node, "phy-names",
793 string);
794 else
795 index = 0;
796 phy = _of_phy_get(dev->of_node, index);
797 } else {
798 if (string == NULL) {
799 dev_WARN(dev, "missing string\n");
800 return ERR_PTR(-EINVAL);
801 }
802 phy = phy_find(dev, string);
803 }
804 if (IS_ERR(phy))
805 return phy;
806
807 if (!try_module_get(phy->ops->owner))
808 return ERR_PTR(-EPROBE_DEFER);
809
810 get_device(&phy->dev);
811
812 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
813 if (!link)
814 dev_dbg(dev, "failed to create device link to %s\n",
815 dev_name(phy->dev.parent));
816
817 return phy;
818 }
819 EXPORT_SYMBOL_GPL(phy_get);
820
821 /**
822 * devm_phy_get() - lookup and obtain a reference to a phy.
823 * @dev: device that requests this phy
824 * @string: the phy name as given in the dt data or phy device name
825 * for non-dt case
826 *
827 * Gets the phy using phy_get(), and associates a device with it using
828 * devres. On driver detach, release function is invoked on the devres data,
829 * then, devres data is freed.
830 */
devm_phy_get(struct device * dev,const char * string)831 struct phy *devm_phy_get(struct device *dev, const char *string)
832 {
833 struct phy **ptr, *phy;
834
835 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
836 if (!ptr)
837 return ERR_PTR(-ENOMEM);
838
839 phy = phy_get(dev, string);
840 if (!IS_ERR(phy)) {
841 *ptr = phy;
842 devres_add(dev, ptr);
843 } else {
844 devres_free(ptr);
845 }
846
847 return phy;
848 }
849 EXPORT_SYMBOL_GPL(devm_phy_get);
850
851 /**
852 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
853 * @dev: device that requests this phy
854 * @string: the phy name as given in the dt data or phy device name
855 * for non-dt case
856 *
857 * Gets the phy using phy_get(), and associates a device with it using
858 * devres. On driver detach, release function is invoked on the devres
859 * data, then, devres data is freed. This differs to devm_phy_get() in
860 * that if the phy does not exist, it is not considered an error and
861 * -ENODEV will not be returned. Instead the NULL phy is returned,
862 * which can be passed to all other phy consumer calls.
863 */
devm_phy_optional_get(struct device * dev,const char * string)864 struct phy *devm_phy_optional_get(struct device *dev, const char *string)
865 {
866 struct phy *phy = devm_phy_get(dev, string);
867
868 if (PTR_ERR(phy) == -ENODEV)
869 phy = NULL;
870
871 return phy;
872 }
873 EXPORT_SYMBOL_GPL(devm_phy_optional_get);
874
875 /**
876 * devm_of_phy_get() - lookup and obtain a reference to a phy.
877 * @dev: device that requests this phy
878 * @np: node containing the phy
879 * @con_id: name of the phy from device's point of view
880 *
881 * Gets the phy using of_phy_get(), and associates a device with it using
882 * devres. On driver detach, release function is invoked on the devres data,
883 * then, devres data is freed.
884 */
devm_of_phy_get(struct device * dev,struct device_node * np,const char * con_id)885 struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
886 const char *con_id)
887 {
888 struct phy **ptr, *phy;
889 struct device_link *link;
890
891 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
892 if (!ptr)
893 return ERR_PTR(-ENOMEM);
894
895 phy = of_phy_get(np, con_id);
896 if (!IS_ERR(phy)) {
897 *ptr = phy;
898 devres_add(dev, ptr);
899 } else {
900 devres_free(ptr);
901 return phy;
902 }
903
904 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
905 if (!link)
906 dev_dbg(dev, "failed to create device link to %s\n",
907 dev_name(phy->dev.parent));
908
909 return phy;
910 }
911 EXPORT_SYMBOL_GPL(devm_of_phy_get);
912
913 /**
914 * devm_of_phy_optional_get() - lookup and obtain a reference to an optional
915 * phy.
916 * @dev: device that requests this phy
917 * @np: node containing the phy
918 * @con_id: name of the phy from device's point of view
919 *
920 * Gets the phy using of_phy_get(), and associates a device with it using
921 * devres. On driver detach, release function is invoked on the devres data,
922 * then, devres data is freed. This differs to devm_of_phy_get() in
923 * that if the phy does not exist, it is not considered an error and
924 * -ENODEV will not be returned. Instead the NULL phy is returned,
925 * which can be passed to all other phy consumer calls.
926 */
devm_of_phy_optional_get(struct device * dev,struct device_node * np,const char * con_id)927 struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
928 const char *con_id)
929 {
930 struct phy *phy = devm_of_phy_get(dev, np, con_id);
931
932 if (PTR_ERR(phy) == -ENODEV)
933 phy = NULL;
934
935 if (IS_ERR(phy))
936 dev_err_probe(dev, PTR_ERR(phy), "failed to get PHY %pOF:%s",
937 np, con_id);
938
939 return phy;
940 }
941 EXPORT_SYMBOL_GPL(devm_of_phy_optional_get);
942
943 /**
944 * devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
945 * @dev: device that requests this phy
946 * @np: node containing the phy
947 * @index: index of the phy
948 *
949 * Gets the phy using _of_phy_get(), then gets a refcount to it,
950 * and associates a device with it using devres. On driver detach,
951 * release function is invoked on the devres data,
952 * then, devres data is freed.
953 *
954 */
devm_of_phy_get_by_index(struct device * dev,struct device_node * np,int index)955 struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
956 int index)
957 {
958 struct phy **ptr, *phy;
959 struct device_link *link;
960
961 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
962 if (!ptr)
963 return ERR_PTR(-ENOMEM);
964
965 phy = _of_phy_get(np, index);
966 if (IS_ERR(phy)) {
967 devres_free(ptr);
968 return phy;
969 }
970
971 if (!try_module_get(phy->ops->owner)) {
972 devres_free(ptr);
973 return ERR_PTR(-EPROBE_DEFER);
974 }
975
976 get_device(&phy->dev);
977
978 *ptr = phy;
979 devres_add(dev, ptr);
980
981 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
982 if (!link)
983 dev_dbg(dev, "failed to create device link to %s\n",
984 dev_name(phy->dev.parent));
985
986 return phy;
987 }
988 EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
989
990 /**
991 * phy_create() - create a new phy
992 * @dev: device that is creating the new phy
993 * @node: device node of the phy
994 * @ops: function pointers for performing phy operations
995 *
996 * Called to create a phy using phy framework.
997 */
phy_create(struct device * dev,struct device_node * node,const struct phy_ops * ops)998 struct phy *phy_create(struct device *dev, struct device_node *node,
999 const struct phy_ops *ops)
1000 {
1001 int ret;
1002 int id;
1003 struct phy *phy;
1004
1005 if (WARN_ON(!dev))
1006 return ERR_PTR(-EINVAL);
1007
1008 phy = kzalloc(sizeof(*phy), GFP_KERNEL);
1009 if (!phy)
1010 return ERR_PTR(-ENOMEM);
1011
1012 id = ida_alloc(&phy_ida, GFP_KERNEL);
1013 if (id < 0) {
1014 dev_err(dev, "unable to get id\n");
1015 ret = id;
1016 goto free_phy;
1017 }
1018
1019 device_initialize(&phy->dev);
1020 mutex_init(&phy->mutex);
1021
1022 phy->dev.class = &phy_class;
1023 phy->dev.parent = dev;
1024 phy->dev.of_node = node ?: dev->of_node;
1025 phy->id = id;
1026 phy->ops = ops;
1027
1028 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
1029 if (ret)
1030 goto put_dev;
1031
1032 /* phy-supply */
1033 phy->pwr = regulator_get_optional(&phy->dev, "phy");
1034 if (IS_ERR(phy->pwr)) {
1035 ret = PTR_ERR(phy->pwr);
1036 if (ret == -EPROBE_DEFER)
1037 goto put_dev;
1038
1039 phy->pwr = NULL;
1040 }
1041
1042 ret = device_add(&phy->dev);
1043 if (ret)
1044 goto put_dev;
1045
1046 if (pm_runtime_enabled(dev)) {
1047 pm_runtime_enable(&phy->dev);
1048 pm_runtime_no_callbacks(&phy->dev);
1049 }
1050
1051 phy->debugfs = debugfs_create_dir(dev_name(&phy->dev), phy_debugfs_root);
1052
1053 return phy;
1054
1055 put_dev:
1056 put_device(&phy->dev); /* calls phy_release() which frees resources */
1057 return ERR_PTR(ret);
1058
1059 free_phy:
1060 kfree(phy);
1061 return ERR_PTR(ret);
1062 }
1063 EXPORT_SYMBOL_GPL(phy_create);
1064
1065 /**
1066 * devm_phy_create() - create a new phy
1067 * @dev: device that is creating the new phy
1068 * @node: device node of the phy
1069 * @ops: function pointers for performing phy operations
1070 *
1071 * Creates a new PHY device adding it to the PHY class.
1072 * While at that, it also associates the device with the phy using devres.
1073 * On driver detach, release function is invoked on the devres data,
1074 * then, devres data is freed.
1075 */
devm_phy_create(struct device * dev,struct device_node * node,const struct phy_ops * ops)1076 struct phy *devm_phy_create(struct device *dev, struct device_node *node,
1077 const struct phy_ops *ops)
1078 {
1079 struct phy **ptr, *phy;
1080
1081 ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
1082 if (!ptr)
1083 return ERR_PTR(-ENOMEM);
1084
1085 phy = phy_create(dev, node, ops);
1086 if (!IS_ERR(phy)) {
1087 *ptr = phy;
1088 devres_add(dev, ptr);
1089 } else {
1090 devres_free(ptr);
1091 }
1092
1093 return phy;
1094 }
1095 EXPORT_SYMBOL_GPL(devm_phy_create);
1096
1097 /**
1098 * phy_destroy() - destroy the phy
1099 * @phy: the phy to be destroyed
1100 *
1101 * Called to destroy the phy.
1102 */
phy_destroy(struct phy * phy)1103 void phy_destroy(struct phy *phy)
1104 {
1105 pm_runtime_disable(&phy->dev);
1106 device_unregister(&phy->dev);
1107 }
1108 EXPORT_SYMBOL_GPL(phy_destroy);
1109
1110 /**
1111 * devm_phy_destroy() - destroy the PHY
1112 * @dev: device that wants to release this phy
1113 * @phy: the phy returned by devm_phy_get()
1114 *
1115 * destroys the devres associated with this phy and invokes phy_destroy
1116 * to destroy the phy.
1117 */
devm_phy_destroy(struct device * dev,struct phy * phy)1118 void devm_phy_destroy(struct device *dev, struct phy *phy)
1119 {
1120 int r;
1121
1122 r = devres_release(dev, devm_phy_consume, devm_phy_match, phy);
1123 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
1124 }
1125 EXPORT_SYMBOL_GPL(devm_phy_destroy);
1126
1127 /**
1128 * __of_phy_provider_register() - create/register phy provider with the framework
1129 * @dev: struct device of the phy provider
1130 * @children: device node containing children (if different from dev->of_node)
1131 * @owner: the module owner containing of_xlate
1132 * @of_xlate: function pointer to obtain phy instance from phy provider
1133 *
1134 * Creates struct phy_provider from dev and of_xlate function pointer.
1135 * This is used in the case of dt boot for finding the phy instance from
1136 * phy provider.
1137 *
1138 * If the PHY provider doesn't nest children directly but uses a separate
1139 * child node to contain the individual children, the @children parameter
1140 * can be used to override the default. If NULL, the default (dev->of_node)
1141 * will be used. If non-NULL, the device node must be a child (or further
1142 * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
1143 * error code is returned.
1144 */
__of_phy_provider_register(struct device * dev,struct device_node * children,struct module * owner,struct phy * (* of_xlate)(struct device * dev,const struct of_phandle_args * args))1145 struct phy_provider *__of_phy_provider_register(struct device *dev,
1146 struct device_node *children, struct module *owner,
1147 struct phy * (*of_xlate)(struct device *dev,
1148 const struct of_phandle_args *args))
1149 {
1150 struct phy_provider *phy_provider;
1151
1152 /*
1153 * If specified, the device node containing the children must itself
1154 * be the provider's device node or a child (or further descendant)
1155 * thereof.
1156 */
1157 if (children) {
1158 struct device_node *parent = of_node_get(children), *next;
1159
1160 while (parent) {
1161 if (parent == dev->of_node)
1162 break;
1163
1164 next = of_get_parent(parent);
1165 of_node_put(parent);
1166 parent = next;
1167 }
1168
1169 if (!parent)
1170 return ERR_PTR(-EINVAL);
1171
1172 of_node_put(parent);
1173 } else {
1174 children = dev->of_node;
1175 }
1176
1177 phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
1178 if (!phy_provider)
1179 return ERR_PTR(-ENOMEM);
1180
1181 phy_provider->dev = dev;
1182 phy_provider->children = of_node_get(children);
1183 phy_provider->owner = owner;
1184 phy_provider->of_xlate = of_xlate;
1185
1186 mutex_lock(&phy_provider_mutex);
1187 list_add_tail(&phy_provider->list, &phy_provider_list);
1188 mutex_unlock(&phy_provider_mutex);
1189
1190 return phy_provider;
1191 }
1192 EXPORT_SYMBOL_GPL(__of_phy_provider_register);
1193
1194 /**
1195 * __devm_of_phy_provider_register() - create/register phy provider with the
1196 * framework
1197 * @dev: struct device of the phy provider
1198 * @children: device node containing children (if different from dev->of_node)
1199 * @owner: the module owner containing of_xlate
1200 * @of_xlate: function pointer to obtain phy instance from phy provider
1201 *
1202 * Creates struct phy_provider from dev and of_xlate function pointer.
1203 * This is used in the case of dt boot for finding the phy instance from
1204 * phy provider. While at that, it also associates the device with the
1205 * phy provider using devres. On driver detach, release function is invoked
1206 * on the devres data, then, devres data is freed.
1207 */
__devm_of_phy_provider_register(struct device * dev,struct device_node * children,struct module * owner,struct phy * (* of_xlate)(struct device * dev,const struct of_phandle_args * args))1208 struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
1209 struct device_node *children, struct module *owner,
1210 struct phy * (*of_xlate)(struct device *dev,
1211 const struct of_phandle_args *args))
1212 {
1213 struct phy_provider **ptr, *phy_provider;
1214
1215 ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
1216 if (!ptr)
1217 return ERR_PTR(-ENOMEM);
1218
1219 phy_provider = __of_phy_provider_register(dev, children, owner,
1220 of_xlate);
1221 if (!IS_ERR(phy_provider)) {
1222 *ptr = phy_provider;
1223 devres_add(dev, ptr);
1224 } else {
1225 devres_free(ptr);
1226 }
1227
1228 return phy_provider;
1229 }
1230 EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
1231
1232 /**
1233 * of_phy_provider_unregister() - unregister phy provider from the framework
1234 * @phy_provider: phy provider returned by of_phy_provider_register()
1235 *
1236 * Removes the phy_provider created using of_phy_provider_register().
1237 */
of_phy_provider_unregister(struct phy_provider * phy_provider)1238 void of_phy_provider_unregister(struct phy_provider *phy_provider)
1239 {
1240 if (IS_ERR(phy_provider))
1241 return;
1242
1243 mutex_lock(&phy_provider_mutex);
1244 list_del(&phy_provider->list);
1245 of_node_put(phy_provider->children);
1246 kfree(phy_provider);
1247 mutex_unlock(&phy_provider_mutex);
1248 }
1249 EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
1250
1251 /**
1252 * devm_of_phy_provider_unregister() - remove phy provider from the framework
1253 * @dev: struct device of the phy provider
1254 * @phy_provider: phy provider returned by of_phy_provider_register()
1255 *
1256 * destroys the devres associated with this phy provider and invokes
1257 * of_phy_provider_unregister to unregister the phy provider.
1258 */
devm_of_phy_provider_unregister(struct device * dev,struct phy_provider * phy_provider)1259 void devm_of_phy_provider_unregister(struct device *dev,
1260 struct phy_provider *phy_provider)
1261 {
1262 int r;
1263
1264 r = devres_release(dev, devm_phy_provider_release, devm_phy_match,
1265 phy_provider);
1266 dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
1267 }
1268 EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
1269
1270 /**
1271 * phy_release() - release the phy
1272 * @dev: the dev member within phy
1273 *
1274 * When the last reference to the device is removed, it is called
1275 * from the embedded kobject as release method.
1276 */
phy_release(struct device * dev)1277 static void phy_release(struct device *dev)
1278 {
1279 struct phy *phy;
1280
1281 phy = to_phy(dev);
1282 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
1283 debugfs_remove_recursive(phy->debugfs);
1284 regulator_put(phy->pwr);
1285 ida_free(&phy_ida, phy->id);
1286 kfree(phy);
1287 }
1288
phy_core_init(void)1289 static int __init phy_core_init(void)
1290 {
1291 int err;
1292
1293 err = class_register(&phy_class);
1294 if (err) {
1295 pr_err("failed to register phy class");
1296 return err;
1297 }
1298
1299 phy_debugfs_root = debugfs_create_dir("phy", NULL);
1300
1301 return 0;
1302 }
1303 device_initcall(phy_core_init);
1304
phy_core_exit(void)1305 static void __exit phy_core_exit(void)
1306 {
1307 debugfs_remove_recursive(phy_debugfs_root);
1308 class_unregister(&phy_class);
1309 }
1310 module_exit(phy_core_exit);
1311