1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * phy-core.c -- Generic Phy framework.
4 *
5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Author: Kishon Vijay Abraham I <kishon@ti.com>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/export.h>
12 #include <linux/module.h>
13 #include <linux/err.h>
14 #include <linux/debugfs.h>
15 #include <linux/device.h>
16 #include <linux/slab.h>
17 #include <linux/of.h>
18 #include <linux/phy/phy.h>
19 #include <linux/idr.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/regulator/consumer.h>
22
/* Forward declaration; the release callback definition is later in this file. */
static void phy_release(struct device *dev);
/* Device class all PHYs are registered under ("/sys/class/phy"). */
static const struct class phy_class = {
	.name = "phy",
	.dev_release = phy_release,
};

/* debugfs root under which each PHY gets its own directory (see phy_create()). */
static struct dentry *phy_debugfs_root;
/* Protects both phy_provider_list and the phys lookup list below. */
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);	/* registered struct phy_provider entries */
static LIST_HEAD(phys);			/* struct phy_lookup consumer associations */
static DEFINE_IDA(phy_ida);		/* allocates unique ids for PHY device names */
34
/* devres release callback: drops the PHY reference held by a managed consumer. */
static void devm_phy_release(struct device *dev, void *res)
{
	struct phy **p = res;

	phy_put(dev, *p);
}
41
/* devres release callback: unregisters the managed PHY provider stored in @res. */
static void devm_phy_provider_release(struct device *dev, void *res)
{
	struct phy_provider **provider = res;

	of_phy_provider_unregister(*provider);
}
48
/* devres release callback: destroys a PHY that was created via devm_phy_create(). */
static void devm_phy_consume(struct device *dev, void *res)
{
	struct phy **p = res;

	phy_destroy(*p);
}
55
/* devres match callback: true when the stored PHY pointer equals @match_data. */
static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	struct phy **p = res;

	return *p == match_data;
}
62
63 /**
64 * phy_create_lookup() - allocate and register PHY/device association
65 * @phy: the phy of the association
66 * @con_id: connection ID string on device
67 * @dev_id: the device of the association
68 *
69 * Creates and registers phy_lookup entry.
70 */
phy_create_lookup(struct phy * phy,const char * con_id,const char * dev_id)71 int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
72 {
73 struct phy_lookup *pl;
74
75 if (!phy || !dev_id || !con_id)
76 return -EINVAL;
77
78 pl = kzalloc(sizeof(*pl), GFP_KERNEL);
79 if (!pl)
80 return -ENOMEM;
81
82 pl->dev_id = dev_id;
83 pl->con_id = con_id;
84 pl->phy = phy;
85
86 mutex_lock(&phy_provider_mutex);
87 list_add_tail(&pl->node, &phys);
88 mutex_unlock(&phy_provider_mutex);
89
90 return 0;
91 }
92 EXPORT_SYMBOL_GPL(phy_create_lookup);
93
94 /**
95 * phy_remove_lookup() - find and remove PHY/device association
96 * @phy: the phy of the association
97 * @con_id: connection ID string on device
98 * @dev_id: the device of the association
99 *
100 * Finds and unregisters phy_lookup entry that was created with
101 * phy_create_lookup().
102 */
phy_remove_lookup(struct phy * phy,const char * con_id,const char * dev_id)103 void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
104 {
105 struct phy_lookup *pl;
106
107 if (!phy || !dev_id || !con_id)
108 return;
109
110 mutex_lock(&phy_provider_mutex);
111 list_for_each_entry(pl, &phys, node)
112 if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
113 !strcmp(pl->con_id, con_id)) {
114 list_del(&pl->node);
115 kfree(pl);
116 break;
117 }
118 mutex_unlock(&phy_provider_mutex);
119 }
120 EXPORT_SYMBOL_GPL(phy_remove_lookup);
121
phy_find(struct device * dev,const char * con_id)122 static struct phy *phy_find(struct device *dev, const char *con_id)
123 {
124 const char *dev_id = dev_name(dev);
125 struct phy_lookup *p, *pl = NULL;
126
127 mutex_lock(&phy_provider_mutex);
128 list_for_each_entry(p, &phys, node)
129 if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
130 pl = p;
131 break;
132 }
133 mutex_unlock(&phy_provider_mutex);
134
135 return pl ? pl->phy : ERR_PTR(-ENODEV);
136 }
137
/*
 * of_phy_provider_lookup() - find the registered provider owning @node.
 *
 * Matches @node either against a provider's own of_node or against one of
 * the provider's child nodes. Callers take phy_provider_mutex around this
 * (see _of_phy_get()), since phy_provider_list is walked unlocked here.
 *
 * Returns the matching phy_provider, or ERR_PTR(-EPROBE_DEFER) when none
 * is registered yet (the provider driver may simply not have probed).
 */
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;
	struct device_node *child;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		/* for_each_child_of_node() holds a ref on @child; drop it
		 * explicitly when breaking out of the walk early. */
		for_each_child_of_node(phy_provider->children, child)
			if (child == node) {
				of_node_put(child);
				return phy_provider;
			}
	}

	return ERR_PTR(-EPROBE_DEFER);
}
156
phy_pm_runtime_get(struct phy * phy)157 int phy_pm_runtime_get(struct phy *phy)
158 {
159 int ret;
160
161 if (!phy)
162 return 0;
163
164 if (!pm_runtime_enabled(&phy->dev))
165 return -ENOTSUPP;
166
167 ret = pm_runtime_get(&phy->dev);
168 if (ret < 0 && ret != -EINPROGRESS)
169 pm_runtime_put_noidle(&phy->dev);
170
171 return ret;
172 }
173 EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
174
phy_pm_runtime_get_sync(struct phy * phy)175 int phy_pm_runtime_get_sync(struct phy *phy)
176 {
177 int ret;
178
179 if (!phy)
180 return 0;
181
182 if (!pm_runtime_enabled(&phy->dev))
183 return -ENOTSUPP;
184
185 ret = pm_runtime_get_sync(&phy->dev);
186 if (ret < 0)
187 pm_runtime_put_sync(&phy->dev);
188
189 return ret;
190 }
191 EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
192
phy_pm_runtime_put(struct phy * phy)193 int phy_pm_runtime_put(struct phy *phy)
194 {
195 if (!phy)
196 return 0;
197
198 if (!pm_runtime_enabled(&phy->dev))
199 return -ENOTSUPP;
200
201 return pm_runtime_put(&phy->dev);
202 }
203 EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
204
phy_pm_runtime_put_sync(struct phy * phy)205 int phy_pm_runtime_put_sync(struct phy *phy)
206 {
207 if (!phy)
208 return 0;
209
210 if (!pm_runtime_enabled(&phy->dev))
211 return -ENOTSUPP;
212
213 return pm_runtime_put_sync(&phy->dev);
214 }
215 EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
216
/**
 * phy_init - phy internal initialization before phy operation
 * @phy: the phy returned by phy_get()
 *
 * Used to allow phy's driver to perform phy internal initialization,
 * such as PLL block powering, clock initialization or anything that
 * is required by the phy to perform the start of operation.
 * Must be called before phy_power_on(). Calls are reference counted:
 * only the first caller triggers the driver's ->init() hook.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	/* Runtime PM is optional; any other get failure aborts the init. */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->power_count > phy->init_count)
		dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");

	/* Only the first user actually initializes the hardware. */
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);
259
/**
 * phy_exit - Phy internal un-initialization
 * @phy: the phy returned by phy_get()
 *
 * Must be called after phy_power_off(). Calls are reference counted:
 * only the last caller (init_count dropping from 1) triggers the
 * driver's ->exit() hook.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	/* Runtime PM is optional; any other get failure aborts the exit. */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Only the last user actually de-initializes the hardware; on
	 * ->exit() failure the count is deliberately left unchanged. */
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);
296
/**
 * phy_power_on - Enable the phy and enter proper operation
 * @phy: the phy returned by phy_get()
 *
 * Must be called after phy_init(). Calls are reference counted: only the
 * first caller triggers the driver's ->power_on() hook.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_power_on(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		goto out;

	/* Enable the optional "phy" supply before touching the hardware. */
	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			goto out;
	}

	/* Runtime PM is optional; any other get failure aborts power-on. */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		goto err_pm_sync;

	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Only the first user actually powers up the hardware. */
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto err_pwr_on;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

	/* Unwind in reverse order of acquisition. */
err_pwr_on:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
err_pm_sync:
	if (phy->pwr)
		regulator_disable(phy->pwr);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
346
/**
 * phy_power_off - Disable the phy.
 * @phy: the phy returned by phy_get()
 *
 * Must be called before phy_exit(). Calls are reference counted: only the
 * last caller (power_count dropping from 1) triggers the driver's
 * ->power_off() hook.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	/* Only the last user powers down; on ->power_off() failure the
	 * count is deliberately left unchanged. */
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	/* Balance the PM get and regulator enable taken in phy_power_on(). */
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
381
phy_set_mode_ext(struct phy * phy,enum phy_mode mode,int submode)382 int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
383 {
384 int ret = 0;
385
386 if (!phy)
387 return 0;
388
389 mutex_lock(&phy->mutex);
390 if (phy->ops->set_mode)
391 ret = phy->ops->set_mode(phy, mode, submode);
392 if (!ret)
393 phy->attrs.mode = mode;
394 mutex_unlock(&phy->mutex);
395
396 return ret;
397 }
398 EXPORT_SYMBOL_GPL(phy_set_mode_ext);
399
phy_set_media(struct phy * phy,enum phy_media media)400 int phy_set_media(struct phy *phy, enum phy_media media)
401 {
402 int ret;
403
404 if (!phy || !phy->ops->set_media)
405 return 0;
406
407 mutex_lock(&phy->mutex);
408 ret = phy->ops->set_media(phy, media);
409 mutex_unlock(&phy->mutex);
410
411 return ret;
412 }
413 EXPORT_SYMBOL_GPL(phy_set_media);
414
phy_set_speed(struct phy * phy,int speed)415 int phy_set_speed(struct phy *phy, int speed)
416 {
417 int ret;
418
419 if (!phy || !phy->ops->set_speed)
420 return 0;
421
422 mutex_lock(&phy->mutex);
423 ret = phy->ops->set_speed(phy, speed);
424 mutex_unlock(&phy->mutex);
425
426 return ret;
427 }
428 EXPORT_SYMBOL_GPL(phy_set_speed);
429
/*
 * phy_reset() - ask the PHY driver to reset the hardware.
 * A no-op returning 0 when @phy is NULL or the driver has no ->reset.
 * The reset runs with the device runtime-resumed (PM get/put bracket)
 * and under the phy mutex.
 */
int phy_reset(struct phy *phy)
{
	int ret;

	if (!phy || !phy->ops->reset)
		return 0;

	/* Runtime PM is optional; any other get failure aborts the reset. */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	ret = phy->ops->reset(phy);
	mutex_unlock(&phy->mutex);

	phy_pm_runtime_put(phy);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_reset);
450
451 /**
452 * phy_calibrate() - Tunes the phy hw parameters for current configuration
453 * @phy: the phy returned by phy_get()
454 *
455 * Used to calibrate phy hardware, typically by adjusting some parameters in
456 * runtime, which are otherwise lost after host controller reset and cannot
457 * be applied in phy_init() or phy_power_on().
458 *
459 * Return: %0 if successful, a negative error code otherwise
460 */
phy_calibrate(struct phy * phy)461 int phy_calibrate(struct phy *phy)
462 {
463 int ret;
464
465 if (!phy || !phy->ops->calibrate)
466 return 0;
467
468 mutex_lock(&phy->mutex);
469 ret = phy->ops->calibrate(phy);
470 mutex_unlock(&phy->mutex);
471
472 return ret;
473 }
474 EXPORT_SYMBOL_GPL(phy_calibrate);
475
476 /**
477 * phy_notify_connect() - phy connect notification
478 * @phy: the phy returned by phy_get()
479 * @port: the port index for connect
480 *
481 * If the phy needs to get connection status, the callback can be used.
482 * Returns: %0 if successful, a negative error code otherwise
483 */
phy_notify_connect(struct phy * phy,int port)484 int phy_notify_connect(struct phy *phy, int port)
485 {
486 int ret;
487
488 if (!phy || !phy->ops->connect)
489 return 0;
490
491 mutex_lock(&phy->mutex);
492 ret = phy->ops->connect(phy, port);
493 mutex_unlock(&phy->mutex);
494
495 return ret;
496 }
497 EXPORT_SYMBOL_GPL(phy_notify_connect);
498
499 /**
500 * phy_notify_disconnect() - phy disconnect notification
501 * @phy: the phy returned by phy_get()
502 * @port: the port index for disconnect
503 *
504 * If the phy needs to get connection status, the callback can be used.
505 *
506 * Returns: %0 if successful, a negative error code otherwise
507 */
phy_notify_disconnect(struct phy * phy,int port)508 int phy_notify_disconnect(struct phy *phy, int port)
509 {
510 int ret;
511
512 if (!phy || !phy->ops->disconnect)
513 return 0;
514
515 mutex_lock(&phy->mutex);
516 ret = phy->ops->disconnect(phy, port);
517 mutex_unlock(&phy->mutex);
518
519 return ret;
520 }
521 EXPORT_SYMBOL_GPL(phy_notify_disconnect);
522
523 /**
524 * phy_notify_state() - phy state notification
525 * @phy: the PHY returned by phy_get()
526 * @state: the PHY state
527 *
528 * Notify the PHY of a state transition. Used to notify and
529 * configure the PHY accordingly.
530 *
531 * Returns: %0 if successful, a negative error code otherwise
532 */
phy_notify_state(struct phy * phy,union phy_notify state)533 int phy_notify_state(struct phy *phy, union phy_notify state)
534 {
535 int ret;
536
537 if (!phy || !phy->ops->notify_phystate)
538 return 0;
539
540 mutex_lock(&phy->mutex);
541 ret = phy->ops->notify_phystate(phy, state);
542 mutex_unlock(&phy->mutex);
543
544 return ret;
545 }
546 EXPORT_SYMBOL_GPL(phy_notify_state);
547
548 /**
549 * phy_configure() - Changes the phy parameters
550 * @phy: the phy returned by phy_get()
551 * @opts: New configuration to apply
552 *
553 * Used to change the PHY parameters. phy_init() must have been called
554 * on the phy. The configuration will be applied on the current phy
555 * mode, that can be changed using phy_set_mode().
556 *
557 * Return: %0 if successful, a negative error code otherwise
558 */
phy_configure(struct phy * phy,union phy_configure_opts * opts)559 int phy_configure(struct phy *phy, union phy_configure_opts *opts)
560 {
561 int ret;
562
563 if (!phy)
564 return -EINVAL;
565
566 if (!phy->ops->configure)
567 return -EOPNOTSUPP;
568
569 mutex_lock(&phy->mutex);
570 ret = phy->ops->configure(phy, opts);
571 mutex_unlock(&phy->mutex);
572
573 return ret;
574 }
575 EXPORT_SYMBOL_GPL(phy_configure);
576
577 /**
578 * phy_validate() - Checks the phy parameters
579 * @phy: the phy returned by phy_get()
580 * @mode: phy_mode the configuration is applicable to.
581 * @submode: PHY submode the configuration is applicable to.
582 * @opts: Configuration to check
583 *
584 * Used to check that the current set of parameters can be handled by
585 * the phy. Implementations are free to tune the parameters passed as
586 * arguments if needed by some implementation detail or
587 * constraints. It will not change any actual configuration of the
588 * PHY, so calling it as many times as deemed fit will have no side
589 * effect.
590 *
591 * Return: %0 if successful, a negative error code otherwise
592 */
phy_validate(struct phy * phy,enum phy_mode mode,int submode,union phy_configure_opts * opts)593 int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
594 union phy_configure_opts *opts)
595 {
596 int ret;
597
598 if (!phy)
599 return -EINVAL;
600
601 if (!phy->ops->validate)
602 return -EOPNOTSUPP;
603
604 mutex_lock(&phy->mutex);
605 ret = phy->ops->validate(phy, mode, submode, opts);
606 mutex_unlock(&phy->mutex);
607
608 return ret;
609 }
610 EXPORT_SYMBOL_GPL(phy_validate);
611
/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses of_xlate call back function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	/* Resolve the index'th "phys" phandle; args.np is refcounted. */
	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
					 index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	/* This phy type handled by the usb-phy subsystem for now */
	if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
		phy = ERR_PTR(-ENODEV);
		goto out_put_node;
	}

	/* Serializes against provider (un)registration. */
	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	/* Pin the provider module while its of_xlate() runs. */
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (!of_device_is_available(args.np)) {
		dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
		phy = ERR_PTR(-ENODEV);
		goto out_put_module;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);

	/* Unwind in reverse order of acquisition. */
out_put_module:
	module_put(phy_provider->owner);

out_unlock:
	mutex_unlock(&phy_provider_mutex);
out_put_node:
	of_node_put(args.np);

	return phy;
}
666
667 /**
668 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
669 * @np: device_node for which to get the phy
670 * @con_id: name of the phy from device's point of view
671 *
672 * Returns the phy driver, after getting a refcount to it; or
673 * -ENODEV if there is no such phy. The caller is responsible for
674 * calling of_phy_put() to release that count.
675 */
of_phy_get(struct device_node * np,const char * con_id)676 struct phy *of_phy_get(struct device_node *np, const char *con_id)
677 {
678 struct phy *phy = NULL;
679 int index = 0;
680
681 if (con_id)
682 index = of_property_match_string(np, "phy-names", con_id);
683
684 phy = _of_phy_get(np, index);
685 if (IS_ERR(phy))
686 return phy;
687
688 if (!try_module_get(phy->ops->owner))
689 return ERR_PTR(-EPROBE_DEFER);
690
691 get_device(&phy->dev);
692
693 return phy;
694 }
695 EXPORT_SYMBOL_GPL(of_phy_get);
696
697 /**
698 * of_phy_put() - release the PHY
699 * @phy: the phy returned by of_phy_get()
700 *
701 * Releases a refcount the caller received from of_phy_get().
702 */
of_phy_put(struct phy * phy)703 void of_phy_put(struct phy *phy)
704 {
705 if (!phy || IS_ERR(phy))
706 return;
707
708 mutex_lock(&phy->mutex);
709 if (phy->ops->release)
710 phy->ops->release(phy);
711 mutex_unlock(&phy->mutex);
712
713 module_put(phy->ops->owner);
714 put_device(&phy->dev);
715 }
716 EXPORT_SYMBOL_GPL(of_phy_put);
717
718 /**
719 * phy_put() - release the PHY
720 * @dev: device that wants to release this phy
721 * @phy: the phy returned by phy_get()
722 *
723 * Releases a refcount the caller received from phy_get().
724 */
phy_put(struct device * dev,struct phy * phy)725 void phy_put(struct device *dev, struct phy *phy)
726 {
727 device_link_remove(dev, &phy->dev);
728 of_phy_put(phy);
729 }
730 EXPORT_SYMBOL_GPL(phy_put);
731
732 /**
733 * devm_phy_put() - release the PHY
734 * @dev: device that wants to release this phy
735 * @phy: the phy returned by devm_phy_get()
736 *
737 * destroys the devres associated with this phy and invokes phy_put
738 * to release the phy.
739 */
devm_phy_put(struct device * dev,struct phy * phy)740 void devm_phy_put(struct device *dev, struct phy *phy)
741 {
742 int r;
743
744 if (!phy)
745 return;
746
747 r = devres_release(dev, devm_phy_release, devm_phy_match, phy);
748 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
749 }
750 EXPORT_SYMBOL_GPL(devm_phy_put);
751
/**
 * of_phy_simple_xlate() - returns the phy instance from phy provider
 * @dev: the PHY provider device (not used here)
 * @args: of_phandle_args
 *
 * Intended to be used by phy provider for the common case where #phy-cells is
 * 0. For other cases where #phy-cells is greater than '0', the phy provider
 * should provide a custom of_xlate function that reads the *args* and returns
 * the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev,
				const struct of_phandle_args *args)
{
	struct device *target_dev;

	target_dev = class_find_device_by_of_node(&phy_class, args->np);
	if (!target_dev)
		return ERR_PTR(-ENODEV);

	/* class_find_device_by_of_node() took a device reference; drop it
	 * here. NOTE(review): the phy is returned after put_device() —
	 * presumably callers (e.g. of_phy_get()) take their own reference
	 * before use; confirm against the call sites. */
	put_device(target_dev);
	return to_phy(target_dev);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
775
/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;
	struct device_link *link;

	if (dev->of_node) {
		/* DT path: resolve @string to an index in "phy-names". */
		if (string)
			index = of_property_match_string(dev->of_node, "phy-names",
							 string);
		else
			index = 0;
		phy = _of_phy_get(dev->of_node, index);
	} else {
		/* Non-DT path: consult the phy_create_lookup() table. */
		if (string == NULL) {
			dev_WARN(dev, "missing string\n");
			return ERR_PTR(-EINVAL);
		}
		phy = phy_find(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	/* Pin the PHY driver module, then the PHY device itself. */
	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	/* Link consumer to provider for PM/unbind ordering; best effort. */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);
822
823 /**
824 * devm_phy_get() - lookup and obtain a reference to a phy.
825 * @dev: device that requests this phy
826 * @string: the phy name as given in the dt data or phy device name
827 * for non-dt case
828 *
829 * Gets the phy using phy_get(), and associates a device with it using
830 * devres. On driver detach, release function is invoked on the devres data,
831 * then, devres data is freed.
832 */
devm_phy_get(struct device * dev,const char * string)833 struct phy *devm_phy_get(struct device *dev, const char *string)
834 {
835 struct phy **ptr, *phy;
836
837 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
838 if (!ptr)
839 return ERR_PTR(-ENOMEM);
840
841 phy = phy_get(dev, string);
842 if (!IS_ERR(phy)) {
843 *ptr = phy;
844 devres_add(dev, ptr);
845 } else {
846 devres_free(ptr);
847 }
848
849 return phy;
850 }
851 EXPORT_SYMBOL_GPL(devm_phy_get);
852
853 /**
854 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
855 * @dev: device that requests this phy
856 * @string: the phy name as given in the dt data or phy device name
857 * for non-dt case
858 *
859 * Gets the phy using phy_get(), and associates a device with it using
860 * devres. On driver detach, release function is invoked on the devres
861 * data, then, devres data is freed. This differs to devm_phy_get() in
862 * that if the phy does not exist, it is not considered an error and
863 * -ENODEV will not be returned. Instead the NULL phy is returned,
864 * which can be passed to all other phy consumer calls.
865 */
devm_phy_optional_get(struct device * dev,const char * string)866 struct phy *devm_phy_optional_get(struct device *dev, const char *string)
867 {
868 struct phy *phy = devm_phy_get(dev, string);
869
870 if (PTR_ERR(phy) == -ENODEV)
871 phy = NULL;
872
873 return phy;
874 }
875 EXPORT_SYMBOL_GPL(devm_phy_optional_get);
876
877 /**
878 * devm_of_phy_get() - lookup and obtain a reference to a phy.
879 * @dev: device that requests this phy
880 * @np: node containing the phy
881 * @con_id: name of the phy from device's point of view
882 *
883 * Gets the phy using of_phy_get(), and associates a device with it using
884 * devres. On driver detach, release function is invoked on the devres data,
885 * then, devres data is freed.
886 */
devm_of_phy_get(struct device * dev,struct device_node * np,const char * con_id)887 struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
888 const char *con_id)
889 {
890 struct phy **ptr, *phy;
891 struct device_link *link;
892
893 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
894 if (!ptr)
895 return ERR_PTR(-ENOMEM);
896
897 phy = of_phy_get(np, con_id);
898 if (!IS_ERR(phy)) {
899 *ptr = phy;
900 devres_add(dev, ptr);
901 } else {
902 devres_free(ptr);
903 return phy;
904 }
905
906 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
907 if (!link)
908 dev_dbg(dev, "failed to create device link to %s\n",
909 dev_name(phy->dev.parent));
910
911 return phy;
912 }
913 EXPORT_SYMBOL_GPL(devm_of_phy_get);
914
915 /**
916 * devm_of_phy_optional_get() - lookup and obtain a reference to an optional
917 * phy.
918 * @dev: device that requests this phy
919 * @np: node containing the phy
920 * @con_id: name of the phy from device's point of view
921 *
922 * Gets the phy using of_phy_get(), and associates a device with it using
923 * devres. On driver detach, release function is invoked on the devres data,
924 * then, devres data is freed. This differs to devm_of_phy_get() in
925 * that if the phy does not exist, it is not considered an error and
926 * -ENODEV will not be returned. Instead the NULL phy is returned,
927 * which can be passed to all other phy consumer calls.
928 */
devm_of_phy_optional_get(struct device * dev,struct device_node * np,const char * con_id)929 struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
930 const char *con_id)
931 {
932 struct phy *phy = devm_of_phy_get(dev, np, con_id);
933
934 if (PTR_ERR(phy) == -ENODEV)
935 phy = NULL;
936
937 if (IS_ERR(phy))
938 dev_err_probe(dev, PTR_ERR(phy), "failed to get PHY %pOF:%s",
939 np, con_id);
940
941 return phy;
942 }
943 EXPORT_SYMBOL_GPL(devm_of_phy_optional_get);
944
/**
 * devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @index: index of the phy
 *
 * Gets the phy using _of_phy_get(), then gets a refcount to it,
 * and associates a device with it using devres. On driver detach,
 * release function is invoked on the devres data,
 * then, devres data is freed.
 *
 */
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
				     int index)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy)) {
		devres_free(ptr);
		return phy;
	}

	/* _of_phy_get() does not pin the driver module or device; do the
	 * same two steps of_phy_get() performs. */
	if (!try_module_get(phy->ops->owner)) {
		devres_free(ptr);
		return ERR_PTR(-EPROBE_DEFER);
	}

	get_device(&phy->dev);

	*ptr = phy;
	devres_add(dev, ptr);

	/* Link consumer to provider for PM/unbind ordering; best effort. */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
991
/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using phy framework. Registers the phy as a
 * device in the "phy" class, with @dev as its parent. On failure an
 * ERR_PTR() is returned; after device_initialize() the teardown goes
 * through put_device()/phy_release() rather than kfree().
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	/* Unique id used in the device name; released by phy_release(). */
	id = ida_alloc(&phy_ida, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	device_initialize(&phy->dev);
	/* Per-phy lock class so nested PHY usage doesn't trip lockdep. */
	lockdep_register_key(&phy->lockdep_key);
	mutex_init_with_key(&phy->mutex, &phy->lockdep_key);

	phy->dev.class = &phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	/* phy-supply: optional regulator; only -EPROBE_DEFER is fatal. */
	phy->pwr = regulator_get_optional(&phy->dev, "phy");
	if (IS_ERR(phy->pwr)) {
		ret = PTR_ERR(phy->pwr);
		if (ret == -EPROBE_DEFER)
			goto put_dev;

		phy->pwr = NULL;
	}

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	/* Mirror the parent's runtime-PM capability onto the phy device. */
	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	phy->debugfs = debugfs_create_dir(dev_name(&phy->dev), phy_debugfs_root);

	return phy;

put_dev:
	put_device(&phy->dev);	/* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_phy:
	/* Before device_initialize(): no release callback yet, plain kfree. */
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
1067
1068 /**
1069 * devm_phy_create() - create a new phy
1070 * @dev: device that is creating the new phy
1071 * @node: device node of the phy
1072 * @ops: function pointers for performing phy operations
1073 *
1074 * Creates a new PHY device adding it to the PHY class.
1075 * While at that, it also associates the device with the phy using devres.
1076 * On driver detach, release function is invoked on the devres data,
1077 * then, devres data is freed.
1078 */
devm_phy_create(struct device * dev,struct device_node * node,const struct phy_ops * ops)1079 struct phy *devm_phy_create(struct device *dev, struct device_node *node,
1080 const struct phy_ops *ops)
1081 {
1082 struct phy **ptr, *phy;
1083
1084 ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
1085 if (!ptr)
1086 return ERR_PTR(-ENOMEM);
1087
1088 phy = phy_create(dev, node, ops);
1089 if (!IS_ERR(phy)) {
1090 *ptr = phy;
1091 devres_add(dev, ptr);
1092 } else {
1093 devres_free(ptr);
1094 }
1095
1096 return phy;
1097 }
1098 EXPORT_SYMBOL_GPL(devm_phy_create);
1099
1100 /**
1101 * phy_destroy() - destroy the phy
1102 * @phy: the phy to be destroyed
1103 *
1104 * Called to destroy the phy.
1105 */
phy_destroy(struct phy * phy)1106 void phy_destroy(struct phy *phy)
1107 {
1108 pm_runtime_disable(&phy->dev);
1109 device_unregister(&phy->dev);
1110 }
1111 EXPORT_SYMBOL_GPL(phy_destroy);
1112
1113 /**
1114 * devm_phy_destroy() - destroy the PHY
1115 * @dev: device that wants to release this phy
1116 * @phy: the phy returned by devm_phy_get()
1117 *
1118 * destroys the devres associated with this phy and invokes phy_destroy
1119 * to destroy the phy.
1120 */
devm_phy_destroy(struct device * dev,struct phy * phy)1121 void devm_phy_destroy(struct device *dev, struct phy *phy)
1122 {
1123 int r;
1124
1125 r = devres_release(dev, devm_phy_consume, devm_phy_match, phy);
1126 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
1127 }
1128 EXPORT_SYMBOL_GPL(devm_phy_destroy);
1129
1130 /**
1131 * __of_phy_provider_register() - create/register phy provider with the framework
1132 * @dev: struct device of the phy provider
1133 * @children: device node containing children (if different from dev->of_node)
1134 * @owner: the module owner containing of_xlate
1135 * @of_xlate: function pointer to obtain phy instance from phy provider
1136 *
1137 * Creates struct phy_provider from dev and of_xlate function pointer.
1138 * This is used in the case of dt boot for finding the phy instance from
1139 * phy provider.
1140 *
1141 * If the PHY provider doesn't nest children directly but uses a separate
1142 * child node to contain the individual children, the @children parameter
1143 * can be used to override the default. If NULL, the default (dev->of_node)
1144 * will be used. If non-NULL, the device node must be a child (or further
1145 * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
1146 * error code is returned.
1147 */
__of_phy_provider_register(struct device * dev,struct device_node * children,struct module * owner,struct phy * (* of_xlate)(struct device * dev,const struct of_phandle_args * args))1148 struct phy_provider *__of_phy_provider_register(struct device *dev,
1149 struct device_node *children, struct module *owner,
1150 struct phy * (*of_xlate)(struct device *dev,
1151 const struct of_phandle_args *args))
1152 {
1153 struct phy_provider *phy_provider;
1154
1155 /*
1156 * If specified, the device node containing the children must itself
1157 * be the provider's device node or a child (or further descendant)
1158 * thereof.
1159 */
1160 if (children) {
1161 struct device_node *parent = of_node_get(children), *next;
1162
1163 while (parent) {
1164 if (parent == dev->of_node)
1165 break;
1166
1167 next = of_get_parent(parent);
1168 of_node_put(parent);
1169 parent = next;
1170 }
1171
1172 if (!parent)
1173 return ERR_PTR(-EINVAL);
1174
1175 of_node_put(parent);
1176 } else {
1177 children = dev->of_node;
1178 }
1179
1180 phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
1181 if (!phy_provider)
1182 return ERR_PTR(-ENOMEM);
1183
1184 phy_provider->dev = dev;
1185 phy_provider->children = of_node_get(children);
1186 phy_provider->owner = owner;
1187 phy_provider->of_xlate = of_xlate;
1188
1189 mutex_lock(&phy_provider_mutex);
1190 list_add_tail(&phy_provider->list, &phy_provider_list);
1191 mutex_unlock(&phy_provider_mutex);
1192
1193 return phy_provider;
1194 }
1195 EXPORT_SYMBOL_GPL(__of_phy_provider_register);
1196
1197 /**
1198 * __devm_of_phy_provider_register() - create/register phy provider with the
1199 * framework
1200 * @dev: struct device of the phy provider
1201 * @children: device node containing children (if different from dev->of_node)
1202 * @owner: the module owner containing of_xlate
1203 * @of_xlate: function pointer to obtain phy instance from phy provider
1204 *
1205 * Creates struct phy_provider from dev and of_xlate function pointer.
1206 * This is used in the case of dt boot for finding the phy instance from
1207 * phy provider. While at that, it also associates the device with the
1208 * phy provider using devres. On driver detach, release function is invoked
1209 * on the devres data, then, devres data is freed.
1210 */
__devm_of_phy_provider_register(struct device * dev,struct device_node * children,struct module * owner,struct phy * (* of_xlate)(struct device * dev,const struct of_phandle_args * args))1211 struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
1212 struct device_node *children, struct module *owner,
1213 struct phy * (*of_xlate)(struct device *dev,
1214 const struct of_phandle_args *args))
1215 {
1216 struct phy_provider **ptr, *phy_provider;
1217
1218 ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
1219 if (!ptr)
1220 return ERR_PTR(-ENOMEM);
1221
1222 phy_provider = __of_phy_provider_register(dev, children, owner,
1223 of_xlate);
1224 if (!IS_ERR(phy_provider)) {
1225 *ptr = phy_provider;
1226 devres_add(dev, ptr);
1227 } else {
1228 devres_free(ptr);
1229 }
1230
1231 return phy_provider;
1232 }
1233 EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
1234
1235 /**
1236 * of_phy_provider_unregister() - unregister phy provider from the framework
1237 * @phy_provider: phy provider returned by of_phy_provider_register()
1238 *
1239 * Removes the phy_provider created using of_phy_provider_register().
1240 */
of_phy_provider_unregister(struct phy_provider * phy_provider)1241 void of_phy_provider_unregister(struct phy_provider *phy_provider)
1242 {
1243 if (IS_ERR(phy_provider))
1244 return;
1245
1246 mutex_lock(&phy_provider_mutex);
1247 list_del(&phy_provider->list);
1248 of_node_put(phy_provider->children);
1249 kfree(phy_provider);
1250 mutex_unlock(&phy_provider_mutex);
1251 }
1252 EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
1253
1254 /**
1255 * devm_of_phy_provider_unregister() - remove phy provider from the framework
1256 * @dev: struct device of the phy provider
1257 * @phy_provider: phy provider returned by of_phy_provider_register()
1258 *
1259 * destroys the devres associated with this phy provider and invokes
1260 * of_phy_provider_unregister to unregister the phy provider.
1261 */
devm_of_phy_provider_unregister(struct device * dev,struct phy_provider * phy_provider)1262 void devm_of_phy_provider_unregister(struct device *dev,
1263 struct phy_provider *phy_provider)
1264 {
1265 int r;
1266
1267 r = devres_release(dev, devm_phy_provider_release, devm_phy_match,
1268 phy_provider);
1269 dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
1270 }
1271 EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
1272
1273 /**
1274 * phy_release() - release the phy
1275 * @dev: the dev member within phy
1276 *
1277 * When the last reference to the device is removed, it is called
1278 * from the embedded kobject as release method.
1279 */
phy_release(struct device * dev)1280 static void phy_release(struct device *dev)
1281 {
1282 struct phy *phy;
1283
1284 phy = to_phy(dev);
1285 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
1286 debugfs_remove_recursive(phy->debugfs);
1287 regulator_put(phy->pwr);
1288 mutex_destroy(&phy->mutex);
1289 lockdep_unregister_key(&phy->lockdep_key);
1290 ida_free(&phy_ida, phy->id);
1291 kfree(phy);
1292 }
1293
phy_core_init(void)1294 static int __init phy_core_init(void)
1295 {
1296 int err;
1297
1298 err = class_register(&phy_class);
1299 if (err) {
1300 pr_err("failed to register phy class");
1301 return err;
1302 }
1303
1304 phy_debugfs_root = debugfs_create_dir("phy", NULL);
1305
1306 return 0;
1307 }
1308 device_initcall(phy_core_init);
1309
phy_core_exit(void)1310 static void __exit phy_core_exit(void)
1311 {
1312 debugfs_remove_recursive(phy_debugfs_root);
1313 class_unregister(&phy_class);
1314 }
1315 module_exit(phy_core_exit);
1316