xref: /linux/net/ethtool/phy.c (revision 9410645520e9b820069761f3450ef6661418e279)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Bootlin
 *
 */
#include "common.h"
#include "netlink.h"

#include <linux/phy.h>
#include <linux/phy_link_topology.h>
#include <linux/sfp.h>

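/* Request context for PHY_GET: the generic ethnl request info plus the
 * phy_device_node resolved from the request header, if any.
 */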
struct phy_req_info {
	struct ethnl_req_info		base;
	struct phy_device_node		*pdn;
};

#define PHY_REQINFO(__req_base) \
	container_of(__req_base, struct phy_req_info, base)

const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
	[ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};

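/**
 * ethnl_phy_reply_size() - Compute the reply size for one PHY device
 * @req_base: parsed request info, embedding the resolved PHY node
 * @extack: netlink extended ack, unused here
 *
 * Account for every attribute that ethnl_phy_fill_reply() may emit for
 * the PHY targeted by the request.
 *
 * Context: caller holds rtnl.
 * Return: required payload size in bytes.
 */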
static ssize_t
ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
		     struct netlink_ext_ack *extack)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	size_t size = 0;

	ASSERT_RTNL();

	/* ETHTOOL_A_PHY_INDEX */
	size += nla_total_size(sizeof(u32));

	/* ETHTOOL_A_PHY_DRVNAME */
	if (phydev->drv)
		size += nla_total_size(strlen(phydev->drv->name) + 1);

	/* ETHTOOL_A_PHY_NAME */
	size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);

	/* ETHTOOL_A_PHY_UPSTREAM_TYPE */
	size += nla_total_size(sizeof(u32));

	if (phy_on_sfp(phydev)) {
		const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);

		/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
		if (upstream_sfp_name)
			size += nla_total_size(strlen(upstream_sfp_name) + 1);

		/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
		size += nla_total_size(sizeof(u32));
	}

	/* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name)
			size += nla_total_size(strlen(sfp_name) + 1);
	}

	return size;
}

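/**
 * ethnl_phy_fill_reply() - Emit the attributes describing one PHY device
 * @req_base: parsed request info, embedding the resolved PHY node
 * @skb: reply message being filled
 *
 * Put the PHY index, name, upstream type and, when known, the driver name,
 * the upstream PHY index and the upstream/downstream SFP bus names.
 *
 * Return: 0 on success, -EMSGSIZE if the message runs out of room.
 */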
static int
ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	enum phy_upstream ptype;

	ptype = pdn->upstream_type;

	if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
	    nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
	    nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
		return -EMSGSIZE;

	if (phydev->drv &&
	    nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
		return -EMSGSIZE;

	if (ptype == PHY_UPSTREAM_PHY) {
		struct phy_device *upstream = pdn->upstream.phydev;
		const char *sfp_upstream_name;

		/* Parent index */
		if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
			return -EMSGSIZE;

		if (pdn->parent_sfp_bus) {
			sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
			if (sfp_upstream_name &&
			    nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
					   sfp_upstream_name))
				return -EMSGSIZE;
		}
	}

	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name &&
		    nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
				   sfp_name))
			return -EMSGSIZE;
	}

	return 0;
}

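/**
 * ethnl_phy_parse_request() - Resolve the PHY targeted by the request header
 * @req_base: parsed request info to complete
 * @tb: request attributes
 * @extack: netlink extended ack for error reporting
 *
 * Look up the phy_device referenced by the request header, then cache the
 * matching phy_device_node from the netdev link topology. req_info->pdn is
 * left NULL when the device has no PHY or no link topology.
 *
 * Return: 0 on success or when there is nothing to report, a negative error
 * code when the header references an invalid PHY.
 */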
static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
				   struct nlattr **tb,
				   struct netlink_ext_ack *extack)
{
	struct phy_link_topology *topo = req_base->dev->link_topo;
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device *phydev;

	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
				      extack);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (!topo)
		return 0;

	req_info->pdn = xa_load(&topo->phys, phydev->phyindex);

	return 0;
}

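/* PHY_GET request handler: parse the header (a device reference is
 * mandatory), resolve the targeted PHY under rtnl, then size, fill and send
 * a single reply. When the device has no PHY, no reply is sent and 0 is
 * returned.
 */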
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct phy_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info.base,
					 tb[ETHTOOL_A_PHY_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();

	ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;

	/* No PHY, return early */
	if (!req_info.pdn)
		goto err_unlock_rtnl;

	ret = ethnl_phy_reply_size(&req_info.base, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.base.dev,
				ETHTOOL_MSG_PHY_GET_REPLY,
				ETHTOOL_A_PHY_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_phy_fill_reply(&req_info.base, rskb);
	if (ret)
		goto err_free_msg;

	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	return ret;
}

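/* Dump iteration state, stored in cb->ctx: the request info allocated in
 * ethnl_phy_start() plus the netdev and PHY cursors used to resume the dump.
 */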
struct ethnl_phy_dump_ctx {
	struct phy_req_info	*phy_req_info;
	unsigned long ifindex;
	unsigned long phy_index;
};

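/* Dump start callback: allocate the per-dump request info, parse the
 * optional request header (the device reference may be absent for a full
 * dump) and reset the iteration cursors. The allocation is freed here on
 * parse failure, otherwise in ethnl_phy_done().
 */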
int ethnl_phy_start(struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
	if (!ctx->phy_req_info)
		return -ENOMEM;

	ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
					 info->attrs[ETHTOOL_A_PHY_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	ctx->ifindex = 0;
	ctx->phy_index = 0;

	if (ret)
		kfree(ctx->phy_req_info);

	return ret;
}

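/* Dump completion callback: release the device reference taken in
 * ethnl_phy_start(), if any, and free the per-dump request info.
 */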
int ethnl_phy_done(struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->phy_req_info->base.dev)
		ethnl_parse_header_dev_put(&ctx->phy_req_info->base);

	kfree(ctx->phy_req_info);

	return 0;
}

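/**
 * ethnl_phy_dump_one_dev() - Dump every PHY attached to one net_device
 * @skb: dump message being filled
 * @dev: device whose link topology is walked
 * @cb: netlink callback carrying the dump context
 *
 * Walk the device link topology from the saved PHY cursor and emit one
 * PHY_GET reply per PHY. On -EMSGSIZE the cursor is left in place so the
 * next dump pass resumes from the same PHY.
 *
 * Return: 0 when the device is fully dumped, a negative error code otherwise.
 */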
static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
				  struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct phy_req_info *pri = ctx->phy_req_info;
	struct phy_device_node *pdn;
	int ret = 0;
	void *ehdr;

	if (!dev->link_topo)
		return 0;

	xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
		ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		pri->pdn = pdn;
		ret = ethnl_phy_fill_reply(&pri->base, skb);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		genlmsg_end(skb, ehdr);
	}

	return ret;
}

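/* PHY_GET dump handler: under rtnl, dump either the single device named in
 * the request header or every netdev in the namespace, resetting the PHY
 * cursor between devices.
 */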
int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();

	if (ctx->phy_req_info->base.dev) {
		ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
	} else {
		for_each_netdev_dump(net, dev, ctx->ifindex) {
			ret = ethnl_phy_dump_one_dev(skb, dev, cb);
			if (ret)
				break;

			ctx->phy_index = 0;
		}
	}
	rtnl_unlock();

	return ret;
}