xref: /linux/net/ethtool/phy.c (revision 3e64db35bc37edbe9e37aaa987df92cde12ddb6c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Bootlin
 *
 */
#include "common.h"
#include "netlink.h"

#include <linux/phy.h>
#include <linux/phy_link_topology.h>
#include <linux/sfp.h>

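/* Request-specific state: the generic ethnl request header plus a copy of
 * the phy_device_node describing the PHY being queried.
 */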
struct phy_req_info {
	struct ethnl_req_info		base;
	struct phy_device_node		pdn;
};

#define PHY_REQINFO(__req_base) \
	container_of(__req_base, struct phy_req_info, base)

const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
	[ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};

/* Caller holds rtnl. Sizes the reply for every PHY in the device's link
 * topology, so the result is an upper bound for a single-PHY reply.
 */
static ssize_t
ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
		     struct netlink_ext_ack *extack)
{
	struct phy_link_topology *topo;
	struct phy_device_node *pdn;
	struct phy_device *phydev;
	unsigned long index;
	size_t size;

	ASSERT_RTNL();

	topo = &req_base->dev->link_topo;

	size = nla_total_size(0);

	xa_for_each(&topo->phys, index, pdn) {
		phydev = pdn->phy;

		/* ETHTOOL_A_PHY_INDEX */
		size += nla_total_size(sizeof(u32));

		/* ETHTOOL_A_PHY_DRVNAME */
		size += nla_total_size(strlen(phydev->drv->name) + 1);

		/* ETHTOOL_A_PHY_NAME */
		size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);

		/* ETHTOOL_A_PHY_UPSTREAM_TYPE */
		size += nla_total_size(sizeof(u8));

		/* ETHTOOL_A_PHY_ID */
		size += nla_total_size(sizeof(u32));

		if (phy_on_sfp(phydev)) {
			const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);

			/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
			if (upstream_sfp_name)
				size += nla_total_size(strlen(upstream_sfp_name) + 1);

			/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
			size += nla_total_size(sizeof(u32));
		}

		/* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
		if (phydev->sfp_bus) {
			const char *sfp_name = sfp_get_name(phydev->sfp_bus);

			if (sfp_name)
				size += nla_total_size(strlen(sfp_name) + 1);
		}
	}

	return size;
}

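/* Fill one PHY_GET reply from the phy_device_node stored in the request
 * info. Caller holds rtnl.
 */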
static int
ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = &req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	enum phy_upstream ptype;
	struct nlattr *nest;

	ptype = pdn->upstream_type;

	if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
	    nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name) ||
	    nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
	    nla_put_u8(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype) ||
	    nla_put_u32(skb, ETHTOOL_A_PHY_ID, phydev->phy_id))
		return -EMSGSIZE;

	if (ptype == PHY_UPSTREAM_PHY) {
		struct phy_device *upstream = pdn->upstream.phydev;
		const char *sfp_upstream_name;

		nest = nla_nest_start(skb, ETHTOOL_A_PHY_UPSTREAM);
		if (!nest)
			return -EMSGSIZE;

		/* Parent index */
		if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
			return -EMSGSIZE;

		if (pdn->parent_sfp_bus) {
			sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
			if (sfp_upstream_name && nla_put_string(skb,
								ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
								sfp_upstream_name))
				return -EMSGSIZE;
		}

		nla_nest_end(skb, nest);
	}

	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name &&
		    nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
				   sfp_name))
			return -EMSGSIZE;
	}

	return 0;
}

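/* Resolve the PHY selected by the request header into a phy_device_node
 * copy in the request info. Caller holds rtnl.
 */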
static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
				   struct nlattr **tb)
{
	struct phy_link_topology *topo = &req_base->dev->link_topo;
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn;

	if (!req_base->phydev)
		return 0;

	pdn = xa_load(&topo->phys, req_base->phydev->phyindex);
	memcpy(&req_info->pdn, pdn, sizeof(*pdn));

	return 0;
}

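/* DOIT handler for ETHTOOL_MSG_PHY_GET */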
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct phy_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info.base,
					 tb[ETHTOOL_A_PHY_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();

	ret = ethnl_phy_parse_request(&req_info.base, tb);
	if (ret < 0)
		goto err_unlock_rtnl;

	/* No PHY, return early */
	if (!req_info.pdn.phy)
		goto err_unlock_rtnl;

	ret = ethnl_phy_reply_size(&req_info.base, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.base.dev,
				ETHTOOL_MSG_PHY_GET_REPLY,
				ETHTOOL_A_PHY_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_phy_fill_reply(&req_info.base, rskb);
	if (ret)
		goto err_free_msg;

	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	return ret;
}

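/* Per-dump state stored in netlink_callback->ctx */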
struct ethnl_phy_dump_ctx {
	struct phy_req_info	*phy_req_info;
};

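/* Start callback for PHY_GET dumps: allocate the request info and parse the
 * request header. A target device is optional for a dump.
 */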
int ethnl_phy_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->info.attrs;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
	if (!ctx->phy_req_info)
		return -ENOMEM;

	ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
					 tb[ETHTOOL_A_PHY_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	/* ethnl_phy_done() is not called when start fails, so free the
	 * request info here to avoid leaking it.
	 */
	if (ret)
		kfree(ctx->phy_req_info);

	return ret;
}

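/* Done callback: free the request info allocated in ethnl_phy_start() */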
int ethnl_phy_done(struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;

	kfree(ctx->phy_req_info);

	return 0;
}

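/* Emit one PHY_GET reply for each PHY attached to @dev. Caller holds rtnl. */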
static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
				  struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct phy_req_info *pri = ctx->phy_req_info;
	struct phy_device_node *pdn;
	unsigned long index = 1;
	int ret = 0;
	void *ehdr;

	pri->base.dev = dev;

	xa_for_each(&dev->link_topo.phys, index, pdn) {
		ehdr = ethnl_dump_put(skb, cb,
				      ETHTOOL_MSG_PHY_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev,
					      ETHTOOL_A_PHY_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		memcpy(&pri->pdn, pdn, sizeof(*pdn));
		ret = ethnl_phy_fill_reply(&pri->base, skb);
		if (ret < 0) {
			/* Drop the partially filled message instead of
			 * sending a truncated entry to userspace.
			 */
			genlmsg_cancel(skb, ehdr);
			break;
		}

		genlmsg_end(skb, ehdr);
	}

	return ret;
}

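/* DUMPIT handler for ETHTOOL_MSG_PHY_GET: dump either the device named in
 * the request header or every netdevice in the namespace.
 */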
int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	unsigned long ifindex = 1;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();

	if (ctx->phy_req_info->base.dev) {
		ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
		ethnl_parse_header_dev_put(&ctx->phy_req_info->base);
		ctx->phy_req_info->base.dev = NULL;
	} else {
		for_each_netdev_dump(net, dev, ifindex) {
			ret = ethnl_phy_dump_one_dev(skb, dev, cb);
			if (ret)
				break;
		}
	}
	rtnl_unlock();

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}