// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
	[IFLA_HSR_INTERLINK] = { .type = NLA_U32 },
};
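
/* These IFLA_HSR_* attributes are supplied by userspace when the link is
 * created over rtnetlink. With a reasonably recent iproute2 this maps to
 * something like (interface names are examples only):
 *
 *   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 \
 *           supervision 45 version 1
 *
 * where "supervision" sets the last byte of the supervision multicast
 * address (IFLA_HSR_MULTICAST_SPEC) and "proto 1" would select PRP
 * (IFLA_HSR_PROTOCOL) instead of HSR.
 */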

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;
	struct net_device *link[2], *interlink = NULL;

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are the same");
		return -EINVAL;
	}

	if (data[IFLA_HSR_INTERLINK])
		interlink = __dev_get_by_index(src_net,
					       nla_get_u32(data[IFLA_HSR_INTERLINK]));

	if (interlink && interlink == link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
		return -EINVAL;
	}

	if (interlink && interlink == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP) {
		proto_version = PRP_V1;
		if (interlink) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interlink only works with HSR");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, interlink, multicast_spec,
				proto_version, extack);
}

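/* Tear down an HSR/PRP device created by hsr_newlink(): stop the prune and
 * announce timers, release the slave/interlink ports and the node tables,
 * and queue the master device for unregistration.
 */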
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	timer_delete_sync(&hsr->prune_timer);
	timer_delete_sync(&hsr->prune_proxy_timer);
	timer_delete_sync(&hsr->announce_timer);
	timer_delete_sync(&hsr->announce_proxy_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);
	hsr_del_nodes(&hsr->proxy_node_db);

	unregister_netdevice_queue(dev, head);
}

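/* Fill the IFLA_HSR_* attributes reported back to userspace (e.g. by
 * "ip -d link show"): slave ifindexes, supervision address, current
 * sequence number and whether the device runs HSR or PRP.
 */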
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	u8 proto = HSR_PROTOCOL_HSR;
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;
	if (hsr->prot_version == PRP_V1)
		proto = HSR_PROTOCOL_PRP;
	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

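/* rtnl_link_ops for the "hsr" link kind: hsr_dev_setup() initialises the
 * netdevice before hsr_newlink() above attaches the slave devices.
 */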
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind = "hsr",
	.maxtype = IFLA_HSR_MAX,
	.policy = hsr_policy,
	.priv_size = sizeof(struct hsr_priv),
	.setup = hsr_dev_setup,
	.newlink = hsr_newlink,
	.dellink = hsr_dellink,
	.fill_info = hsr_fill_info,
};

/* Attribute policy for the HSR generic netlink commands below */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
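
/* The failure notifications below (ring error, node down) are multicast to
 * the "hsr-network" group of the "HSR" generic netlink family. A userspace
 * listener built on libnl-3 might subscribe roughly like this (sketch only,
 * error handling omitted):
 *
 *   struct nl_sock *sk = nl_socket_alloc();
 *   genl_connect(sk);
 *   int grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
 *   nl_socket_add_membership(sk, grp);
 *
 * and then receive events with nl_recvmsgs_default(sk).
 */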

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
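/* A userspace query could be built with libnl-3 roughly as follows (sketch
 * only; assumes family_id was obtained with genl_ctrl_resolve(sk, "HSR") and
 * omits error handling):
 *
 *   struct nl_msg *msg = nlmsg_alloc();
 *   genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, 0,
 *               0, HSR_C_GET_NODE_STATUS, 1);
 *   nla_put_u32(msg, HSR_A_IFINDEX, hsr_ifindex);
 *   nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, node_mac);
 *   nl_send_auto(sk, msg);
 *
 * The reply comes back as an HSR_C_SET_NODE_STATUS message.
 */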
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
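/* If the node table does not fit in one GENLMSG_DEFAULT_SIZE reply, the list
 * is split across several HSR_C_SET_NODE_LIST messages (see the -EMSGSIZE
 * restart handling below).
 */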
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

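/* Register the "hsr" rtnetlink link ops and the "HSR" generic netlink family
 * at module init time; undone in reverse order by hsr_netlink_exit() below.
 */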
int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");