1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2011-2014 Autronica Fire and Security AS
3 *
4 * Author(s):
5 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
6 *
7 * Routines for handling Netlink messages for HSR and PRP.
8 */
9
10 #include "hsr_netlink.h"
11 #include <linux/kernel.h>
12 #include <net/rtnetlink.h>
13 #include <net/genetlink.h>
14 #include "hsr_main.h"
15 #include "hsr_device.h"
16 #include "hsr_framereg.h"
17
/* Netlink attribute policy for the IFLA_HSR_* attributes accepted by
 * hsr_newlink() and emitted by hsr_fill_info().
 */
static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },	/* ifindex of slave A */
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },	/* ifindex of slave B */
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },	/* last byte of supervision addr */
	[IFLA_HSR_VERSION]		= { .type = NLA_U8 },	/* HSR protocol version (0 or 1) */
	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL]		= { .type = NLA_U8 },	/* HSR_PROTOCOL_HSR or _PRP */
	[IFLA_HSR_INTERLINK]		= { .type = NLA_U32 },	/* ifindex of interlink port */
};
28
/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 *
 * Validates the IFLA_HSR_* attributes, resolves the slave (and optional
 * interlink) devices and hands off to hsr_dev_finalize().  Returns 0 on
 * success or a negative errno with an extack message describing the
 * rejected attribute.
 */
static int hsr_newlink(struct net_device *dev,
		       struct rtnl_newlink_params *params,
		       struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct net_device *link[2], *interlink = NULL;
	struct nlattr **data = params->data;
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;	/* default when IFLA_HSR_PROTOCOL absent */

	/* All ports must live in the same netns as the HSR master device. */
	if (!net_eq(link_net, dev_net(dev))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "HSR slaves/interlink must be on the same net namespace than HSR link");
		return -EINVAL;
	}

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	/* Lookup without taking a reference; we hold RTNL here. */
	link[0] = __dev_get_by_index(link_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(link_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	/* The interlink port (RedBox support) is optional. */
	if (data[IFLA_HSR_INTERLINK])
		interlink = __dev_get_by_index(link_net,
					       nla_get_u32(data[IFLA_HSR_INTERLINK]));

	if (interlink && interlink == link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
		return -EINVAL;
	}

	if (interlink && interlink == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
		return -EINVAL;
	}

	/* Last byte of the supervision multicast address; 0 if unspecified. */
	multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		/* IFLA_HSR_VERSION only makes sense for HSR, not PRP. */
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP) {
		proto_version = PRP_V1;
		if (interlink) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interlink only works with HSR");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, interlink, multicast_spec,
				proto_version, extack);
}
131
/* rtnl_link_ops->dellink: tear down an HSR master device.
 * Called under RTNL; @head collects devices for batched unregistration.
 */
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	/* Stop all periodic work first so no timer handler can touch the
	 * node tables or ports we are about to free.
	 */
	timer_delete_sync(&hsr->prune_timer);
	timer_delete_sync(&hsr->prune_proxy_timer);
	timer_delete_sync(&hsr->announce_timer);
	timer_delete_sync(&hsr->announce_proxy_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	/* Free the self-node entry and both node databases. */
	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);
	hsr_del_nodes(&hsr->proxy_node_db);

	unregister_netdevice_queue(dev, head);
}
150
hsr_fill_info(struct sk_buff * skb,const struct net_device * dev)151 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
152 {
153 struct hsr_priv *hsr = netdev_priv(dev);
154 u8 proto = HSR_PROTOCOL_HSR;
155 struct hsr_port *port;
156
157 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
158 if (port) {
159 if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
160 goto nla_put_failure;
161 }
162
163 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
164 if (port) {
165 if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
166 goto nla_put_failure;
167 }
168
169 if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
170 hsr->sup_multicast_addr) ||
171 nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
172 goto nla_put_failure;
173 if (hsr->prot_version == PRP_V1)
174 proto = HSR_PROTOCOL_PRP;
175 if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
176 goto nla_put_failure;
177
178 return 0;
179
180 nla_put_failure:
181 return -EMSGSIZE;
182 }
183
/* rtnetlink ops for "ip link add ... type hsr". */
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.dellink	= hsr_dellink,
	.fill_info	= hsr_fill_info,
};
194
/* Attribute policy for the HSR generic-netlink family (node status/list
 * queries and ring-error/node-down notifications).
 */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};
205
/* Forward declaration: the family is defined after the op handlers below,
 * but the notification helpers need it here.
 */
static struct genl_family hsr_genl_family;

/* Multicast group used for ring-error and node-down notifications. */
static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
211
/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 *
 * Sends an HSR_C_RING_ERROR multicast notification carrying the node's MAC
 * address and the ifindex of the slave the frames arrived on.
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	/* May run in atomic context, hence GFP_ATOMIC. */
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	/* genlmsg_multicast takes ownership of the skb; no free here. */
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	/* RCU protects the port lookup used only for the warning. */
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}
255
256 /* This is called when we haven't heard from the node with MAC address addr for
257 * some time (just before the node is removed from the node table/list).
258 */
hsr_nl_nodedown(struct hsr_priv * hsr,unsigned char addr[ETH_ALEN])259 void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
260 {
261 struct sk_buff *skb;
262 void *msg_head;
263 struct hsr_port *master;
264 int res;
265
266 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
267 if (!skb)
268 goto fail;
269
270 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
271 if (!msg_head)
272 goto nla_put_failure;
273
274 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
275 if (res < 0)
276 goto nla_put_failure;
277
278 genlmsg_end(skb, msg_head);
279 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
280
281 return;
282
283 nla_put_failure:
284 kfree_skb(skb);
285
286 fail:
287 rcu_read_lock();
288 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
289 netdev_warn(master->dev, "Could not send HSR node down\n");
290 rcu_read_unlock();
291 }
292
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 * age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	/* Both the device ifindex and the node MAC are mandatory. */
	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	/* The RCU read side is held across the whole reply build so the
	 * device, its ports and the node entry cannot disappear under us.
	 */
	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	/* Look up the node and copy out its per-slave age/seq data. */
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	/* MacAddressB attributes only exist if the node reported one. */
	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	/* If the slave port is gone, skip its ifindex attribute; the
	 * subsequent res check then re-tests the previous (successful) put.
	 */
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	/* genlmsg_unicast consumes skb_out. */
	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
434
/* Get a list of MacAddressA of all nodes known to this node (including self).
 *
 * When the node list does not fit into one message, the partially filled
 * skb is sent and message construction restarts, continuing from the
 * current table position, until all nodes have been delivered.
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;	/* iteration cursor into the node table */
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	/* RCU protects the device, the node table and the iteration cursor
	 * for the entire (possibly multi-message) walk.
	 */
	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	/* The ifindex attribute goes only into the first message. */
	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			/* Message full: flush it and continue in a new one.
			 * pos still points at the entry that didn't fit.
			 */
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	/* genlmsg_unicast consumes skb_out. */
	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
525
/* Generic-netlink command handlers for the "HSR" family. */
static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};
542
/* Definition of the "HSR" generic-netlink family declared above. */
static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,	/* usable from any network namespace */
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
557
hsr_netlink_init(void)558 int __init hsr_netlink_init(void)
559 {
560 int rc;
561
562 rc = rtnl_link_register(&hsr_link_ops);
563 if (rc)
564 goto fail_rtnl_link_register;
565
566 rc = genl_register_family(&hsr_genl_family);
567 if (rc)
568 goto fail_genl_register_family;
569
570 hsr_debugfs_create_root();
571 return 0;
572
573 fail_genl_register_family:
574 rtnl_link_unregister(&hsr_link_ops);
575 fail_rtnl_link_register:
576
577 return rc;
578 }
579
/* Module teardown: unregister in reverse order of hsr_netlink_init(). */
void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}
585
586 MODULE_ALIAS_RTNL_LINK("hsr");
587