// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
	[IFLA_HSR_INTERLINK] = { .type = NLA_U32 },
};

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;
	struct net_device *link[2], *interlink = NULL;

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	if (data[IFLA_HSR_INTERLINK])
		interlink = __dev_get_by_index(src_net,
					       nla_get_u32(data[IFLA_HSR_INTERLINK]));

	if (interlink && interlink == link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
		return -EINVAL;
	}

	if (interlink && interlink == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
		return -EINVAL;
	}

	multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP) {
		proto_version = PRP_V1;
		if (interlink) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interlink only works with HSR");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, interlink, multicast_spec,
				proto_version, extack);
}
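/* For reference, the attributes parsed above correspond to iproute2's
 * "type hsr" link arguments. A hedged usage sketch (keyword names may
 * differ between iproute2 versions; eth0/eth1 are placeholder slaves):
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 \
 *		supervision 45 version 1
 *
 *	# PRP uses the same rtnl kind; "proto 1" selects HSR_PROTOCOL_PRP
 *	ip link add name prp0 type hsr slave1 eth0 slave2 eth1 proto 1
 *
 * Deleting such a link ("ip link del hsr0") ends up in hsr_dellink() below.
 */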
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	timer_delete_sync(&hsr->prune_timer);
	timer_delete_sync(&hsr->prune_proxy_timer);
	timer_delete_sync(&hsr->announce_timer);
	timer_delete_sync(&hsr->announce_proxy_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);
	hsr_del_nodes(&hsr->proxy_node_db);

	unregister_netdevice_queue(dev, head);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	u8 proto = HSR_PROTOCOL_HSR;
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;
	if (hsr->prot_version == PRP_V1)
		proto = HSR_PROTOCOL_PRP;
	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind = "hsr",
	.maxtype = IFLA_HSR_MAX,
	.policy = hsr_policy,
	.priv_size = sizeof(struct hsr_priv),
	.setup = hsr_dev_setup,
	.newlink = hsr_newlink,
	.dellink = hsr_dellink,
	.fill_info = hsr_fill_info,
};

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
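/* Userspace can receive the HSR_C_RING_ERROR / HSR_C_NODE_DOWN notifications
 * sent below by joining the "hsr-network" multicast group above. A minimal,
 * hedged libnl-3 sketch (userspace code, not part of this module; error
 * handling omitted):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *
 *	genl_connect(sk);
 *	nl_socket_disable_seq_check(sk);
 *	nl_socket_add_membership(sk,
 *			genl_ctrl_resolve_grp(sk, "HSR", "hsr-network"));
 *	nl_recvmsgs_default(sk);	// dispatches to registered callbacks
 */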
/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}
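/* A hedged sketch of the matching userspace receive path (libnl-3), parsing
 * the attributes that the two notification helpers above put into the
 * message; hsr_notify_cb and handle_ring_error are hypothetical names:
 *
 *	static int hsr_notify_cb(struct nl_msg *msg, void *arg)
 *	{
 *		struct genlmsghdr *ghdr = genlmsg_hdr(nlmsg_hdr(msg));
 *		struct nlattr *attrs[HSR_A_MAX + 1];
 *
 *		genlmsg_parse(nlmsg_hdr(msg), 0, attrs, HSR_A_MAX, NULL);
 *		if (ghdr->cmd == HSR_C_RING_ERROR && attrs[HSR_A_NODE_ADDR])
 *			handle_ring_error(nla_data(attrs[HSR_A_NODE_ADDR]));
 *		return NL_OK;
 *	}
 *
 * registered via nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
 * hsr_notify_cb, NULL).
 */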
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
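/* A hedged libnl-3 sketch of the corresponding userspace request; the reply
 * comes back as HSR_C_SET_NODE_STATUS and is parsed like the notifications
 * above ("hsr0" and the MAC value are placeholders):
 *
 *	int family = genl_ctrl_resolve(sk, "HSR");
 *	struct nl_msg *msg = nlmsg_alloc();
 *	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    HSR_C_GET_NODE_STATUS, 1);
 *	nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex("hsr0"));
 *	nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, mac);
 *	nl_send_auto(sk, msg);
 */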
/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");