// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		hsr_sup_tag =
		    &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}

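/* Create a copy of skb_in with the HSR tag removed (addresses and any VLAN
 * header are preserved); skb_in itself is left unmodified.
 */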
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper 3 bits of lane_id */
	lane_id |= port->hsr->net_id;
	set_prp_lan_id(trailer, lane_id);
}

/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);

	return skb;
}

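/* Set the HSR tag path identifier: 0 for frames sent on slave A, 1 for
 * frames sent on slave B.
 */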
static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto frees the skb on error, and hsr_fill_tag returns
	 * NULL in that case.
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}

struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);

	/* prp_fill_rct() may free skb and return NULL (e.g. when padding
	 * fails), so return its result instead of the possibly freed skb.
	 */
	return prp_fill_rct(skb, frame, port);
}

static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	/* netif_rx() may free the skb; record the length before handing it
	 * over.
	 */
	recv_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

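/* Transmit the frame on the given slave port; frames that were received on
 * the master port (i.e. originate locally) get address substitution applied
 * first.
 */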
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

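/* PRP duplicates frames onto both LANs but never forwards between them;
 * drop anything that would cross from one slave port to the other.
 */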
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type == HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type == HSR_PT_SLAVE_A));
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;

		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. E.g. for PRP there is no
		 * forwarding between ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}

static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}

void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			 struct hsr_frame_info *frame)
{
	if (proto == htons(ETH_P_PRP) ||
	    proto == htons(ETH_P_HSR)) {
		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);
}

void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			 struct hsr_frame_info *frame)
{
	/* Supervision frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return;
	}
	handle_std_frame(skb, frame);
}

static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

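	/* Assume the frame does not come from a SAN; handle_std_frame()
	 * sets is_from_san for untagged frames received on a slave port.
	 */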
	frame->is_from_san = false;
	frame->port_rcv = port;
	hsr->proto_ops->fill_frame_info(proto, skb, frame);
	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}