xref: /linux/net/bridge/br_mrp.c (revision 2634682fdffd9ba6e74b76be8aa91cf8b2e05c41)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/mrp_bridge.h>
4 #include "br_private_mrp.h"
5 
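/* These are the MRP multicast destination addresses defined by
 * IEC 62439-2: MRP_Test frames go to 01:15:4E:00:00:01 and
 * MRP_InTest frames to 01:15:4E:00:00:03 (matching the arrays below).
 */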
6 static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
7 static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };
8 
9 static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
10 				struct net_bridge_port *s_port,
11 				struct net_bridge_port *port)
12 {
13 	if (port == p_port ||
14 	    port == s_port)
15 		return true;
16 
17 	return false;
18 }
19 
20 static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
21 			      struct net_bridge_port *port)
22 {
23 	if (port == i_port)
24 		return true;
25 
26 	return false;
27 }
28 
29 static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
30 					       u32 ifindex)
31 {
32 	struct net_bridge_port *res = NULL;
33 	struct net_bridge_port *port;
34 
35 	list_for_each_entry(port, &br->port_list, list) {
36 		if (port->dev->ifindex == ifindex) {
37 			res = port;
38 			break;
39 		}
40 	}
41 
42 	return res;
43 }
44 
45 static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
46 {
47 	struct br_mrp *res = NULL;
48 	struct br_mrp *mrp;
49 
50 	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
51 				lockdep_rtnl_is_held()) {
52 		if (mrp->ring_id == ring_id) {
53 			res = mrp;
54 			break;
55 		}
56 	}
57 
58 	return res;
59 }
60 
61 static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
62 {
63 	struct br_mrp *res = NULL;
64 	struct br_mrp *mrp;
65 
66 	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
67 				lockdep_rtnl_is_held()) {
68 		if (mrp->in_id == in_id) {
69 			res = mrp;
70 			break;
71 		}
72 	}
73 
74 	return res;
75 }
76 
77 static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
78 {
79 	struct br_mrp *mrp;
80 
81 	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
82 				lockdep_rtnl_is_held()) {
83 		struct net_bridge_port *p;
84 
85 		p = rtnl_dereference(mrp->p_port);
86 		if (p && p->dev->ifindex == ifindex)
87 			return false;
88 
89 		p = rtnl_dereference(mrp->s_port);
90 		if (p && p->dev->ifindex == ifindex)
91 			return false;
92 
93 		p = rtnl_dereference(mrp->i_port);
94 		if (p && p->dev->ifindex == ifindex)
95 			return false;
96 	}
97 
98 	return true;
99 }
100 
101 static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
102 				       struct net_bridge_port *p)
103 {
104 	struct br_mrp *res = NULL;
105 	struct br_mrp *mrp;
106 
107 	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
108 				lockdep_rtnl_is_held()) {
109 		if (rcu_access_pointer(mrp->p_port) == p ||
110 		    rcu_access_pointer(mrp->s_port) == p ||
111 		    rcu_access_pointer(mrp->i_port) == p) {
112 			res = mrp;
113 			break;
114 		}
115 	}
116 
117 	return res;
118 }
119 
120 static int br_mrp_next_seq(struct br_mrp *mrp)
121 {
122 	mrp->seq_id++;
123 	return mrp->seq_id;
124 }
125 
126 static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
127 					const u8 *src, const u8 *dst)
128 {
129 	struct ethhdr *eth_hdr;
130 	struct sk_buff *skb;
131 	__be16 *version;
132 
133 	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
134 	if (!skb)
135 		return NULL;
136 
137 	skb->dev = p->dev;
138 	skb->protocol = htons(ETH_P_MRP);
139 	skb->priority = MRP_FRAME_PRIO;
140 	skb_reserve(skb, sizeof(*eth_hdr));
141 
142 	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
143 	ether_addr_copy(eth_hdr->h_dest, dst);
144 	ether_addr_copy(eth_hdr->h_source, src);
145 	eth_hdr->h_proto = htons(ETH_P_MRP);
146 
147 	version = skb_put(skb, sizeof(*version));
148 	*version = cpu_to_be16(MRP_VERSION);
149 
150 	return skb;
151 }
152 
153 static void br_mrp_skb_tlv(struct sk_buff *skb,
154 			   enum br_mrp_tlv_header_type type,
155 			   u8 length)
156 {
157 	struct br_mrp_tlv_hdr *hdr;
158 
159 	hdr = skb_put(skb, sizeof(*hdr));
160 	hdr->type = type;
161 	hdr->length = length;
162 }
163 
164 static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
165 {
166 	struct br_mrp_common_hdr *hdr;
167 
168 	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));
169 
170 	hdr = skb_put(skb, sizeof(*hdr));
171 	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
172 	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
173 }
174 
175 static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
176 					     struct net_bridge_port *p,
177 					     enum br_mrp_port_role_type port_role)
178 {
179 	struct br_mrp_ring_test_hdr *hdr = NULL;
180 	struct sk_buff *skb = NULL;
181 
182 	if (!p)
183 		return NULL;
184 
185 	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
186 	if (!skb)
187 		return NULL;
188 
189 	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
190 	hdr = skb_put(skb, sizeof(*hdr));
191 
192 	hdr->prio = cpu_to_be16(mrp->prio);
193 	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
194 	hdr->port_role = cpu_to_be16(port_role);
195 	hdr->state = cpu_to_be16(mrp->ring_state);
196 	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
197 	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
198 
199 	br_mrp_skb_common(skb, mrp);
200 	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
201 
202 	return skb;
203 }
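
/* On-the-wire layout of the MRP_Test frame built above (a sketch derived
 * from the helpers; ETH_P_MRP is 0x88E3):
 *
 *   +--------------+---------+---------------+------------+---------+
 *   | eth hdr      | version | RING_TEST TLV | COMMON TLV | END TLV |
 *   | dst/src/prot | 2 bytes | prio, sa,     | seq_id,    | type 0, |
 *   |              |         | role, state,  | domain     | len 0   |
 *   |              |         | trans, ts     |            |         |
 *   +--------------+---------+---------------+------------+---------+
 */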
204 
205 static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
206 						struct net_bridge_port *p,
207 						enum br_mrp_port_role_type port_role)
208 {
209 	struct br_mrp_in_test_hdr *hdr = NULL;
210 	struct sk_buff *skb = NULL;
211 
212 	if (!p)
213 		return NULL;
214 
215 	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
216 	if (!skb)
217 		return NULL;
218 
219 	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
220 	hdr = skb_put(skb, sizeof(*hdr));
221 
222 	hdr->id = cpu_to_be16(mrp->in_id);
223 	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
224 	hdr->port_role = cpu_to_be16(port_role);
225 	hdr->state = cpu_to_be16(mrp->in_state);
226 	hdr->transitions = cpu_to_be16(mrp->in_transitions);
227 	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
228 
229 	br_mrp_skb_common(skb, mrp);
230 	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
231 
232 	return skb;
233 }
234 
235 /* This function is called continuously in the following cases:
236  * - when the node role is MRM, in this case test_monitor is always set to
237  *   false because the node needs to notify the userspace that the ring is
238  *   open and needs to send MRP_Test frames
239  * - when the node role is MRA, there are 2 subcases:
240  *     - when the MRA behaves as an MRM, this case is similar to the MRM role
241  *     - when the MRA behaves as an MRC, test_monitor is set to true because
242  *       the node needs to detect when it stops seeing MRP_Test frames from
243  *       the MRM node, but it doesn't need to send MRP_Test frames itself.
244  */
245 static void br_mrp_test_work_expired(struct work_struct *work)
246 {
247 	struct delayed_work *del_work = to_delayed_work(work);
248 	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
249 	struct net_bridge_port *p;
250 	bool notify_open = false;
251 	struct sk_buff *skb;
252 
253 	if (time_before_eq(mrp->test_end, jiffies))
254 		return;
255 
256 	if (mrp->test_count_miss < mrp->test_max_miss) {
257 		mrp->test_count_miss++;
258 	} else {
259 		/* Notify that the ring is open only if the ring state is
260 		 * closed, otherwise it would continue to notify at every
261 		 * interval.
262 		 * Also notify that the ring is open when the node has the
263 		 * role MRA and behaves as MRC. The reason is that the
264 		 * userspace needs to know when the MRM stopped sending
265 		 * MRP_Test frames so that the current node can try to take
266 		 * over the role of MRM.
267 		 */
268 		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
269 		    mrp->test_monitor)
270 			notify_open = true;
271 	}
272 
273 	rcu_read_lock();
274 
275 	p = rcu_dereference(mrp->p_port);
276 	if (p) {
277 		if (!mrp->test_monitor) {
278 			skb = br_mrp_alloc_test_skb(mrp, p,
279 						    BR_MRP_PORT_ROLE_PRIMARY);
280 			if (!skb)
281 				goto out;
282 
283 			skb_reset_network_header(skb);
284 			dev_queue_xmit(skb);
285 		}
286 
287 		if (notify_open && !mrp->ring_role_offloaded)
288 			br_mrp_ring_port_open(p->dev, true);
289 	}
290 
291 	p = rcu_dereference(mrp->s_port);
292 	if (p) {
293 		if (!mrp->test_monitor) {
294 			skb = br_mrp_alloc_test_skb(mrp, p,
295 						    BR_MRP_PORT_ROLE_SECONDARY);
296 			if (!skb)
297 				goto out;
298 
299 			skb_reset_network_header(skb);
300 			dev_queue_xmit(skb);
301 		}
302 
303 		if (notify_open && !mrp->ring_role_offloaded)
304 			br_mrp_ring_port_open(p->dev, true);
305 	}
306 
307 out:
308 	rcu_read_unlock();
309 
310 	queue_delayed_work(system_wq, &mrp->test_work,
311 			   usecs_to_jiffies(mrp->test_interval));
312 }
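
/* A worked example of the miss accounting above (illustrative values):
 * if test_count_miss was reset to 0 by a received MRP_Test frame and
 * test_max_miss == 3, the first three expirations of this work item
 * only increment the counter; the fourth consecutive expiration without
 * a received frame reports the ring open (or, for an MRA behaving as an
 * MRC, reports that the MRM went silent).
 */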
313 
314 /* This function is called continuously when the node has the interconnect
315  * role MIM. It generates interconnect test frames and sends them on all 3
316  * ports, and it also checks whether it has stopped receiving them.
317  */
318 static void br_mrp_in_test_work_expired(struct work_struct *work)
319 {
320 	struct delayed_work *del_work = to_delayed_work(work);
321 	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
322 	struct net_bridge_port *p;
323 	bool notify_open = false;
324 	struct sk_buff *skb;
325 
326 	if (time_before_eq(mrp->in_test_end, jiffies))
327 		return;
328 
329 	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
330 		mrp->in_test_count_miss++;
331 	} else {
332 		/* Notify that the interconnect ring is open only if the
333 		 * interconnect ring state is closed, otherwise it would
334 		 * continue to notify at every interval.
335 		 */
336 		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
337 			notify_open = true;
338 	}
339 
340 	rcu_read_lock();
341 
342 	p = rcu_dereference(mrp->p_port);
343 	if (p) {
344 		skb = br_mrp_alloc_in_test_skb(mrp, p,
345 					       BR_MRP_PORT_ROLE_PRIMARY);
346 		if (!skb)
347 			goto out;
348 
349 		skb_reset_network_header(skb);
350 		dev_queue_xmit(skb);
351 
352 		if (notify_open && !mrp->in_role_offloaded)
353 			br_mrp_in_port_open(p->dev, true);
354 	}
355 
356 	p = rcu_dereference(mrp->s_port);
357 	if (p) {
358 		skb = br_mrp_alloc_in_test_skb(mrp, p,
359 					       BR_MRP_PORT_ROLE_SECONDARY);
360 		if (!skb)
361 			goto out;
362 
363 		skb_reset_network_header(skb);
364 		dev_queue_xmit(skb);
365 
366 		if (notify_open && !mrp->in_role_offloaded)
367 			br_mrp_in_port_open(p->dev, true);
368 	}
369 
370 	p = rcu_dereference(mrp->i_port);
371 	if (p) {
372 		skb = br_mrp_alloc_in_test_skb(mrp, p,
373 					       BR_MRP_PORT_ROLE_INTER);
374 		if (!skb)
375 			goto out;
376 
377 		skb_reset_network_header(skb);
378 		dev_queue_xmit(skb);
379 
380 		if (notify_open && !mrp->in_role_offloaded)
381 			br_mrp_in_port_open(p->dev, true);
382 	}
383 
384 out:
385 	rcu_read_unlock();
386 
387 	queue_delayed_work(system_wq, &mrp->in_test_work,
388 			   usecs_to_jiffies(mrp->in_test_interval));
389 }
390 
391 /* Deletes the MRP instance.
392  * note: called under rtnl_lock
393  */
394 static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
395 {
396 	struct net_bridge_port *p;
397 	u8 state;
398 
399 	/* Stop sending MRP_Test frames */
400 	cancel_delayed_work_sync(&mrp->test_work);
401 	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);
402 
403 	/* Stop sending MRP_InTest frames if it has an interconnect role */
404 	cancel_delayed_work_sync(&mrp->in_test_work);
405 	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
406 
407 	br_mrp_switchdev_del(br, mrp);
408 
409 	/* Reset the ports */
410 	p = rtnl_dereference(mrp->p_port);
411 	if (p) {
412 		spin_lock_bh(&br->lock);
413 		state = netif_running(br->dev) ?
414 				BR_STATE_FORWARDING : BR_STATE_DISABLED;
415 		p->state = state;
416 		p->flags &= ~BR_MRP_AWARE;
417 		spin_unlock_bh(&br->lock);
418 		br_mrp_port_switchdev_set_state(p, state);
419 		rcu_assign_pointer(mrp->p_port, NULL);
420 	}
421 
422 	p = rtnl_dereference(mrp->s_port);
423 	if (p) {
424 		spin_lock_bh(&br->lock);
425 		state = netif_running(br->dev) ?
426 				BR_STATE_FORWARDING : BR_STATE_DISABLED;
427 		p->state = state;
428 		p->flags &= ~BR_MRP_AWARE;
429 		spin_unlock_bh(&br->lock);
430 		br_mrp_port_switchdev_set_state(p, state);
431 		rcu_assign_pointer(mrp->s_port, NULL);
432 	}
433 
434 	p = rtnl_dereference(mrp->i_port);
435 	if (p) {
436 		spin_lock_bh(&br->lock);
437 		state = netif_running(br->dev) ?
438 				BR_STATE_FORWARDING : BR_STATE_DISABLED;
439 		p->state = state;
440 		p->flags &= ~BR_MRP_AWARE;
441 		spin_unlock_bh(&br->lock);
442 		br_mrp_port_switchdev_set_state(p, state);
443 		rcu_assign_pointer(mrp->i_port, NULL);
444 	}
445 
446 	list_del_rcu(&mrp->list);
447 	kfree_rcu(mrp, rcu);
448 }
449 
450 /* Adds a new MRP instance.
451  * note: called under rtnl_lock
452  */
453 int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
454 {
455 	struct net_bridge_port *p;
456 	struct br_mrp *mrp;
457 	int err;
458 
459 	/* If the ring exists, it is not possible to create another one with the
460 	 * same ring_id
461 	 */
462 	mrp = br_mrp_find_id(br, instance->ring_id);
463 	if (mrp)
464 		return -EINVAL;
465 
466 	if (!br_mrp_get_port(br, instance->p_ifindex) ||
467 	    !br_mrp_get_port(br, instance->s_ifindex))
468 		return -EINVAL;
469 
470 	/* It is not possible to have the same port part of multiple rings */
471 	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
472 	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
473 		return -EINVAL;
474 
475 	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
476 	if (!mrp)
477 		return -ENOMEM;
478 
479 	mrp->ring_id = instance->ring_id;
480 	mrp->prio = instance->prio;
481 
482 	p = br_mrp_get_port(br, instance->p_ifindex);
483 	spin_lock_bh(&br->lock);
484 	p->state = BR_STATE_FORWARDING;
485 	p->flags |= BR_MRP_AWARE;
486 	spin_unlock_bh(&br->lock);
487 	rcu_assign_pointer(mrp->p_port, p);
488 
489 	p = br_mrp_get_port(br, instance->s_ifindex);
490 	spin_lock_bh(&br->lock);
491 	p->state = BR_STATE_FORWARDING;
492 	p->flags |= BR_MRP_AWARE;
493 	spin_unlock_bh(&br->lock);
494 	rcu_assign_pointer(mrp->s_port, p);
495 
496 	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
497 	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
498 	list_add_tail_rcu(&mrp->list, &br->mrp_list);
499 
500 	err = br_mrp_switchdev_add(br, mrp);
501 	if (err)
502 		goto delete_mrp;
503 
504 	return 0;
505 
506 delete_mrp:
507 	br_mrp_del_impl(br, mrp);
508 
509 	return err;
510 }
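
/* A minimal usage sketch for br_mrp_add() (hypothetical values; member
 * names follow the br_mrp_instance uapi layout):
 *
 *	struct br_mrp_instance i = {
 *		.ring_id   = 1,
 *		.p_ifindex = 2,		(primary ring port)
 *		.s_ifindex = 3,		(secondary ring port)
 *		.prio      = 0x8000,	(default MRM priority)
 *	};
 *	err = br_mrp_add(br, &i);
 *
 * In the kernel this path is normally reached through the
 * IFLA_BRIDGE_MRP netlink attributes rather than called directly.
 */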
511 
512 /* Deletes the MRP instance that the port is part of
513  * note: called under rtnl_lock
514  */
515 void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
516 {
517 	struct br_mrp *mrp = br_mrp_find_port(br, p);
518 
519 	/* If the port is not part of an MRP instance just bail out */
520 	if (!mrp)
521 		return;
522 
523 	br_mrp_del_impl(br, mrp);
524 }
525 
526 /* Deletes existing MRP instance based on ring_id
527  * note: called under rtnl_lock
528  */
529 int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
530 {
531 	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);
532 
533 	if (!mrp)
534 		return -EINVAL;
535 
536 	br_mrp_del_impl(br, mrp);
537 
538 	return 0;
539 }
540 
541 /* Set port state; the port state can be forwarding, blocked or disabled
542  * note: already called with rtnl_lock
543  */
544 int br_mrp_set_port_state(struct net_bridge_port *p,
545 			  enum br_mrp_port_state_type state)
546 {
547 	if (!p || !(p->flags & BR_MRP_AWARE))
548 		return -EINVAL;
549 
550 	spin_lock_bh(&p->br->lock);
551 
552 	if (state == BR_MRP_PORT_STATE_FORWARDING)
553 		p->state = BR_STATE_FORWARDING;
554 	else
555 		p->state = BR_STATE_BLOCKING;
556 
557 	spin_unlock_bh(&p->br->lock);
558 
559 	br_mrp_port_switchdev_set_state(p, state);
560 
561 	return 0;
562 }
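
/* Note on the mapping above: both BR_MRP_PORT_STATE_BLOCKED and
 * BR_MRP_PORT_STATE_DISABLED end up as BR_STATE_BLOCKING on the SW
 * bridge side; only the switchdev call receives the exact MRP state.
 */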
563 
564 /* Set port role; the port role can be primary or secondary
565  * note: already called with rtnl_lock
566  */
567 int br_mrp_set_port_role(struct net_bridge_port *p,
568 			 enum br_mrp_port_role_type role)
569 {
570 	struct br_mrp *mrp;
571 
572 	if (!p || !(p->flags & BR_MRP_AWARE))
573 		return -EINVAL;
574 
575 	mrp = br_mrp_find_port(p->br, p);
576 
577 	if (!mrp)
578 		return -EINVAL;
579 
580 	switch (role) {
581 	case BR_MRP_PORT_ROLE_PRIMARY:
582 		rcu_assign_pointer(mrp->p_port, p);
583 		break;
584 	case BR_MRP_PORT_ROLE_SECONDARY:
585 		rcu_assign_pointer(mrp->s_port, p);
586 		break;
587 	default:
588 		return -EINVAL;
589 	}
590 
591 	br_mrp_port_switchdev_set_role(p, role);
592 
593 	return 0;
594 }
595 
596 /* Set ring state; the ring state can only be Open or Closed
597  * note: already called with rtnl_lock
598  */
599 int br_mrp_set_ring_state(struct net_bridge *br,
600 			  struct br_mrp_ring_state *state)
601 {
602 	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);
603 
604 	if (!mrp)
605 		return -EINVAL;
606 
607 	if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
608 	    state->ring_state != BR_MRP_RING_STATE_CLOSED)
609 		mrp->ring_transitions++;
610 
611 	mrp->ring_state = state->ring_state;
612 
613 	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);
614 
615 	return 0;
616 }
617 
618 /* Set ring role; the ring role can only be MRM (Media Redundancy Manager)
619  * or MRC (Media Redundancy Client).
620  * note: already called with rtnl_lock
621  */
622 int br_mrp_set_ring_role(struct net_bridge *br,
623 			 struct br_mrp_ring_role *role)
624 {
625 	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
626 	int err;
627 
628 	if (!mrp)
629 		return -EINVAL;
630 
631 	mrp->ring_role = role->ring_role;
632 
633 	/* If there is an error just bail out */
634 	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
635 	if (err && err != -EOPNOTSUPP)
636 		return err;
637 
638 	/* Now detect if the HW actually applied the role or not. If the HW
639 	 * applied the role it means that the SW doesn't need to do those
640 	 * operations anymore. For example if the role is MRM then the HW will
641 	 * notify the SW when the ring is open, but if the role is not pushed
642 	 * to the HW the SW will need to detect when the ring is open.
643 	 */
644 	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;
645 
646 	return 0;
647 }
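
/* The offload-detection pattern above is reused for the interconnect
 * role below: -EOPNOTSUPP from switchdev means "fall back to the SW
 * path", any other error aborts, and success means the HW owns the role
 * and raises the open/closed notifications itself.
 */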
648 
649 /* Start to generate or monitor MRP test frames. The frames are generated
650  * by the HW and, if that fails, by the SW.
651  * note: already called with rtnl_lock
652  */
653 int br_mrp_start_test(struct net_bridge *br,
654 		      struct br_mrp_start_test *test)
655 {
656 	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
657 
658 	if (!mrp)
659 		return -EINVAL;
660 
661 	/* Try to push it to the HW and if it fails then continue with SW
662 	 * implementation and if that also fails then return error.
663 	 */
664 	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
665 					     test->max_miss, test->period,
666 					     test->monitor))
667 		return 0;
668 
669 	mrp->test_interval = test->interval;
670 	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
671 	mrp->test_max_miss = test->max_miss;
672 	mrp->test_monitor = test->monitor;
673 	mrp->test_count_miss = 0;
674 	queue_delayed_work(system_wq, &mrp->test_work,
675 			   usecs_to_jiffies(test->interval));
676 
677 	return 0;
678 }
679 
680 /* Set in state; the in state can only be Open or Closed
681  * note: already called with rtnl_lock
682  */
683 int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
684 {
685 	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);
686 
687 	if (!mrp)
688 		return -EINVAL;
689 
690 	if (mrp->in_state == BR_MRP_IN_STATE_CLOSED &&
691 	    state->in_state != BR_MRP_IN_STATE_CLOSED)
692 		mrp->in_transitions++;
693 
694 	mrp->in_state = state->in_state;
695 
696 	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);
697 
698 	return 0;
699 }
700 
701 /* Set in role; the in role can only be MIM (Media Interconnection Manager)
702  * or MIC (Media Interconnection Client).
703  * note: already called with rtnl_lock
704  */
705 int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
706 {
707 	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
708 	struct net_bridge_port *p;
709 	int err;
710 
711 	if (!mrp)
712 		return -EINVAL;
713 
714 	if (!br_mrp_get_port(br, role->i_ifindex))
715 		return -EINVAL;
716 
717 	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
718 		u8 state;
719 
720 		/* It is not allowed to disable a port that doesn't exist */
721 		p = rtnl_dereference(mrp->i_port);
722 		if (!p)
723 			return -EINVAL;
724 
725 		/* Stop generating MRP_InTest frames */
726 		cancel_delayed_work_sync(&mrp->in_test_work);
727 		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
728 
729 		/* Remove the port */
730 		spin_lock_bh(&br->lock);
731 		state = netif_running(br->dev) ?
732 				BR_STATE_FORWARDING : BR_STATE_DISABLED;
733 		p->state = state;
734 		p->flags &= ~BR_MRP_AWARE;
735 		spin_unlock_bh(&br->lock);
736 		br_mrp_port_switchdev_set_state(p, state);
737 		rcu_assign_pointer(mrp->i_port, NULL);
738 
739 		mrp->in_role = role->in_role;
740 		mrp->in_id = 0;
741 
742 		return 0;
743 	}
744 
745 	/* It is not possible to have the same port part of multiple rings */
746 	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
747 		return -EINVAL;
748 
749 	/* It is not allowed to set a different interconnect port if the mrp
750 	 * instance already has one. It first needs to be disabled, and only
751 	 * after that can the new port be set.
752 	 */
753 	if (rcu_access_pointer(mrp->i_port))
754 		return -EINVAL;
755 
756 	p = br_mrp_get_port(br, role->i_ifindex);
757 	spin_lock_bh(&br->lock);
758 	p->state = BR_STATE_FORWARDING;
759 	p->flags |= BR_MRP_AWARE;
760 	spin_unlock_bh(&br->lock);
761 	rcu_assign_pointer(mrp->i_port, p);
762 
763 	mrp->in_role = role->in_role;
764 	mrp->in_id = role->in_id;
765 
766 	/* If there is an error just bail out */
767 	err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
768 					   role->ring_id, role->in_role);
769 	if (err && err != -EOPNOTSUPP)
770 		return err;
771 
772 	/* Now detect if the HW actually applied the role or not. If the HW
773 	 * applied the role it means that the SW doesn't need to do those
774 	 * operations anymore. For example if the role is MIM then the HW will
775 	 * notify the SW when the interconnect ring is open, but if the role is
776 	 * not pushed to the HW the SW will need to detect this itself.
777 	 */
778 	mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;
779 
780 	return 0;
781 }
782 
783 /* Start to generate MRP_InTest frames. The frames are generated by the
784  * HW and, if that fails, by the SW.
785  * note: already called with rtnl_lock
786  */
787 int br_mrp_start_in_test(struct net_bridge *br,
788 			 struct br_mrp_start_in_test *in_test)
789 {
790 	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
791 
792 	if (!mrp)
793 		return -EINVAL;
794 
795 	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
796 		return -EINVAL;
797 
798 	/* Try to push it to the HW and if it fails then continue with SW
799 	 * implementation and if that also fails then return error.
800 	 */
801 	if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
802 					   in_test->max_miss, in_test->period))
803 		return 0;
804 
805 	mrp->in_test_interval = in_test->interval;
806 	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
807 	mrp->in_test_max_miss = in_test->max_miss;
808 	mrp->in_test_count_miss = 0;
809 	queue_delayed_work(system_wq, &mrp->in_test_work,
810 			   usecs_to_jiffies(in_test->interval));
811 
812 	return 0;
813 }
814 
815 /* Determine whether the frame type is a ring frame */
816 static bool br_mrp_ring_frame(struct sk_buff *skb)
817 {
818 	const struct br_mrp_tlv_hdr *hdr;
819 	struct br_mrp_tlv_hdr _hdr;
820 
821 	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
822 	if (!hdr)
823 		return false;
824 
825 	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
826 	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
827 	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
828 	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
829 	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
830 		return true;
831 
832 	return false;
833 }
834 
835 /* Determine whether the frame type is an interconnect frame */
836 static bool br_mrp_in_frame(struct sk_buff *skb)
837 {
838 	const struct br_mrp_tlv_hdr *hdr;
839 	struct br_mrp_tlv_hdr _hdr;
840 
841 	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
842 	if (!hdr)
843 		return false;
844 
845 	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
846 	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
847 	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
848 	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
849 		return true;
850 
851 	return false;
852 }
853 
854 /* Process only MRP_Test frames. All the other MRP frames are processed by
855  * the userspace application.
856  * note: already called with rcu_read_lock
857  */
858 static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
859 			       struct sk_buff *skb)
860 {
861 	const struct br_mrp_tlv_hdr *hdr;
862 	struct br_mrp_tlv_hdr _hdr;
863 
864 	/* Each MRP header starts with a version field which is 16 bits.
865 	 * Therefore skip the version and get directly the TLV header.
866 	 */
867 	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
868 	if (!hdr)
869 		return;
870 
871 	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
872 		return;
873 
874 	mrp->test_count_miss = 0;
875 
876 	/* Notify the userspace that the ring is closed, but only when the
877 	 * ring state is not already closed, to avoid repeated notifications.
878 	 */
879 	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
880 		br_mrp_ring_port_open(port->dev, false);
881 }
882 
883 /* Determine whether the test hdr has a better priority than the node */
884 static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
885 					struct net_bridge *br,
886 					const struct br_mrp_ring_test_hdr *hdr)
887 {
888 	u16 prio = be16_to_cpu(hdr->prio);
889 
890 	if (prio < mrp->prio ||
891 	    (prio == mrp->prio &&
892 	    ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
893 		return true;
894 
895 	return false;
896 }
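
/* A worked example for the comparison above (illustrative numbers): a
 * received MRP_Test carrying prio 0x4000 beats a local prio of 0x8000
 * because the lower value wins; at equal prio the numerically smaller
 * bridge MAC address wins.
 */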
897 
898 /* Process only MRP_Test frames. All the other MRP frames are processed by
899  * the userspace application.
900  * note: already called with rcu_read_lock
901  */
902 static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
903 			       struct net_bridge_port *port,
904 			       struct sk_buff *skb)
905 {
906 	const struct br_mrp_ring_test_hdr *test_hdr;
907 	struct br_mrp_ring_test_hdr _test_hdr;
908 	const struct br_mrp_tlv_hdr *hdr;
909 	struct br_mrp_tlv_hdr _hdr;
910 
911 	/* Each MRP header starts with a version field which is 16 bits.
912 	 * Therefore skip the version and get directly the TLV header.
913 	 */
914 	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
915 	if (!hdr)
916 		return;
917 
918 	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
919 		return;
920 
921 	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
922 				      sizeof(_test_hdr), &_test_hdr);
923 	if (!test_hdr)
924 		return;
925 
926 	/* Only frames that have a better priority than the node clear the
927 	 * miss counter, because otherwise the node will need to take over
928 	 * and behave as an MRM.
929 	 */
930 	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
931 		mrp->test_count_miss = 0;
932 }
933 
934 /* Process only MRP_InTest frames. All the other MRP frames are processed
935  * by the userspace application.
936  * note: already called with rcu_read_lock
937  */
938 static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
939 			       struct sk_buff *skb)
940 {
941 	const struct br_mrp_in_test_hdr *in_hdr;
942 	struct br_mrp_in_test_hdr _in_hdr;
943 	const struct br_mrp_tlv_hdr *hdr;
944 	struct br_mrp_tlv_hdr _hdr;
945 
946 	/* Each MRP header starts with a version field which is 16 bits.
947 	 * Therefore skip the version and get directly the TLV header.
948 	 */
949 	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
950 	if (!hdr)
951 		return false;
952 
953 	/* The check for InTest frame type was already done */
954 	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
955 				    sizeof(_in_hdr), &_in_hdr);
956 	if (!in_hdr)
957 		return false;
958 
959 	/* It needs to process only its own MRP_InTest frames. */
960 	if (mrp->in_id != ntohs(in_hdr->id))
961 		return false;
962 
963 	mrp->in_test_count_miss = 0;
964 
965 	/* Notify the userspace that the interconnect ring is closed, but only
966 	 * when its state is not already closed, to avoid repeated notifications.
967 	 */
968 	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
969 		br_mrp_in_port_open(port->dev, false);
970 
971 	return true;
972 }
973 
974 /* Get the MRP frame type
975  * note: already called with rcu_read_lock
976  */
977 static u8 br_mrp_get_frame_type(struct sk_buff *skb)
978 {
979 	const struct br_mrp_tlv_hdr *hdr;
980 	struct br_mrp_tlv_hdr _hdr;
981 
982 	/* Each MRP header starts with a version field which is 16 bits.
983 	 * Therefore skip the version and get directly the TLV header.
984 	 */
985 	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
986 	if (!hdr)
987 		return 0xff;
988 
989 	return hdr->type;
990 }
991 
992 static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
993 {
994 	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
995 	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
996 		return true;
997 
998 	return false;
999 }
1000 
1001 static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
1002 {
1003 	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
1004 	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
1005 		return true;
1006 
1007 	return false;
1008 }
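
/* Summary of the two helpers above:
 *
 *   ring_role | test_monitor | behaviour
 *   ----------+--------------+----------
 *   MRM       | any          | MRM
 *   MRA       | false        | MRM
 *   MRA       | true         | MRC
 *   MRC       | any          | MRC
 */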
1009 
1010 /* This will just forward the frame to the other mrp ring ports, depending on
1011  * the frame type, ring role and interconnect role
1012  * note: already called with rcu_read_lock
1013  */
1014 static int br_mrp_rcv(struct net_bridge_port *p,
1015 		      struct sk_buff *skb, struct net_device *dev)
1016 {
1017 	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
1018 	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
1019 	struct net_bridge *br;
1020 	struct br_mrp *mrp;
1021 
1022 	/* If the port is disabled don't accept any frames */
1023 	if (p->state == BR_STATE_DISABLED)
1024 		return 0;
1025 
1026 	br = p->br;
1027 	mrp = br_mrp_find_port(br, p);
1028 	if (unlikely(!mrp))
1029 		return 0;
1030 
1031 	p_port = rcu_dereference(mrp->p_port);
1032 	if (!p_port)
1033 		return 0;
1034 	p_dst = p_port;
1035 
1036 	s_port = rcu_dereference(mrp->s_port);
1037 	if (!s_port)
1038 		return 0;
1039 	s_dst = s_port;
1040 
1041 	/* If the frame is a ring frame then it is not required to check the
1042 	 * interconnect role and ports to process or forward the frame
1043 	 */
1044 	if (br_mrp_ring_frame(skb)) {
1045 		/* If the role is MRM then don't forward the frames */
1046 		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
1047 			br_mrp_mrm_process(mrp, p, skb);
1048 			goto no_forward;
1049 		}
1050 
1051 		/* If the role is MRA then don't forward the frames if it
1052 		 * behaves as MRM node
1053 		 */
1054 		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
1055 			if (!mrp->test_monitor) {
1056 				br_mrp_mrm_process(mrp, p, skb);
1057 				goto no_forward;
1058 			}
1059 
1060 			br_mrp_mra_process(mrp, br, p, skb);
1061 		}
1062 
1063 		goto forward;
1064 	}
1065 
1066 	if (br_mrp_in_frame(skb)) {
1067 		u8 in_type = br_mrp_get_frame_type(skb);
1068 
1069 		i_port = rcu_dereference(mrp->i_port);
1070 		i_dst = i_port;
1071 
1072 		/* If the ring port is in the blocking state it should not
1073 		 * forward MRP_InTest frames
1074 		 */
1075 		if (br_mrp_is_ring_port(p_port, s_port, p) &&
1076 		    p->state == BR_STATE_BLOCKING &&
1077 		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
1078 			goto no_forward;
1079 
1080 		/* Nodes that behave as MRM need to stop forwarding the frames
1081 		 * when the ring is closed, otherwise there will be a loop. In
1082 		 * this case the frame is not forwarded between the ring ports.
1083 		 */
1084 		if (br_mrp_mrm_behaviour(mrp) &&
1085 		    br_mrp_is_ring_port(p_port, s_port, p) &&
1086 		    (s_port->state != BR_STATE_FORWARDING ||
1087 		     p_port->state != BR_STATE_FORWARDING)) {
1088 			p_dst = NULL;
1089 			s_dst = NULL;
1090 		}
1091 
1092 		/* A node that behaves as MRC and doesn't have an interconnect
1093 		 * role should forward all frames between the ring ports
1094 		 * because it doesn't have an interconnect port
1095 		 */
1096 		if (br_mrp_mrc_behaviour(mrp) &&
1097 		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
1098 			goto forward;
1099 
1100 		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
1101 			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
1102 				/* MIM should not forward its own MRP_InTest
1103 				 * frames
1104 				 */
1105 				if (br_mrp_mim_process(mrp, p, skb)) {
1106 					goto no_forward;
1107 				} else {
1108 					if (br_mrp_is_ring_port(p_port, s_port,
1109 								p))
1110 						i_dst = NULL;
1111 
1112 					if (br_mrp_is_in_port(i_port, p))
1113 						goto no_forward;
1114 				}
1115 			} else {
1116 				/* MIM should forward IntLinkChange and
1117 				 * IntTopoChange between ring ports but MIM
1118 				 * should not forward IntLinkChange and
1119 				 * IntTopoChange if the frame was received at
1120 				 * the interconnect port
1121 				 */
1122 				if (br_mrp_is_ring_port(p_port, s_port, p))
1123 					i_dst = NULL;
1124 
1125 				if (br_mrp_is_in_port(i_port, p))
1126 					goto no_forward;
1127 			}
1128 		}
1129 
1130 		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
1131 			/* MIC should forward InTest frames on all ports
1132 			 * regardless of the received port
1133 			 * regardless of the port they were received on
1134 			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
1135 				goto forward;
1136 
1137 			/* MIC should forward IntLinkChange frames only if they
1138 			 * are received on ring ports to all the ports
1139 			 */
1140 			if (br_mrp_is_ring_port(p_port, s_port, p) &&
1141 			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
1142 			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
1143 				goto forward;
1144 
1145 			/* Should forward the InTopo frames only between the
1146 			 * ring ports
1147 			 */
1148 			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
1149 				i_dst = NULL;
1150 				goto forward;
1151 			}
1152 
1153 			/* In all the other cases don't forward the frames */
1154 			goto no_forward;
1155 		}
1156 	}
1157 
1158 forward:
1159 	if (p_dst)
1160 		br_forward(p_dst, skb, true, false);
1161 	if (s_dst)
1162 		br_forward(s_dst, skb, true, false);
1163 	if (i_dst)
1164 		br_forward(i_dst, skb, true, false);
1165 
1166 no_forward:
1167 	return 1;
1168 }
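
/* Reading the interconnect branches above as a sketch: a MIM consumes
 * its own MRP_InTest frames, drops any interconnect frame received on
 * the interconnect port, and forwards the rest only between the ring
 * ports; a MIC floods MRP_InTest on all three ports, floods
 * IntLinkChange only when received on a ring port, forwards
 * IntTopoChange only between the ring ports, and drops everything else.
 */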
1169 
1170 /* Check if the frame was received on a port that is part of an MRP ring
1171  * and if the frame has the MRP ethertype. In that case process the frame
1172  * here (returning 1), otherwise do normal forwarding (returning 0).
1173  * note: already called with rcu_read_lock
1174  */
1175 int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
1176 {
1177 	/* If there is no MRP instance do normal forwarding */
1178 	if (likely(!(p->flags & BR_MRP_AWARE)))
1179 		goto out;
1180 
1181 	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
1182 		return br_mrp_rcv(p, skb, p->dev);
1183 
1184 out:
1185 	return 0;
1186 }
1187 
1188 bool br_mrp_enabled(struct net_bridge *br)
1189 {
1190 	return !list_empty(&br->mrp_list);
1191 }
1192