// SPDX-License-Identifier: GPL-2.0-only
/*
 *	IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 *	Copyright (c) 2012 Massachusetts Institute of Technology
 *
 *	Adapted from code in net/802/garp.c
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <linux/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_DESCRIPTION("IEEE 802.1Q Multiple Registration Protocol (MRP)");
MODULE_LICENSE("GPL");

/* Applicant state machine: next state indexed by [current state][event].
 * Entries not listed default to 0 == MRP_APPLICANT_INVALID and are caught
 * by the WARN_ON() in mrp_attr_event().  The table presumably follows the
 * "applicant only" variant of the IEEE 802.1Q MRP state machine -- confirm
 * against the standard before altering any entry.
 */
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
};

/* What (if anything) to transmit for an attribute in each applicant state
 * when a TX opportunity occurs; consumed by mrp_attr_event().
 */
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};

/* Treat an attribute value as a big-endian multi-byte integer and
 * increment it in place.  Used to step through the consecutive values
 * encoded by a single VectorAttribute.
 */
static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}

/* Total ordering over (type, len, value), used as the comparison function
 * for the per-applicant rb-tree of attributes.
 */
static int mrp_attr_cmp(const struct mrp_attr *attr,
			const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}

/* rb-tree search for an attribute; returns NULL if not present.
 * Caller must hold app->lock.
 */
static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

/* Find or insert an attribute in the rb-tree.  A newly created attribute
 * starts in the VO ("very anxious observer") state.  Returns NULL on
 * allocation failure.  Caller must hold app->lock.
 */
static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type = type;
	attr->len = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}

/* Unlink and free one attribute.  Caller must hold app->lock. */
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}

/* Destroy every attribute in the tree.  The next node is fetched before
 * the current one is destroyed, so iteration survives the rb_erase().
 */
static void mrp_attr_destroy_all(struct mrp_applicant *app)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_destroy(app, attr);
	}
}

/* Start a new outgoing PDU: allocate an MTU-sized skb, reserve link-layer
 * headroom and write the protocol version header.  The PDU under
 * construction lives in app->pdu until mrp_pdu_queue() finalizes it.
 */
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

/* Append a two-byte EndMark to the PDU; -1 if there is no tailroom left. */
static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;
	endmark = __skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}

/* Finalize the PDU under construction: terminate it with EndMarks, build
 * the link-layer header to the application's group address, and move the
 * skb to the transmit queue.  Caller must hold app->lock.
 */
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);	/* Message EndMark */
	mrp_pdu_append_end_mark(app);		/* MRPDU EndMark */

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}

/* Drain the transmit queue.  Called without app->lock held, since
 * dev_queue_xmit() must not run under the applicant lock.
 */
static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}

/* Append a Message header (attrtype, attrlen) to the PDU, closing any
 * previous Message with an EndMark first.  Returns -1 when out of room.
 */
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

/* Append a VectorAttribute header carrying the first attribute value of a
 * new Vector.  A copy of that value is kept in the skb control block so it
 * can be incremented as further events are packed.  Returns -1 when out of
 * room.
 */
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}

/* Encode one attribute event into the PDU under construction, starting a
 * new PDU, Message or VectorAttribute as needed.  Whenever the current PDU
 * runs out of room it is queued for transmission and the whole append is
 * retried on a fresh PDU (the "goto again" loop).  Returns 0 on success or
 * a negative errno if a new PDU cannot be allocated.
 */
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	/* Each packed byte holds e1*36 + e2*6 + e3 (base __MRP_VECATTR_EVENT_MAX). */
	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}

/* Drive the applicant state machine for one attribute.  On a TX event the
 * corresponding message (if any) is appended to the outgoing PDU before
 * the state transition is committed.  Caller must hold app->lock.
 */
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}

/**
 * mrp_request_join - declare an attribute on behalf of a higher layer
 * @dev: device the applicant is attached to
 * @appl: registered MRP application (e.g. MVRP)
 * @value: attribute value, @len bytes
 * @len: attribute value length
 * @type: application-specific attribute type
 *
 * Creates the attribute (if new) and feeds a JOIN event into its state
 * machine.  Returns 0, or -ENOMEM if the value would not fit in the skb
 * control block alongside struct mrp_skb_cb, or on allocation failure.
 * Caller must hold the RTNL lock.
 */
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	/* The attribute value is staged in skb->cb during PDU construction;
	 * reject values that cannot fit there.
	 */
	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);

/**
 * mrp_request_leave - withdraw an attribute declaration
 * @dev: device the applicant is attached to
 * @appl: registered MRP application
 * @value: attribute value, @len bytes
 * @len: attribute value length
 * @type: application-specific attribute type
 *
 * Feeds an LV event into the attribute's state machine; silently does
 * nothing if the attribute is unknown or the value is too large for the
 * skb control block.  Caller must hold the RTNL lock.
 */
void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);

/* Apply @event to every attribute of the applicant.  The successor node is
 * fetched before the handler runs, so the walk survives attribute
 * destruction (e.g. the S_LV path of a TX event).  Caller must hold
 * app->lock.
 */
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}

/* Re-arm the join timer with a random delay in [0, mrp_join_time) ms to
 * desynchronize transmissions between stations.
 */
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
	mod_timer(&app->join_timer, jiffies + delay);
}

/* Join timer: take a TX opportunity for all attributes, transmit the
 * resulting PDUs, and re-arm unless the applicant is being torn down.
 */
static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	/* Transmit outside the lock; re-check app->active afterwards so a
	 * concurrent mrp_uninit_applicant() does not see the timer re-armed.
	 */
	mrp_queue_xmit(app);
	spin_lock(&app->lock);
	if (likely(app->active))
		mrp_join_timer_arm(app);
	spin_unlock(&app->lock);
}

/* Re-arm the periodic timer for one mrp_periodic_time interval. */
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

/* Periodic timer: deliver a PERIODIC event to all attributes and re-arm
 * while the applicant is active.
 */
static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	if (likely(app->active)) {
		mrp_mad_event(app, MRP_EVENT_PERIODIC);
		mrp_pdu_queue(app);
		mrp_periodic_timer_arm(app);
	}
	spin_unlock(&app->lock);
}

/* Check whether an EndMark sits at *offset.  Note the inverted-looking
 * returns: -1 means "stop parsing here" -- either the bytes could not be
 * read or an EndMark was found (and consumed); 0 means "no EndMark,
 * continue parsing".
 */
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

/* Translate one received vector-attribute event into the corresponding
 * R_* state-machine event for the attribute currently held in skb->cb.
 * Events for attributes we have not declared ourselves are ignored.
 */
static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}

/* Parse one VectorAttribute from a received PDU: header (with optional
 * LeaveAll flag), the first attribute value, then the packed event vector.
 * Returns -1 on truncated or malformed input, 0 on success.
 */
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	/* A set LeaveAll flag acts on every attribute of the applicant. */
	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}

/* Parse one Message from a received PDU: the Message header followed by
 * VectorAttributes up to the next EndMark.  Returns -1 on malformed input
 * (unknown attribute type, zero length, truncation), 0 otherwise.
 */
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}

/* Packet-type receive handler for an MRP application.  Parses Messages
 * from the PDU until the terminating EndMark, feeding events into the
 * applicant under its lock.  Always consumes the skb and returns 0.
 */
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}

/* Allocate the per-device MRP port structure.  Caller holds the RTNL lock. */
static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

/* Free the per-device port, but only once no applicants remain attached.
 * Caller holds the RTNL lock.
 */
static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}

/**
 * mrp_init_applicant - attach an MRP application instance to a device
 * @dev: device to attach to
 * @appl: registered MRP application
 *
 * Creates the port (if first applicant), subscribes to the application's
 * multicast group address, and starts the join and periodic timers.
 * Returns 0 or a negative errno; on failure all partial setup is undone.
 * Caller must hold the RTNL lock.
 */
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	app->active = true;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);

/**
 * mrp_uninit_applicant - detach an MRP application instance from a device
 * @dev: device to detach from
 * @appl: registered MRP application
 *
 * Unpublishes the applicant, stops both timers, flushes a final TX pass so
 * leave messages for all remaining attributes go out, then frees the
 * applicant (RCU-deferred) and the port if it was the last one.
 * Caller must hold the RTNL lock.
 */
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	/* Clearing app->active under the lock prevents the timer handlers
	 * from re-arming themselves once we shut them down below.
	 */
	spin_lock_bh(&app->lock);
	app->active = false;
	spin_unlock_bh(&app->lock);
	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	timer_shutdown_sync(&app->join_timer);
	timer_shutdown_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_attr_destroy_all(app);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);

/**
 * mrp_register_application - register an MRP application with the stack
 * @appl: application description (packet type, group address, version)
 *
 * Installs the receive handler for the application's packet type.
 * Always returns 0.
 */
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);

/**
 * mrp_unregister_application - remove a previously registered application
 * @appl: application passed to mrp_register_application()
 */
void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);