/*
 * dvb_net.c
 *
 * Copyright (C) 2001 Convergence integrated media GmbH
 *                    Ralph Metzler <ralph@convergence.de>
 * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
 *
 * ULE Decapsulation code:
 * Copyright (C) 2003, 2004 gcs - Global Communication & Services GmbH.
 *                      and Department of Scientific Computing
 *                          Paris Lodron University of Salzburg.
 *         Hilmar Linder <hlinder@cosy.sbg.ac.at>
 *     and Wolfram Stering <wstering@cosy.sbg.ac.at>
 *
 * ULE Decaps according to RFC 4326.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 */

/*
 * ULE ChangeLog:
 * Feb 2004: hl/ws v1: Implementing draft-fair-ipdvb-ule-01.txt
 *
 * Dec 2004: hl/ws v2: Implementing draft-ietf-ipdvb-ule-03.txt:
 *                       ULE Extension header handling.
 *                     Bugreports by Moritz Vieth and Hanno Tersteegen,
 *                       Fraunhofer Institute for Open Communication Systems
 *                       Competence Center for Advanced Satellite Communications.
 *                     Bugfixes and robustness improvements.
 *                     Filtering on dest MAC addresses, if present (D-Bit = 0)
 *                     ULE_DEBUG compile-time option.
 * Apr 2006: cp v3:    Bugfixes and compliance with RFC 4326 (ULE) by
 *                       Christian Praehauser <cpraehaus@cosy.sbg.ac.at>,
 *                       Paris Lodron University of Salzburg.
 */

/*
 * FIXME / TODO (dvb_net.c):
 *
 * Unloading does not work for 2.6.9 kernels: a refcount doesn't go to zero.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <linux/crc32.h>
#include <linux/mutex.h>
#include <linux/sched.h>

#include "dvb_demux.h"
#include "dvb_net.h"

static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
{
	unsigned int j;
	for (j = 0; j < cnt; j++)
		c = crc32_be( c, iov[j].iov_base, iov[j].iov_len );
	return c;
}


#define DVB_NET_MULTICAST_MAX 10

#undef ULE_DEBUG

#ifdef ULE_DEBUG

static void hexdump(const unsigned char *buf, unsigned short len)
{
	print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
}

#endif

struct dvb_net_priv {
	int in_use;
	u16 pid;
	struct net_device *net;
	struct dvb_net *host;
	struct dmx_demux *demux;
	struct dmx_section_feed *secfeed;
	struct dmx_section_filter *secfilter;
	struct dmx_ts_feed *tsfeed;
	int multi_num;
	struct dmx_section_filter *multi_secfilter[DVB_NET_MULTICAST_MAX];
	unsigned char multi_macs[DVB_NET_MULTICAST_MAX][6];
	int rx_mode;
#define RX_MODE_UNI 0
#define RX_MODE_MULTI 1
#define RX_MODE_ALL_MULTI 2
#define RX_MODE_PROMISC 3
	struct work_struct set_multicast_list_wq;
	struct work_struct restart_net_feed_wq;
	unsigned char feedtype;		/* Either DVB_NET_FEEDTYPE_MPE or DVB_NET_FEEDTYPE_ULE. */
	int need_pusi;			/* Set to 1 if synchronization on PUSI is required. */
	unsigned char tscc;		/* TS continuity counter after sync on PUSI. */
	struct sk_buff *ule_skb;	/* ULE SNDU decodes into this buffer. */
	unsigned char *ule_next_hdr;	/* Pointer into skb to next ULE extension header. */
	unsigned short ule_sndu_len;	/* ULE SNDU length in bytes, w/o D-Bit. */
	unsigned short ule_sndu_type;	/* ULE SNDU type field, complete. */
	unsigned char ule_sndu_type_1;	/* ULE SNDU type field, if split across 2 TS cells. */
	unsigned char ule_dbit;		/* D-bit value: set if the DestMAC address is not present. */
	unsigned char ule_bridged;	/* Whether the ULE_BRIDGED extension header was found. */
	int ule_sndu_remain;		/* Nr. of bytes still required for current ULE SNDU. */
	unsigned long ts_count;		/* Current ts cell counter. */
	struct mutex mutex;
};


/**
 * Determine the packet's protocol ID.  The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * stolen from eth.c out of the linux kernel, hacked for dvb-device
 * by Michael Holzt <kju@debian.org>
 */
static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb, dev->hard_header_len);
	eth = eth_hdr(skb);

	if (*eth->h_dest & 1) {
		if (ether_addr_equal(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}

	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/**
	 * This is a magic hack to spot IPX packets.  Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer.  We look for FFFF which isn't a used 802.2 SSAP/DSAP.  This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/**
	 * Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}

#define TS_SZ	188
#define TS_SYNC	0x47
#define TS_TEI	0x80
#define TS_SC	0xC0
#define TS_PUSI	0x40
#define TS_AF_A	0x20
#define TS_AF_D	0x10

/* ULE Extension Header handlers. */

#define ULE_TEST	0
#define ULE_BRIDGED	1

#define ULE_OPTEXTHDR_PADDING 0

static int ule_test_sndu( struct dvb_net_priv *p )
{
	return -1;
}

static int ule_bridged_sndu( struct dvb_net_priv *p )
{
	struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
	if (ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
		int framelen = p->ule_sndu_len - ((p->ule_next_hdr + sizeof(struct ethhdr)) - p->ule_skb->data);
		/* A frame Type < ETH_P_802_3_MIN for a bridged frame introduces an LLC Length field. */
		if (framelen != ntohs(hdr->h_proto)) {
			return -1;
		}
	}
	/* Note:
	 * From RFC4326:
	 *  "A bridged SNDU is a Mandatory Extension Header of Type 1.
	 *   It must be the final (or only) extension header specified in the header chain of a SNDU."
	 * The 'ule_bridged' flag will cause the extension header processing loop to terminate.
	 */
	p->ule_bridged = 1;
	return 0;
}

static int ule_exthdr_padding(struct dvb_net_priv *p)
{
	return 0;
}

/** Handle ULE extension headers.
 *  Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
 *  Returns: >= 0: nr. of bytes consumed by next extension header
 *           -1:   Mandatory extension header that is not recognized or TEST SNDU; discard.
 */
static int handle_one_ule_extension( struct dvb_net_priv *p )
{
	/* Table of mandatory extension header handlers.  The header type is the index. */
	static int (*ule_mandatory_ext_handlers[255])( struct dvb_net_priv *p ) =
		{ [0] = ule_test_sndu, [1] = ule_bridged_sndu, [2] = NULL, };

	/* Table of optional extension header handlers.  The header type is the index. */
	static int (*ule_optional_ext_handlers[255])( struct dvb_net_priv *p ) =
		{ [0] = ule_exthdr_padding, [1] = NULL, };

	int ext_len = 0;
	unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
	unsigned char htype = p->ule_sndu_type & 0x00FF;

	/* Discriminate mandatory and optional extension headers. */
	if (hlen == 0) {
		/* Mandatory extension header */
		if (ule_mandatory_ext_handlers[htype]) {
			ext_len = ule_mandatory_ext_handlers[htype]( p );
			if (ext_len >= 0) {
				p->ule_next_hdr += ext_len;
				if (!p->ule_bridged) {
					p->ule_sndu_type = ntohs(*(__be16 *)p->ule_next_hdr);
					p->ule_next_hdr += 2;
				} else {
					p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr + ((p->ule_dbit ? 2 : 3) * ETH_ALEN)));
					/* This assures the extension handling loop will terminate. */
				}
			}
			// else: extension handler failed or SNDU should be discarded
		} else
			ext_len = -1;	/* SNDU has to be discarded. */
	} else {
		/* Optional extension header.  Calculate the length. */
		ext_len = hlen << 1;
		/* Process the optional extension header according to its type. */
		if (ule_optional_ext_handlers[htype])
			(void)ule_optional_ext_handlers[htype]( p );
		p->ule_next_hdr += ext_len;
		p->ule_sndu_type = ntohs( *(__be16 *)(p->ule_next_hdr - 2) );
		/*
		 * note: the length of the next header type is included in the
		 * length of THIS optional extension header
		 */
	}

	return ext_len;
}

static int handle_ule_extensions( struct dvb_net_priv *p )
{
	int total_ext_len = 0, l;

	p->ule_next_hdr = p->ule_skb->data;
	do {
		l = handle_one_ule_extension( p );
		if (l < 0)
			return l;	/* Stop extension header processing and discard SNDU. */
		total_ext_len += l;
#ifdef ULE_DEBUG
		pr_debug("ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\n",
			 p->ule_next_hdr, (int)p->ule_sndu_type,
			 l, total_ext_len);
#endif

	} while (p->ule_sndu_type < ETH_P_802_3_MIN);

	return total_ext_len;
}


/** Prepare for a new ULE SNDU: reset the decoder state. */
static inline void reset_ule( struct dvb_net_priv *p )
{
	p->ule_skb = NULL;
	p->ule_next_hdr = NULL;
	p->ule_sndu_len = 0;
	p->ule_sndu_type = 0;
	p->ule_sndu_type_1 = 0;
	p->ule_sndu_remain = 0;
	p->ule_dbit = 0xFF;
	p->ule_bridged = 0;
}

/**
 * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
 * TS cells of a single PID.
 */
static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
{
	struct dvb_net_priv *priv = netdev_priv(dev);
	unsigned long skipped = 0L;
	const u8 *ts, *ts_end, *from_where = NULL;
	u8 ts_remain = 0, how_much = 0, new_ts = 1;
	struct ethhdr *ethh = NULL;
	bool error = false;

#ifdef ULE_DEBUG
	/* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
	static unsigned char ule_hist[100*TS_SZ];
	static unsigned char *ule_where = ule_hist, ule_dump;
#endif

	/* For all TS cells in current buffer.
	 * Apparently, we are called for every single TS cell.
	 */
	for (ts = buf, ts_end = buf + buf_len; ts < ts_end; /* no default incr. */ ) {

		if (new_ts) {
			/* We are about to process a new TS cell. */

#ifdef ULE_DEBUG
			if (ule_where >= &ule_hist[100*TS_SZ]) ule_where = ule_hist;
			memcpy( ule_where, ts, TS_SZ );
			if (ule_dump) {
				hexdump( ule_where, TS_SZ );
				ule_dump = 0;
			}
			ule_where += TS_SZ;
#endif

			/* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control. */
			if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) {
				printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
				       priv->ts_count, ts[0],
				       (ts[1] & TS_TEI) >> 7,
				       (ts[3] & TS_SC) >> 6);

				/* Drop partly decoded SNDU, reset state, resync on PUSI. */
				if (priv->ule_skb) {
					dev_kfree_skb( priv->ule_skb );
					/* Prepare for next SNDU. */
					dev->stats.rx_errors++;
					dev->stats.rx_frame_errors++;
				}
				reset_ule(priv);
				priv->need_pusi = 1;

				/* Continue with next TS cell. */
				ts += TS_SZ;
				priv->ts_count++;
				continue;
			}

			ts_remain = 184;
			from_where = ts + 4;
		}
		/* Synchronize on PUSI, if required. */
		if (priv->need_pusi) {
			if (ts[1] & TS_PUSI) {
				/* Find beginning of first ULE SNDU in current TS cell. */
				/* Synchronize continuity counter. */
				priv->tscc = ts[3] & 0x0F;
				/* There is a pointer field here. */
				if (ts[4] > ts_remain) {
					printk(KERN_ERR "%lu: Invalid ULE packet "
					       "(pointer field %d)\n", priv->ts_count, ts[4]);
					ts += TS_SZ;
					priv->ts_count++;
					continue;
				}
				/* Skip to destination of pointer field. */
				from_where = &ts[5] + ts[4];
				ts_remain -= 1 + ts[4];
				skipped = 0;
			} else {
				skipped++;
				ts += TS_SZ;
				priv->ts_count++;
				continue;
			}
		}

		if (new_ts) {
			/* Check continuity counter. */
			if ((ts[3] & 0x0F) == priv->tscc)
				priv->tscc = (priv->tscc + 1) & 0x0F;
			else {
				/* TS discontinuity handling: */
				printk(KERN_WARNING "%lu: TS discontinuity: got %#x, "
				       "expected %#x.\n", priv->ts_count, ts[3] & 0x0F, priv->tscc);
				/* Drop partly decoded SNDU, reset state, resync on PUSI. */
				if (priv->ule_skb) {
					dev_kfree_skb( priv->ule_skb );
					/* Prepare for next SNDU. */
					// reset_ule(priv);  moved to below.
					dev->stats.rx_errors++;
					dev->stats.rx_frame_errors++;
				}
				reset_ule(priv);
				/* skip to next PUSI. */
				priv->need_pusi = 1;
				continue;
			}
			/* If we still have an incomplete payload, but PUSI is
			 * set; some TS cells are missing.
			 * This is only possible here, if we missed exactly 16 TS
			 * cells (continuity counter wrap). */
			if (ts[1] & TS_PUSI) {
				if (! priv->need_pusi) {
					if (!(*from_where < (ts_remain-1)) || *from_where != priv->ule_sndu_remain) {
						/* Pointer field is invalid.
						 * Drop this TS cell and any started ULE SNDU. */
						printk(KERN_WARNING "%lu: Invalid pointer "
						       "field: %u.\n", priv->ts_count, *from_where);

						/* Drop partly decoded SNDU, reset state, resync on PUSI. */
						if (priv->ule_skb) {
							error = true;
							dev_kfree_skb(priv->ule_skb);
						}

						if (error || priv->ule_sndu_remain) {
							dev->stats.rx_errors++;
							dev->stats.rx_frame_errors++;
							error = false;
						}

						reset_ule(priv);
						priv->need_pusi = 1;
						continue;
					}
					/* Skip pointer field (we're processing a
					 * packed payload). */
					from_where += 1;
					ts_remain -= 1;
				} else
					priv->need_pusi = 0;

				if (priv->ule_sndu_remain > 183) {
					/* Current SNDU lacks more data than there could be available in the
					 * current TS cell. */
					dev->stats.rx_errors++;
					dev->stats.rx_length_errors++;
					printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
					       "got PUSI (pf %d, ts_remain %d).  Flushing incomplete payload.\n",
					       priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
					dev_kfree_skb(priv->ule_skb);
					/* Prepare for next SNDU. */
					reset_ule(priv);
					/* Resync: go to where pointer field points to: start of next ULE SNDU. */
					from_where += ts[4];
					ts_remain -= ts[4];
				}
			}
		}

		/* Check if new payload needs to be started. */
		if (priv->ule_skb == NULL) {
			/* Start a new payload with skb.
			 * Find ULE header.  It is only guaranteed that the
			 * length field (2 bytes) is contained in the current
			 * TS, i.e. ts_remain has to be >= 2 here. */
			if (ts_remain < 2) {
				printk(KERN_WARNING "Invalid payload packing: only %d "
				       "bytes left in TS.  Resyncing.\n", ts_remain);
				priv->ule_sndu_len = 0;
				priv->need_pusi = 1;
				ts += TS_SZ;
				continue;
			}

			if (! priv->ule_sndu_len) {
				/* Got at least two bytes, thus extract the SNDU length. */
				priv->ule_sndu_len = from_where[0] << 8 | from_where[1];
				if (priv->ule_sndu_len & 0x8000) {
					/* D-Bit is set: no dest mac present. */
					priv->ule_sndu_len &= 0x7FFF;
					priv->ule_dbit = 1;
				} else
					priv->ule_dbit = 0;

				if (priv->ule_sndu_len < 5) {
					printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
					       "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
					dev->stats.rx_errors++;
					dev->stats.rx_length_errors++;
					priv->ule_sndu_len = 0;
					priv->need_pusi = 1;
					new_ts = 1;
					ts += TS_SZ;
					priv->ts_count++;
					continue;
				}
				ts_remain -= 2;	/* consume the 2 bytes SNDU length. */
				from_where += 2;
			}

			priv->ule_sndu_remain = priv->ule_sndu_len + 2;
			/*
			 * State of current TS:
			 *   ts_remain (remaining bytes in the current TS cell)
			 *   0	ule_type is not available now, we need the next TS cell
			 *   1	the first byte of the ule_type is present
			 *  >=2	full ULE header present, maybe some payload data as well.
			 */
			switch (ts_remain) {
			case 1:
				priv->ule_sndu_remain--;
				priv->ule_sndu_type = from_where[0] << 8;
				priv->ule_sndu_type_1 = 1;	/* first byte of ule_type is set. */
				ts_remain -= 1; from_where += 1;
				/* Continue w/ next TS. */
				/* fall through */
			case 0:
				new_ts = 1;
				ts += TS_SZ;
				priv->ts_count++;
				continue;

			default:	/* complete ULE header is present in current TS. */
				/* Extract ULE type field. */
				if (priv->ule_sndu_type_1) {
					priv->ule_sndu_type_1 = 0;
					priv->ule_sndu_type |= from_where[0];
					from_where += 1;	/* points to payload start. */
					ts_remain -= 1;
				} else {
					/* Complete type is present in new TS. */
					priv->ule_sndu_type = from_where[0] << 8 | from_where[1];
					from_where += 2;	/* points to payload start. */
					ts_remain -= 2;
				}
				break;
			}

			/* Allocate the skb (decoder target buffer) with the correct size, as follows:
			 *   prepare for the largest case: bridged SNDU with MAC address (dbit = 0). */
			priv->ule_skb = dev_alloc_skb( priv->ule_sndu_len + ETH_HLEN + ETH_ALEN );
			if (priv->ule_skb == NULL) {
				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
				return;
			}

			/* This includes the CRC32 _and_ dest mac, if !dbit. */
			priv->ule_sndu_remain = priv->ule_sndu_len;
			priv->ule_skb->dev = dev;
			/* Leave space for Ethernet or bridged SNDU header (eth hdr plus one MAC addr). */
			skb_reserve( priv->ule_skb, ETH_HLEN + ETH_ALEN );
		}

		/* Copy data into our current skb. */
		how_much = min(priv->ule_sndu_remain, (int)ts_remain);
		memcpy(skb_put(priv->ule_skb, how_much), from_where, how_much);
		priv->ule_sndu_remain -= how_much;
		ts_remain -= how_much;
		from_where += how_much;

		/* Check for complete payload. */
		if (priv->ule_sndu_remain <= 0) {
			/* Check CRC32, we've got it in our skb already. */
			__be16 ulen = htons(priv->ule_sndu_len);
			__be16 utype = htons(priv->ule_sndu_type);
			const u8 *tail;
			struct kvec iov[3] = {
				{ &ulen, sizeof ulen },
				{ &utype, sizeof utype },
				{ priv->ule_skb->data, priv->ule_skb->len - 4 }
			};
			u32 ule_crc = ~0L, expected_crc;
			if (priv->ule_dbit) {
				/* Set D-bit for CRC32 verification,
				 * if it was set originally. */
				ulen |= htons(0x8000);
			}

			ule_crc = iov_crc32(ule_crc, iov, 3);
			tail = skb_tail_pointer(priv->ule_skb);
			expected_crc = *(tail - 4) << 24 |
				       *(tail - 3) << 16 |
				       *(tail - 2) << 8 |
				       *(tail - 1);
			if (ule_crc != expected_crc) {
				printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
				       priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);

#ifdef ULE_DEBUG
				hexdump( iov[0].iov_base, iov[0].iov_len );
				hexdump( iov[1].iov_base, iov[1].iov_len );
				hexdump( iov[2].iov_base, iov[2].iov_len );

				if (ule_where == ule_hist) {
					hexdump( &ule_hist[98*TS_SZ], TS_SZ );
					hexdump( &ule_hist[99*TS_SZ], TS_SZ );
				} else if (ule_where == &ule_hist[TS_SZ]) {
					hexdump( &ule_hist[99*TS_SZ], TS_SZ );
					hexdump( ule_hist, TS_SZ );
				} else {
					hexdump( ule_where - TS_SZ - TS_SZ, TS_SZ );
					hexdump( ule_where - TS_SZ, TS_SZ );
				}
				ule_dump = 1;
#endif

				dev->stats.rx_errors++;
				dev->stats.rx_crc_errors++;
				dev_kfree_skb(priv->ule_skb);
			} else {
				/* CRC32 verified OK. */
				u8 dest_addr[ETH_ALEN];
				static const u8 bc_addr[ETH_ALEN] =
					{ [0 ... ETH_ALEN-1] = 0xff };

				/* CRC32 was OK.  Remove it from skb. */
				priv->ule_skb->tail -= 4;
				priv->ule_skb->len -= 4;

				if (!priv->ule_dbit) {
					/*
					 * The destination MAC address is the
					 * next data in the skb.  It comes
					 * before any extension headers.
					 *
					 * Check if the payload of this SNDU
					 * should be passed up the stack.
					 */
					register int drop = 0;
					if (priv->rx_mode != RX_MODE_PROMISC) {
						if (priv->ule_skb->data[0] & 0x01) {
							/* multicast or broadcast */
							if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
								/* multicast */
								if (priv->rx_mode == RX_MODE_MULTI) {
									int i;
									for (i = 0; i < priv->multi_num &&
									     !ether_addr_equal(priv->ule_skb->data,
											       priv->multi_macs[i]); i++)
										;
									if (i == priv->multi_num)
										drop = 1;
								} else if (priv->rx_mode != RX_MODE_ALL_MULTI)
									drop = 1; /* no broadcast; */
								/* else: all multicast mode: accept all multicast packets */
							}
							/* else: broadcast */
						}
						else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
							drop = 1;
						/* else: destination address matches the MAC address of our receiver device */
					}
					/* else: promiscuous mode; pass everything up the stack */

					if (drop) {
#ifdef ULE_DEBUG
						netdev_dbg(dev, "Dropping SNDU: MAC destination address does not match: dest addr: %pM, dev addr: %pM\n",
							   priv->ule_skb->data, dev->dev_addr);
#endif
						dev_kfree_skb(priv->ule_skb);
						goto sndu_done;
					}
					else
					{
						skb_copy_from_linear_data(priv->ule_skb,
									  dest_addr,
									  ETH_ALEN);
						skb_pull(priv->ule_skb, ETH_ALEN);
					}
				}

				/* Handle ULE Extension Headers. */
				if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
					/* There is an extension header.  Handle it accordingly. */
					int l = handle_ule_extensions(priv);
					if (l < 0) {
						/* Mandatory extension header unknown or TEST SNDU.  Drop it. */
						// printk( KERN_WARNING "Dropping SNDU, extension headers.\n" );
						dev_kfree_skb(priv->ule_skb);
						goto sndu_done;
					}
					skb_pull(priv->ule_skb, l);
				}

				/*
				 * Construct/assure correct ethernet header.
				 * Note: in bridged mode (priv->ule_bridged !=
				 * 0) we already have the (original) ethernet
				 * header at the start of the payload (after
				 * optional dest. address and any extension
				 * headers).
				 */

				if (!priv->ule_bridged) {
					skb_push(priv->ule_skb, ETH_HLEN);
					ethh = (struct ethhdr *)priv->ule_skb->data;
					if (!priv->ule_dbit) {
						/* dest_addr buffer is only valid if priv->ule_dbit == 0 */
						memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
						memset(ethh->h_source, 0, ETH_ALEN);
					}
					else /* zeroize source and dest */
						memset( ethh, 0, ETH_ALEN*2 );

					ethh->h_proto = htons(priv->ule_sndu_type);
				}
				/* else: skb is in correct state; nothing to do. */
				priv->ule_bridged = 0;

				/* Stuff into kernel's protocol stack. */
				priv->ule_skb->protocol = dvb_net_eth_type_trans(priv->ule_skb, dev);
				/* If D-bit is set (i.e. destination MAC address not present),
				 * receive the packet anyhow. */
				/* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
					priv->ule_skb->pkt_type = PACKET_HOST; */
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += priv->ule_skb->len;
				netif_rx(priv->ule_skb);
			}
		sndu_done:
			/* Prepare for next SNDU. */
			reset_ule(priv);
		}

		/* More data in current TS (look at the bytes following the CRC32)? */
		if (ts_remain >= 2 && *((unsigned short *)from_where) != 0xFFFF) {
			/* Next ULE SNDU starts right there. */
			new_ts = 0;
			priv->ule_skb = NULL;
			priv->ule_sndu_type_1 = 0;
			priv->ule_sndu_len = 0;
			// printk(KERN_WARNING "More data in current TS: [%#x %#x %#x %#x]\n",
			//	*(from_where + 0), *(from_where + 1),
			//	*(from_where + 2), *(from_where + 3));
			// printk(KERN_WARNING "ts @ %p, stopped @ %p:\n", ts, from_where + 0);
			// hexdump(ts, 188);
		} else {
			new_ts = 1;
			ts += TS_SZ;
			priv->ts_count++;
			if (priv->ule_skb == NULL) {
				priv->need_pusi = 1;
				priv->ule_sndu_type_1 = 0;
				priv->ule_sndu_len = 0;
			}
		}
	}	/* for all available TS cells */
}

static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
			       const u8 *buffer2, size_t buffer2_len,
			       struct dmx_ts_feed *feed, enum dmx_success success)
{
	struct net_device *dev = feed->priv;

	if (buffer2)
		printk(KERN_WARNING "buffer2 not NULL: %p.\n", buffer2);
	if (buffer1_len > 32768)
		printk(KERN_WARNING "length > 32k: %zu.\n", buffer1_len);
	/* printk("TS callback: %u bytes, %u TS cells @ %p.\n",
		  buffer1_len, buffer1_len / TS_SZ, buffer1); */
	dvb_net_ule(dev, buffer1, buffer1_len);
	return 0;
}


static void dvb_net_sec(struct net_device *dev,
			const u8 *pkt, int pkt_len)
{
	u8 *eth;
	struct sk_buff *skb;
	struct net_device_stats *stats = &dev->stats;
	int snap = 0;

	/* note: pkt_len includes a 32bit checksum */
	if (pkt_len < 16) {
		printk("%s: IP/MPE packet length = %d too small.\n",
		       dev->name, pkt_len);
		stats->rx_errors++;
		stats->rx_length_errors++;
		return;
	}
	/* it seems some ISPs manage to screw up here, so we have to
	 * relax the error checks... */
#if 0
	if ((pkt[5] & 0xfd) != 0xc1) {
		/* drop scrambled or broken packets */
#else
	if ((pkt[5] & 0x3c) != 0x00) {
		/* drop scrambled */
#endif
		stats->rx_errors++;
		stats->rx_crc_errors++;
		return;
	}
	if (pkt[5] & 0x02) {
		/* handle LLC/SNAP, see rfc-1042 */
		if (pkt_len < 24 || memcmp(&pkt[12], "\xaa\xaa\x03\0\0\0", 6)) {
			stats->rx_dropped++;
			return;
		}
		snap = 8;
	}
	if (pkt[7]) {
		/* FIXME: assemble datagram from multiple sections */
		stats->rx_errors++;
		stats->rx_frame_errors++;
		return;
	}

	/* we have 14 byte ethernet header (ip header follows);
	 * 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
	 */
	if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
		//printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
		stats->rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */
	skb->dev = dev;

	/* copy L3 payload */
	eth = (u8 *) skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
	memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);

	/* create ethernet header: */
	eth[0] = pkt[0x0b];
	eth[1] = pkt[0x0a];
	eth[2] = pkt[0x09];
	eth[3] = pkt[0x08];
	eth[4] = pkt[0x04];
	eth[5] = pkt[0x03];

	eth[6] = eth[7] = eth[8] = eth[9] = eth[10] = eth[11] = 0;

	if (snap) {
		eth[12] = pkt[18];
		eth[13] = pkt[19];
	} else {
		/* protocol numbers are from rfc-1700 or
		 * http://www.iana.org/assignments/ethernet-numbers
		 */
		if (pkt[12] >> 4 == 6) { /* version field from IP header */
			eth[12] = 0x86;	/* IPv6 */
			eth[13] = 0xdd;
		} else {
			eth[12] = 0x08;	/* IPv4 */
			eth[13] = 0x00;
		}
	}

	skb->protocol = dvb_net_eth_type_trans(skb, dev);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	netif_rx(skb);
}

static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
				const u8 *buffer2, size_t buffer2_len,
				struct dmx_section_filter *filter,
				enum dmx_success success)
{
	struct net_device *dev = filter->priv;

	/**
	 * we rely on the DVB API definition where exactly one complete
	 * section is delivered in buffer1
	 */
	dvb_net_sec (dev, buffer1, buffer1_len);
	return 0;
}

static int dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static u8 mask_normal[6]   = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static u8 mask_allmulti[6] = {0xff, 0xff, 0xff, 0x00, 0x00, 0x00};
static u8 mac_allmulti[6]  = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x00};
static u8 mask_promisc[6]  = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

static int dvb_net_filter_sec_set(struct net_device *dev,
				  struct dmx_section_filter **secfilter,
				  u8 *mac, u8 *mac_mask)
{
	struct dvb_net_priv *priv = netdev_priv(dev);
	int ret;

	*secfilter = NULL;
	ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
	if (ret < 0) {
		printk("%s: could not get filter\n", dev->name);
		return ret;
	}

	(*secfilter)->priv = (void *) dev;

	memset((*secfilter)->filter_value, 0x00, DMX_MAX_FILTER_SIZE);
	memset((*secfilter)->filter_mask,  0x00, DMX_MAX_FILTER_SIZE);
	memset((*secfilter)->filter_mode,  0xff, DMX_MAX_FILTER_SIZE);

	(*secfilter)->filter_value[0] = 0x3e;
	(*secfilter)->filter_value[3] = mac[5];
	(*secfilter)->filter_value[4] = mac[4];
	(*secfilter)->filter_value[8] = mac[3];
	(*secfilter)->filter_value[9] = mac[2];
	(*secfilter)->filter_value[10] = mac[1];
	(*secfilter)->filter_value[11] = mac[0];

	(*secfilter)->filter_mask[0] = 0xff;
	(*secfilter)->filter_mask[3] = mac_mask[5];
	(*secfilter)->filter_mask[4] = mac_mask[4];
	(*secfilter)->filter_mask[8] = mac_mask[3];
	(*secfilter)->filter_mask[9] = mac_mask[2];
	(*secfilter)->filter_mask[10] = mac_mask[1];
	(*secfilter)->filter_mask[11] = mac_mask[0];

	netdev_dbg(dev, "filter mac=%pM mask=%pM\n", mac, mac_mask);

	return 0;
}

static int dvb_net_feed_start(struct net_device *dev)
{
	int ret = 0, i;
	struct dvb_net_priv *priv = netdev_priv(dev);
	struct dmx_demux *demux = priv->demux;
	unsigned char *mac = (unsigned char *) dev->dev_addr;

	netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
	mutex_lock(&priv->mutex);
	if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
		printk("%s: BUG %d\n", __func__, __LINE__);

	priv->secfeed = NULL;
	priv->secfilter = NULL;
	priv->tsfeed = NULL;

	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
		netdev_dbg(dev, "alloc secfeed\n");
		ret = demux->allocate_section_feed(demux, &priv->secfeed,
						   dvb_net_sec_callback);
		if (ret < 0) {
			printk("%s: could not allocate section feed\n", dev->name);
			goto error;
		}

		ret = priv->secfeed->set(priv->secfeed, priv->pid, 32768, 1);

		if (ret < 0) {
			printk("%s: could not set section feed\n", dev->name);
			priv->demux->release_section_feed(priv->demux, priv->secfeed);
			priv->secfeed = NULL;
			goto error;
		}

		if (priv->rx_mode != RX_MODE_PROMISC) {
			netdev_dbg(dev, "set secfilter\n");
			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_normal);
		}

		switch (priv->rx_mode) {
		case RX_MODE_MULTI:
			for (i = 0; i < priv->multi_num; i++) {
				netdev_dbg(dev, "set multi_secfilter[%d]\n", i);
				dvb_net_filter_sec_set(dev, &priv->multi_secfilter[i],
						       priv->multi_macs[i], mask_normal);
			}
			break;
		case RX_MODE_ALL_MULTI:
			priv->multi_num = 1;
			netdev_dbg(dev, "set multi_secfilter[0]\n");
			dvb_net_filter_sec_set(dev, &priv->multi_secfilter[0],
					       mac_allmulti, mask_allmulti);
			break;
		case RX_MODE_PROMISC:
			priv->multi_num = 0;
			netdev_dbg(dev, "set secfilter\n");
			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_promisc);
			break;
		}

		netdev_dbg(dev, "start filtering\n");
		priv->secfeed->start_filtering(priv->secfeed);
	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
		struct timespec timeout = { 0, 10000000 }; // 10 msec

		/* we have payloads encapsulated in TS */
		netdev_dbg(dev, "alloc tsfeed\n");
		ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
		if (ret < 0) {
			printk("%s: could not allocate ts feed\n", dev->name);
			goto error;
		}

		/* Set netdevice pointer for ts decaps callback. */
		priv->tsfeed->priv = (void *)dev;
		ret = priv->tsfeed->set(priv->tsfeed,
					priv->pid,     /* pid */
					TS_PACKET,     /* type */
					DMX_PES_OTHER, /* pes type */
					32768,         /* circular buffer size */
					timeout        /* timeout */
					);

		if (ret < 0) {
			printk("%s: could not set ts feed\n", dev->name);
			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
			priv->tsfeed = NULL;
			goto error;
		}

		netdev_dbg(dev, "start filtering\n");
		priv->tsfeed->start_filtering(priv->tsfeed);
	} else
		ret = -EINVAL;

error:
	mutex_unlock(&priv->mutex);
	return ret;
}

static int dvb_net_feed_stop(struct net_device *dev)
{
	struct dvb_net_priv *priv = netdev_priv(dev);
	int i, ret = 0;

	mutex_lock(&priv->mutex);
	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
		if (priv->secfeed) {
			if (priv->secfeed->is_filtering) {
				netdev_dbg(dev, "stop secfeed\n");
				priv->secfeed->stop_filtering(priv->secfeed);
			}

			if (priv->secfilter) {
				netdev_dbg(dev, "release secfilter\n");
				priv->secfeed->release_filter(priv->secfeed,
							      priv->secfilter);
				priv->secfilter = NULL;
			}

			for (i = 0; i < priv->multi_num; i++) {
				if (priv->multi_secfilter[i]) {
					netdev_dbg(dev, "release multi_filter[%d]\n",
						   i);
					priv->secfeed->release_filter(priv->secfeed,
								      priv->multi_secfilter[i]);
					priv->multi_secfilter[i] = NULL;
				}
			}

			priv->demux->release_section_feed(priv->demux, priv->secfeed);
			priv->secfeed = NULL;
		} else
			printk("%s: no feed to stop\n", dev->name);
	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
		if (priv->tsfeed) {
			if (priv->tsfeed->is_filtering) {
				netdev_dbg(dev, "stop tsfeed\n");
				priv->tsfeed->stop_filtering(priv->tsfeed);
			}
			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
			priv->tsfeed = NULL;
		}
		else
			printk("%s: no ts feed to stop\n", dev->name);
	} else
		ret = -EINVAL;
	mutex_unlock(&priv->mutex);
	return ret;
}


static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
{
	struct dvb_net_priv *priv = netdev_priv(dev);

	if (priv->multi_num == DVB_NET_MULTICAST_MAX)
		return -ENOMEM;

	memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);

	priv->multi_num++;
	return 0;
}


static void wq_set_multicast_list (struct work_struct *work)
{
	struct dvb_net_priv *priv =
		container_of(work, struct dvb_net_priv, set_multicast_list_wq);
	struct net_device *dev = priv->net;

	dvb_net_feed_stop(dev);
	priv->rx_mode = RX_MODE_UNI;
	netif_addr_lock_bh(dev);

	if (dev->flags & IFF_PROMISC) {
		netdev_dbg(dev, "promiscuous mode\n");
		priv->rx_mode = RX_MODE_PROMISC;
	} else if ((dev->flags & IFF_ALLMULTI)) {
		netdev_dbg(dev, "allmulti mode\n");
		priv->rx_mode = RX_MODE_ALL_MULTI;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		netdev_dbg(dev, "set_mc_list, %d entries\n",
			   netdev_mc_count(dev));

		priv->rx_mode = RX_MODE_MULTI;
		priv->multi_num = 0;

		netdev_for_each_mc_addr(ha, dev)
			dvb_set_mc_filter(dev, ha->addr);
	}

	netif_addr_unlock_bh(dev);
	dvb_net_feed_start(dev);
}


static void dvb_net_set_multicast_list (struct net_device *dev)
{
	struct dvb_net_priv *priv = netdev_priv(dev);
	schedule_work(&priv->set_multicast_list_wq);
}


static void wq_restart_net_feed (struct work_struct *work)
{
	struct dvb_net_priv *priv =
		container_of(work, struct dvb_net_priv, restart_net_feed_wq);
	struct net_device *dev = priv->net;

	if (netif_running(dev)) {
		dvb_net_feed_stop(dev);
		dvb_net_feed_start(dev);
	}
}


static int dvb_net_set_mac (struct net_device *dev, void *p)
{
	struct dvb_net_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		schedule_work(&priv->restart_net_feed_wq);

	return 0;
}


static int dvb_net_open(struct net_device *dev)
{
	struct dvb_net_priv *priv = netdev_priv(dev);

	priv->in_use++;
	dvb_net_feed_start(dev);
	return 0;
}


static int dvb_net_stop(struct net_device *dev)
{
	struct dvb_net_priv *priv = netdev_priv(dev);

	priv->in_use--;
	return dvb_net_feed_stop(dev);
}

static const struct header_ops dvb_header_ops = {
	.create		= eth_header,
	.parse		= eth_header_parse,
};


static const struct net_device_ops dvb_netdev_ops = {
	.ndo_open		= dvb_net_open,
	.ndo_stop		= dvb_net_stop,
	.ndo_start_xmit		= dvb_net_tx,
	.ndo_set_rx_mode	= dvb_net_set_multicast_list,
	.ndo_set_mac_address	= dvb_net_set_mac,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static void dvb_net_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->header_ops		= &dvb_header_ops;
	dev->netdev_ops		= &dvb_netdev_ops;
	dev->mtu		= 4096;

	dev->flags |= IFF_NOARP;
}

static int get_if(struct dvb_net *dvbnet)
{
	int i;

	for (i = 0; i < DVB_NET_DEVICES_MAX; i++)
		if (!dvbnet->state[i])
			break;

	if (i == DVB_NET_DEVICES_MAX)
		return -1;

	dvbnet->state[i] = 1;
	return i;
}

static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
{
	struct net_device *net;
	struct dvb_net_priv *priv;
	int result;
	int if_num;

	if (feedtype != DVB_NET_FEEDTYPE_MPE && feedtype != DVB_NET_FEEDTYPE_ULE)
		return -EINVAL;
	if ((if_num = get_if(dvbnet)) < 0)
		return -EINVAL;

	net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
			   NET_NAME_UNKNOWN, dvb_net_setup);
	if (!net)
		return -ENOMEM;

	if (dvbnet->dvbdev->id)
		snprintf(net->name, IFNAMSIZ, "dvb%d%u%d",
			 dvbnet->dvbdev->adapter->num, dvbnet->dvbdev->id, if_num);
	else
		/* compatibility fix to keep dvb0_0 format */
		snprintf(net->name, IFNAMSIZ, "dvb%d_%d",
			 dvbnet->dvbdev->adapter->num, if_num);

	net->addr_len = 6;
	memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6);

	dvbnet->device[if_num] = net;

	priv = netdev_priv(net);
	priv->net = net;
	priv->demux = dvbnet->demux;
	priv->pid = pid;
	priv->rx_mode = RX_MODE_UNI;
	priv->need_pusi = 1;
	priv->tscc = 0;
	priv->feedtype = feedtype;
	reset_ule(priv);

	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
	mutex_init(&priv->mutex);

	net->base_addr = pid;

	if ((result = register_netdev(net)) < 0) {
		dvbnet->device[if_num] = NULL;
		free_netdev(net);
		return result;
	}
	printk("dvb_net: created network interface %s\n", net->name);

	return if_num;
}

static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
{
	struct net_device *net = dvbnet->device[num];
	struct dvb_net_priv *priv;

	if (!dvbnet->state[num])
		return -EINVAL;
	priv = netdev_priv(net);
	if (priv->in_use)
		return -EBUSY;

	dvb_net_stop(net);
	flush_work(&priv->set_multicast_list_wq);
	flush_work(&priv->restart_net_feed_wq);
	printk("dvb_net: removed network interface %s\n", net->name);
	unregister_netdev(net);
	dvbnet->state[num] = 0;
	dvbnet->device[num] = NULL;
	free_netdev(net);

	return 0;
}

static int dvb_net_do_ioctl(struct file *file,
			    unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_net *dvbnet = dvbdev->priv;
	int ret = 0;

	if (((file->f_flags & O_ACCMODE) == O_RDONLY))
		return -EPERM;

	if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case NET_ADD_IF:
	{
		struct dvb_net_if *dvbnetif = parg;
		int result;

		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		if (!try_module_get(dvbdev->adapter->module)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		result = dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
		if (result < 0) {
			module_put(dvbdev->adapter->module);
			ret = result;
			goto ioctl_error;
		}
		dvbnetif->if_num = result;
		break;
	}
	case NET_GET_IF:
	{
		struct net_device *netdev;
		struct dvb_net_priv *priv_data;
		struct dvb_net_if *dvbnetif = parg;

		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
		    !dvbnet->state[dvbnetif->if_num]) {
			ret = -EINVAL;
			goto ioctl_error;
		}

		netdev = dvbnet->device[dvbnetif->if_num];

		priv_data = netdev_priv(netdev);
		dvbnetif->pid = priv_data->pid;
		dvbnetif->feedtype = priv_data->feedtype;
		break;
	}
	case NET_REMOVE_IF:
	{
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto ioctl_error;
		}
		if ((unsigned long) parg >= DVB_NET_DEVICES_MAX) {
			ret = -EINVAL;
			goto ioctl_error;
		}
		ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
		if (!ret)
			module_put(dvbdev->adapter->module);
		break;
	}

	/* binary compatibility cruft */
	case __NET_ADD_IF_OLD:
	{
		struct __dvb_net_if_old *dvbnetif = parg;
		int result;

		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		if (!try_module_get(dvbdev->adapter->module)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		result = dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
		if (result < 0) {
			module_put(dvbdev->adapter->module);
			ret = result;
			goto ioctl_error;
		}
		dvbnetif->if_num = result;
		break;
	}
	case __NET_GET_IF_OLD:
	{
		struct net_device *netdev;
		struct dvb_net_priv *priv_data;
		struct __dvb_net_if_old *dvbnetif = parg;

		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
		    !dvbnet->state[dvbnetif->if_num]) {
			ret = -EINVAL;
			goto ioctl_error;
		}

		netdev = dvbnet->device[dvbnetif->if_num];

		priv_data = netdev_priv(netdev);
		dvbnetif->pid = priv_data->pid;
		break;
	}
	default:
		ret = -ENOTTY;
		break;
	}

ioctl_error:
	mutex_unlock(&dvbnet->ioctl_mutex);
	return ret;
}

static long dvb_net_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
}

static int dvb_net_close(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_net *dvbnet = dvbdev->priv;

	dvb_generic_release(inode, file);

	if (dvbdev->users == 1 && dvbnet->exit == 1)
		wake_up(&dvbdev->wait_queue);
	return 0;
}


static const struct file_operations dvb_net_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = dvb_net_ioctl,
	.open = dvb_generic_open,
	.release = dvb_net_close,
	.llseek = noop_llseek,
};

static const struct dvb_device dvbdev_net = {
	.priv = NULL,
	.users = 1,
	.writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-net",
#endif
	.fops = &dvb_net_fops,
};

void dvb_net_release (struct dvb_net *dvbnet)
{
	int i;

	dvbnet->exit = 1;
	if (dvbnet->dvbdev->users < 1)
		wait_event(dvbnet->dvbdev->wait_queue,
			   dvbnet->dvbdev->users == 1);

	dvb_unregister_device(dvbnet->dvbdev);

	for (i = 0; i < DVB_NET_DEVICES_MAX; i++) {
		if (!dvbnet->state[i])
			continue;
		dvb_net_remove_if(dvbnet, i);
	}
}
EXPORT_SYMBOL(dvb_net_release);


int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
		  struct dmx_demux *dmx)
{
	int i;

	mutex_init(&dvbnet->ioctl_mutex);
	dvbnet->demux = dmx;

	for (i = 0; i < DVB_NET_DEVICES_MAX; i++)
		dvbnet->state[i] = 0;

	return dvb_register_device(adap, &dvbnet->dvbdev, &dvbdev_net,
				   dvbnet, DVB_DEVICE_NET);
}
EXPORT_SYMBOL(dvb_net_init);
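
/*
 * Usage sketch (illustration only, not compiled as part of this driver):
 * a minimal userspace fragment showing how the NET_ADD_IF ioctl handled by
 * dvb_net_do_ioctl() above is typically invoked to create a ULE interface.
 * The device path and PID below are assumptions and will differ per setup;
 * struct dvb_net_if, the DVB_NET_FEEDTYPE_* constants and NET_ADD_IF come
 * from <linux/dvb/net.h>.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dvb/net.h>
 *
 *	static int create_ule_if(void)
 *	{
 *		struct dvb_net_if params = {
 *			.pid      = 0x03e8,               // hypothetical PID carrying ULE
 *			.feedtype = DVB_NET_FEEDTYPE_ULE,
 *		};
 *		int fd = open("/dev/dvb/adapter0/net0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, NET_ADD_IF, &params) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		// params.if_num now holds the interface index; the kernel
 *		// names the device dvb0_<if_num>, which can then be brought
 *		// up and addressed like any other network interface.
 *		return params.if_num;
 *	}
 */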