/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

/*
 * Given a fragment, queue it on the fragment queue and if the fragment
 * is complete, send it back to ax25_rx_iframe.
 */
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn, *skbo;

	if (ax25->fragno != 0) {
		if (!(*skb->data & AX25_SEG_FIRST)) {
			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
				/* Enqueue fragment */
				ax25->fragno = *skb->data & AX25_SEG_REM;
				skb_pull(skb, 1);	/* skip fragno */
				ax25->fraglen += skb->len;
				skb_queue_tail(&ax25->frag_queue, skb);

				/* Last fragment received ? */
				if (ax25->fragno == 0) {
					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
							 ax25->fraglen,
							 GFP_ATOMIC);
					if (!skbn) {
						skb_queue_purge(&ax25->frag_queue);
						return 1;
					}

					skb_reserve(skbn, AX25_MAX_HEADER_LEN);

					skbn->dev = ax25->ax25_dev->dev;
					skbn->h.raw = skbn->data;
					skbn->nh.raw = skbn->data;

					/* Copy data from the fragments */
					while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
						memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
						kfree_skb(skbo);
					}

					ax25->fraglen = 0;

					if (ax25_rx_iframe(ax25, skbn) == 0)
						kfree_skb(skbn);
				}

				return 1;
			}
		}
	} else {
		/* First fragment received */
		if (*skb->data & AX25_SEG_FIRST) {
			skb_queue_purge(&ax25->frag_queue);
			ax25->fragno = *skb->data & AX25_SEG_REM;
			skb_pull(skb, 1);	/* skip fragno */
			ax25->fraglen = skb->len;
			skb_queue_tail(&ax25->frag_queue, skb);
			return 1;
		}
	}

	return 0;
}

/*
 * This is where all valid I frames are sent to, to be dispatched to
 * whichever protocol requires them.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	unsigned char pid;
	int queued = 0;

	if (skb == NULL) return 0;

	ax25_start_idletimer(ax25);

	pid = *skb->data;

	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		skb->dev = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		netif_rx(skb);
		return 1;
	}
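	/*
	 * AX.25 segmentation: the byte following the AX25_P_SEGMENT PID
	 * carries an AX25_SEG_FIRST flag plus a count of the segments still
	 * to come (AX25_SEG_REM); ax25_rx_fragment() reassembles the queued
	 * pieces once that count reaches zero.
	 */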
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}

/*
 * Higher level upcall for a LAPB frame
 */
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
	int queued = 0;

	if (ax25->state == AX25_STATE_0)
		return 0;

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		queued = ax25_std_frame_in(ax25, skb, type);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (dama || ax25->ax25_dev->dama.slave)
			queued = ax25_ds_frame_in(ax25, skb, type);
		else
			queued = ax25_std_frame_in(ax25, skb, type);
		break;
#endif
	}

	return queued;
}

static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
		    ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 * Process the AX.25/LAPB frame.
	 */

	skb->h.raw = skb->data;

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)	/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 * Pull off the AX.25 headers leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
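	/*
	 * The ~0x10 mask clears the P/F bit of the control byte, so UI
	 * frames are recognised whether or not the poll bit is set.
	 */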
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb->h.raw = skb->data + 2;	/* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
			kfree_skb(skb);
			return 0;
		}

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb, 2);	/* drop PID/CTRL */
			skb->h.raw = skb->data;
			skb->nh.raw = skb->data;
			skb->dev = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb, 2);
			skb->h.raw = skb->data;
			skb->nh.raw = skb->data;
			skb->dev = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;

		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 * Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 * Is connected mode supported on this device ?
	 * If not, should we DM the incoming frame (except DMs) or
	 * silently ignore them? For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) {
		kfree_skb(skb);
		return 0;
	}

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 * Process the frame. If it is queued up internally it
		 * returns one, otherwise we free it immediately. This
		 * routine itself wakes the user context layers so we do
		 * no further work.
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 * Never reply to a DM. Also ignore any connects for
		 * addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		kfree_skb(skb);
		return 0;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine) {
			kfree_skb(skb);
			return 0;
		}

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			return 0;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr = src;

	/*
	 * Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else
		kfree_skb(skb);

	return 0;
}

/*
 * Receive an AX.25 frame via a SLIP interface.
 */
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	skb->sk = NULL;		/* Initially we don't know who it's for */
	skb->destructor = NULL;	/* Who initializes this, dammit?! */

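	/*
	 * The low nibble of the leading KISS byte is the frame type; only
	 * type 0 (a data frame) is passed on, TNC control commands are
	 * dropped here.
	 */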
	if ((*skb->data & 0x0F) != 0) {
		kfree_skb(skb);	/* Not a KISS data frame */
		return 0;
	}

	skb_pull(skb, AX25_KISS_HEADER_LEN);	/* Remove the KISS byte */

	return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
}