/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(ax25_frag_lock);

ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
        ax25_dev *ax25_dev;
        ax25_cb *ax25;

        /*
         * Take the default packet length for the device if zero is
         * specified.
         */
        if (paclen == 0) {
                if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                        return NULL;

                paclen = ax25_dev->values[AX25_VALUES_PACLEN];
        }

        /*
         * Look for an existing connection.
         */
        if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
                ax25_output(ax25, paclen, skb);
                return ax25;            /* It already existed */
        }

        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                return NULL;

        if ((ax25 = ax25_create_cb()) == NULL)
                return NULL;

        ax25_fillin_cb(ax25, ax25_dev);

        ax25->source_addr = *src;
        ax25->dest_addr   = *dest;

        if (digi != NULL) {
                ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
                if (ax25->digipeat == NULL) {
                        ax25_cb_put(ax25);
                        return NULL;
                }
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_std_establish_data_link(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        case AX25_PROTO_DAMA_SLAVE:
                if (ax25_dev->dama.slave)
                        ax25_ds_establish_data_link(ax25);
                else
                        ax25_std_establish_data_link(ax25);
                break;
#endif
        }

        /*
         * There is one ref for the state machine; a caller needs
         * one more to put it back, just like with the existing one.
         */
        ax25_cb_hold(ax25);

        ax25_cb_add(ax25);

        ax25->state = AX25_STATE_1;

        ax25_start_heartbeat(ax25);

        ax25_output(ax25, paclen, skb);

        return ax25;                    /* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);
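
/*
 * Usage sketch (illustrative only; this helper and its error code are
 * hypothetical, not part of the AX.25 code): the buffer handed to
 * ax25_send_frame() carries the AX.25 PID as its first byte, a paclen
 * of 0 selects the device default, the skb is not consumed on failure,
 * and the extra reference on the returned control block is dropped with
 * ax25_cb_put() once the caller no longer needs it.
 */
static inline int ax25_send_frame_example(struct sk_buff *skb,
                                          ax25_address *src,
                                          ax25_address *dest,
                                          struct net_device *dev)
{
        unsigned char *p;
        ax25_cb *ax25;

        p = skb_push(skb, 1);
        *p = AX25_P_NETROM;                     /* PID in front of the data */

        ax25 = ax25_send_frame(skb, 0, src, dest, NULL, dev);
        if (ax25 == NULL) {
                kfree_skb(skb);                 /* never queued, free it here */
                return -ENETUNREACH;
        }

        ax25_cb_put(ax25);                      /* drop the caller's reference */
        return 0;
}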
/*
 * All outgoing AX.25 I frames pass via this routine. Therefore this is
 * where the fragmentation of frames takes place. Frames longer than
 * paclen are split up: KA9Q style segmentation, unless the PID is
 * AX25_P_TEXT, in which case the data is simply chopped into plain
 * AX25_P_TEXT frames.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
        struct sk_buff *skbn;
        unsigned char *p;
        int frontlen, len, fragno, ka9qfrag, first = 1;

        if (paclen < 16) {
                WARN_ON_ONCE(1);
                kfree_skb(skb);
                return;
        }

        if ((skb->len - 1) > paclen) {
                if (*skb->data == AX25_P_TEXT) {
                        skb_pull(skb, 1);       /* skip PID */
                        ka9qfrag = 0;
                } else {
                        paclen -= 2;            /* Allow for fragment control info */
                        ka9qfrag = 1;
                }

                fragno = skb->len / paclen;
                if (skb->len % paclen == 0) fragno--;

                frontlen = skb_headroom(skb);   /* Address space + CTRL */

                while (skb->len > 0) {
                        spin_lock_bh(&ax25_frag_lock);
                        if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
                                spin_unlock_bh(&ax25_frag_lock);
                                printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
                                return;
                        }

                        if (skb->sk != NULL)
                                skb_set_owner_w(skbn, skb->sk);

                        spin_unlock_bh(&ax25_frag_lock);

                        len = (paclen > skb->len) ? skb->len : paclen;

                        if (ka9qfrag == 1) {
                                skb_reserve(skbn, frontlen + 2);
                                skb_set_network_header(skbn,
                                                       skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 2);

                                *p++ = AX25_P_SEGMENT;

                                *p = fragno--;
                                if (first) {
                                        *p |= AX25_SEG_FIRST;
                                        first = 0;
                                }
                        } else {
                                skb_reserve(skbn, frontlen + 1);
                                skb_set_network_header(skbn,
                                                       skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 1);
                                *p = AX25_P_TEXT;
                        }

                        skb_pull(skb, len);
                        skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
                }

                kfree_skb(skb);
        } else {
                skb_queue_tail(&ax25->write_queue, skb);          /* Throw it on the queue */
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_kick(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        /*
         * A DAMA slave is _required_ to work as normal AX.25L2V2
         * if no DAMA master is available.
         */
        case AX25_PROTO_DAMA_SLAVE:
                if (!ax25->ax25_dev->dama.slave)
                        ax25_kick(ax25);
                break;
#endif
        }
}
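
/*
 * Worked example of the segmentation above (figures are illustrative
 * only): with paclen = 256 and a 600 byte sk_buff (PID plus data) whose
 * PID is not AX25_P_TEXT, paclen is first reduced to 254 to make room
 * for the fragment header, giving fragno = 600 / 254 = 2.  Three
 * fragments are queued, each prefixed with AX25_P_SEGMENT and a
 * descending counter:
 *
 *      fragment 1: 254 bytes, counter = 2 | AX25_SEG_FIRST
 *      fragment 2: 254 bytes, counter = 1
 *      fragment 3:  92 bytes, counter = 0
 *
 * The original PID byte travels as the first data byte of the first
 * fragment, so the receiver can restore it on reassembly.
 */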
/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
        unsigned char *frame;

        if (skb == NULL)
                return;

        skb_reset_network_header(skb);

        if (ax25->modulus == AX25_MODULUS) {
                frame = skb_push(skb, 1);

                *frame = AX25_I;
                *frame |= (poll_bit) ? AX25_PF : 0;
                *frame |= (ax25->vr << 5);
                *frame |= (ax25->vs << 1);
        } else {
                frame = skb_push(skb, 2);

                frame[0] = AX25_I;
                frame[0] |= (ax25->vs << 1);
                frame[1] = (poll_bit) ? AX25_EPF : 0;
                frame[1] |= (ax25->vr << 1);
        }

        ax25_start_idletimer(ax25);

        ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

void ax25_kick(ax25_cb *ax25)
{
        struct sk_buff *skb, *skbn;
        int last = 1;
        unsigned short start, end, next;

        if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
                return;

        if (ax25->condition & AX25_COND_PEER_RX_BUSY)
                return;

        if (skb_peek(&ax25->write_queue) == NULL)
                return;

        start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
        end   = (ax25->va + ax25->window) % ax25->modulus;

        if (start == end)
                return;

        /*
         * Transmit data until either we're out of data to send or
         * the window is full. Send a poll on the final I frame if
         * the window is filled.
         */

        /*
         * Dequeue the frame and copy it.
         * Check for race with ax25_clear_queues().
         */
        skb = skb_dequeue(&ax25->write_queue);
        if (!skb)
                return;

        ax25->vs = start;

        do {
                if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                        skb_queue_head(&ax25->write_queue, skb);
                        break;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                next = (ax25->vs + 1) % ax25->modulus;
                last = (next == end);

                /*
                 * Transmit the frame copy.
                 * bke 960114: do not set the Poll bit on the last frame
                 * in DAMA mode.
                 */
                switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
                case AX25_PROTO_STD_SIMPLEX:
                case AX25_PROTO_STD_DUPLEX:
                        ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
                        break;

#ifdef CONFIG_AX25_DAMA_SLAVE
                case AX25_PROTO_DAMA_SLAVE:
                        ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
                        break;
#endif
                }

                ax25->vs = next;

                /*
                 * Requeue the original data frame.
                 */
                skb_queue_tail(&ax25->ack_queue, skb);

        } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

        ax25->condition &= ~AX25_COND_ACK_PENDING;

        if (!ax25_t1timer_running(ax25)) {
                ax25_stop_t3timer(ax25);
                ax25_calculate_t1(ax25);
                ax25_start_t1timer(ax25);
        }
}

void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
        struct sk_buff *skbn;
        unsigned char *ptr;
        int headroom;

        if (ax25->ax25_dev == NULL) {
                ax25_disconnect(ax25, ENETUNREACH);
                return;
        }

        headroom = ax25_addr_size(ax25->digipeat);

        if (skb_headroom(skb) < headroom) {
                if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
                        printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
                        kfree_skb(skb);
                        return;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                consume_skb(skb);
                skb = skbn;
        }

        ptr = skb_push(skb, headroom);

        ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

        ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim to dev_queue_xmit to add the KISS control byte, and do
 * any packet forwarding in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned char *ptr;

        skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

        ptr = skb_push(skb, 1);
        *ptr = 0x00;                    /* KISS */

        dev_queue_xmit(skb);
}

int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
        if (ax25->vs == nr) {
                ax25_frames_acked(ax25, nr);
                ax25_calculate_rtt(ax25);
                ax25_stop_t1timer(ax25);
                ax25_start_t3timer(ax25);
                return 1;
        } else {
                if (ax25->va != nr) {
                        ax25_frames_acked(ax25, nr);
                        ax25_calculate_t1(ax25);
                        ax25_start_t1timer(ax25);
                        return 1;
                }
        }
        return 0;
}
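
/*
 * Illustration for ax25_check_iframes_acked() (hypothetical figures):
 * with modulus 8, va = 3 and vs = 6, I frames 3, 4 and 5 are
 * outstanding.  A received N(R) of 6 (== vs) acknowledges all of them,
 * so T1 is stopped and T3 started.  An N(R) of 5 acknowledges frames 3
 * and 4 only, so T1 is recalculated and restarted.  An N(R) of 3
 * (== va) acknowledges nothing new and the function returns 0.
 */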