xref: /linux/net/core/netpoll.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;

static DEFINE_SPINLOCK(queue_lock);
static int queue_depth;
static struct sk_buff *queue_head, *queue_tail;

static atomic_t trapped;

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))
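
/*
 * On Ethernet this works out to 1460 + 8 + 20 + 14 = 1502 bytes, so
 * a pool skb always has room for a full MAX_UDP_CHUNK payload plus
 * the UDP, IP, and Ethernet headers.
 */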

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

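/*
 * Deferred transmit path: skbs that cannot be sent from the caller's
 * context are chained on queue_head/queue_tail and later pushed out
 * through dev_queue_xmit() from keventd via this work routine.
 */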
static void queue_process(void *p)
{
	unsigned long flags;
	struct sk_buff *skb;

	while (queue_head) {
		spin_lock_irqsave(&queue_lock, flags);

		skb = queue_head;
		queue_head = skb->next;
		if (skb == queue_tail)
			queue_head = NULL;

		queue_depth--;

		spin_unlock_irqrestore(&queue_lock, flags);

		dev_queue_xmit(skb);
	}
}

static DECLARE_WORK(send_queue, queue_process, NULL);

void netpoll_queue(struct sk_buff *skb)
{
	unsigned long flags;

	/* check the queue depth under queue_lock so that two CPUs
	 * cannot both slip past the limit */
	spin_lock_irqsave(&queue_lock, flags);
	if (queue_depth == MAX_QUEUE_DEPTH) {
		spin_unlock_irqrestore(&queue_lock, flags);
		__kfree_skb(skb);
		return;
	}
	if (!queue_head)
		queue_head = skb;
	else
		queue_tail->next = skb;
	queue_tail = skb;
	queue_depth++;
	spin_unlock_irqrestore(&queue_lock, flags);

	schedule_work(&send_queue);
}

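/*
 * Verify the UDP checksum of a received packet: accept checksum-less
 * datagrams and anything the hardware already verified, fold in the
 * pseudo-header for CHECKSUM_HW, and fall back to a full software
 * checksum otherwise.
 */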
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			unsigned short ulen, u32 saddr, u32 daddr)
{
	unsigned int psum;

	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_HW &&
	    !(u16)csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * Where there is bidirectional communication, reading only one
 * message at a time can lead to packets being dropped by the network
 * adapter, forcing superfluous retries and possibly timeouts. Thus,
 * we set our budget to a value greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	int budget = 16;

	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
	    npinfo->poll_owner != smp_processor_id() &&
	    spin_trylock(&npinfo->poll_lock)) {
		npinfo->rx_flags |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
		spin_unlock(&npinfo->poll_lock);
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (unlikely(!npi))
		return;

	skb = skb_dequeue(&npi->arp_tx);

	while (skb != NULL) {
		arp_reply(skb);
		skb = skb_dequeue(&npi->arp_tx);
	}
}

void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_list_lock, flags);
	while (nr_skbs < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		skb->next = skbs;
		skbs = skb;
		nr_skbs++;
	}
	spin_unlock_irqrestore(&skb_list_lock, flags);
}

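/*
 * Drain this CPU's skb completion queue ourselves: with interrupts
 * off and the machine possibly wedged, the NET_TX softirq that would
 * normally free these skbs may never get a chance to run.
 */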
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

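/*
 * Get an skb for a netpoll packet: try a fresh atomic allocation
 * first, fall back to the preallocated pool, and if both fail keep
 * polling the device in the hope of completing old tx skbs.
 */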
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int once = 1, count = 0;
	unsigned long flags;
	struct sk_buff *skb = NULL;

	zap_completion_queue();
repeat:
	if (nr_skbs < MAX_SKBS)
		refill_skbs();

	skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		spin_lock_irqsave(&skb_list_lock, flags);
		skb = skbs;
		if (skb) {
			skbs = skb->next;
			skb->next = NULL;
			nr_skbs--;
		}
		spin_unlock_irqrestore(&skb_list_lock, flags);
	}

	if (!skb) {
		count++;
		if (once && (count == 1000000)) {
			printk(KERN_ERR "out of netpoll skbs!\n");
			once = 0;
		}
		netpoll_poll(np);
		goto repeat;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

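/*
 * Transmit an skb right now, bypassing the qdisc layer: take the tx
 * lock, hand the skb to the driver, and busy-poll with retries while
 * the device stays busy.  When called recursively (this CPU already
 * owns the poll or tx lock) the skb is dropped or handed to the
 * client's drop handler instead.
 */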
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status;
	struct netpoll_info *npinfo;

	if (!np || !np->dev || !netif_running(np->dev)) {
		__kfree_skb(skb);
		return;
	}

	npinfo = np->dev->npinfo;

	/* avoid recursion */
	if (npinfo->poll_owner == smp_processor_id() ||
	    np->dev->xmit_lock_owner == smp_processor_id()) {
		if (np->drop)
			np->drop(skb);
		else
			__kfree_skb(skb);
		return;
	}

	do {
		npinfo->tries--;
		netif_tx_lock(np->dev);

		/*
		 * network drivers do not expect to be called if the queue is
		 * stopped.
		 */
		status = NETDEV_TX_BUSY;
		if (!netif_queue_stopped(np->dev))
			status = np->dev->hard_start_xmit(skb, np->dev);

		netif_tx_unlock(np->dev);

		/* success */
		if (!status) {
			npinfo->tries = MAX_RETRIES; /* reset */
			return;
		}

		/* transmit busy */
		netpoll_poll(np);
		udelay(50);
	} while (npinfo->tries > 0);

	/* out of retries; the driver never accepted the skb, so free it */
	__kfree_skb(skb);
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;

	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
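
/*
 * Example (hypothetical, modelled on how netconsole drives this API):
 * a client fills in a struct netpoll, binds it to a device with
 * netpoll_setup(), and then transmits with netpoll_send_udp().
 *
 *	static struct netpoll np = {
 *		.name		= "example",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_ip	= 0xc0a80002,	// 192.168.0.2, host order
 *		.remote_mac	= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, buf, len);
 *
 * local_ip and local_mac are left zero here so netpoll_setup() fills
 * them in from the device.
 */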
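/*
 * Answer an ARP request for the netpoll IP address directly from
 * netpoll context; while we are trapped in this code the normal ARP
 * stack may never see the request.
 */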
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	u32 sip, tip;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1) + skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */

	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 np->remote_mac, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 826) uses hardware type 1.
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

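/*
 * Hand a received packet to the netpoll client if it matches the
 * configured UDP flow.  Returns 1 when netpoll consumed the skb (or,
 * while trapped, for every packet), 0 to let the normal stack see it.
 */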
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	/* read the header only after pskb_may_pull(), which may
	 * reallocate the skb data area */
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

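/*
 * Parse a config string of the form
 *
 *	[local_port]@[local_ip]/[dev],[remote_port]@<remote_ip>/[remote_mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Empty fields keep their defaults; only the remote IP is mandatory.
 */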
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int i;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;

		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));

	if (*cur != 0) {
		/* MAC address: five colon-terminated octets, then one more */
		for (i = 0; i < 5; i++) {
			if ((delim = strchr(cur, ':')) == NULL)
				goto parse_failed;
			*delim = 0;
			np->remote_mac[i] = simple_strtol(cur, NULL, 16);
			cur = delim + 1;
		}
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	printk(KERN_INFO "%s: remote ethernet address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       np->name,
	       np->remote_mac[0],
	       np->remote_mac[1],
	       np->remote_mac[2],
	       np->remote_mac[3],
	       np->remote_mac[4],
	       np->remote_mac[5]);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev_name[0])
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -1;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo)
			goto release;

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;
		spin_lock_init(&npinfo->poll_lock);
		npinfo->poll_owner = -1;
		npinfo->tries = MAX_RETRIES;
		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
	} else
		npinfo = ndev->npinfo;

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, np->dev_name);
			rtnl_unlock();
			goto release;
		}
		rtnl_unlock();

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return -1;
}

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo && npinfo->rx_np == np) {
			spin_lock_irqsave(&npinfo->rx_lock, flags);
			npinfo->rx_np = NULL;
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
		}
		dev_put(np->dev);
	}

	np->dev = NULL;
}

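/*
 * While "trapped" is non-zero, __netpoll_rx() consumes every incoming
 * packet instead of passing it up the stack; clients such as network
 * debuggers use this to take over the interface completely.
 */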
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
EXPORT_SYMBOL(netpoll_queue);