xref: /linux/net/core/netpoll.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))
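
/*
 * For Ethernet/IPv4 this works out to 1460 + 8 (UDP header) +
 * 20 (IP header) + 14 (Ethernet header) = 1502 bytes per pooled skb,
 * comfortably within a standard 1514-byte Ethernet frame.
 */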

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test, which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

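/*
 * Worked example of the budget accounting in poll_napi() below: it
 * starts with a budget of 16; if one device's ->poll() consumes 5
 * packets, poll_one_napi() returns 16 - 5 = 11, which is then offered
 * to the next NAPI context on the list until the budget is exhausted.
 */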
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll(struct netpoll *np)
{
	netpoll_poll_dev(np->dev);
}
EXPORT_SYMBOL(netpoll_poll);

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* avoid sending messages out of order, and avoid recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until the next clock tick; at HZ=1000 that is
		 * jiffies_to_usecs(1)/USEC_PER_POLL = 1000/50 = 20 attempts
		 */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					dev->priv_flags |= IFF_IN_NETPOLL;
					status = ops->ndo_start_xmit(skb, dev);
					dev->priv_flags &= ~IFF_IN_NETPOLL;
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device: maybe there is some cleanup to do */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
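
/*
 * The frame assembled above is built back to front with skb_push(),
 * ending up laid out as:
 *
 *	| ethhdr (14) | iphdr (20) | udphdr (8) | payload (up to 1460) |
 */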

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, do an early inspection of
	   whether it is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * We only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			       (char *)(uh+1),
			       ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
			 np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
			 np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
			 np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
			 np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
			 np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
			 np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

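/*
 * Parse a netconsole-style configuration string of the form
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]
 *
 * Fields other than the target IP may be left empty to keep their
 * defaults, e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55"
 * (addresses here are illustrative).
 */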
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
					"is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
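
/*
 * Minimal usage sketch for a hypothetical client, in the style of
 * netconsole (names and addresses illustrative, error handling elided):
 *
 *	static struct netpoll np = {
 *		.name        = "example",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	np.remote_ip = in_aton("10.0.0.2");
 *	if (!netpoll_setup(&np))
 *		netpoll_send_udp(&np, "hello\n", 6);
 *	...
 *	netpoll_cleanup(&np);
 */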

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_rearming_delayed_work(&npinfo->tx_work);

		/* clean up after any last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
965