/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/version.h>
#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};
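
/*
 * Layout note: the 20-byte IPoIB hardware address above is four bytes
 * of QPN (here 0x00ffffff, the broadcast/multicast QPN) followed by
 * the 16-byte GID -- in this case the IPv4 broadcast MGID
 * ff12:401b::ffff:ffff.  Bytes 8 and 9 (the MGID's P_Key field) are
 * left zero here and filled in per port by ipoib_add_port().
 */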

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		return -EINVAL;

	if (ipoib_ib_dev_up(dev))
		return -EINVAL;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	return 0;
}

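/*
 * Note: IPOIB_PACKET_SIZE is the IB payload size and IPOIB_ENCAP_LEN
 * is the 4-byte IPoIB encapsulation header (struct ipoib_header: a
 * 16-bit ethertype plus 16 reserved bits, see ipoib_hard_header()
 * below), so the largest allowed MTU is the IB payload minus that
 * header.
 */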
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

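/*
 * Unicast path records are cached in an rb-tree (priv->path_tree)
 * keyed by destination GID; every entry is also linked on
 * priv->path_list so ipoib_flush_paths() can drop them in one pass.
 */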
static struct ipoib_path *__path_find(struct net_device *dev,
				      union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid->raw, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		wait_for_completion(&path->done);
		path_free(dev, path);
	}
}

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (pathrec)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(pathrec->dlid),
			.sl	       = pathrec->sl,
			.port_num      = priv->port
		};
		int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

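		/*
		 * The static_rate computed here appears to act as an
		 * inter-packet delay: 0 means send at the full local
		 * link rate, and a value of n throttles us to roughly
		 * 1/(n+1) of it, to match a slower path.
		 */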
		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
			  av.static_rate, priv->local_rate,
			  ib_sa_rate_enum_to_int(pathrec->rate));

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry(neigh, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	} else
		path->query = NULL;

	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
					  union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	path = kmalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev          = dev;
	path->pathrec.dlid = 0;
	path->ah           = NULL;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);
	path->query = NULL;
	init_completion(&path->done);

	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
	path->pathrec.sgid      = priv->local_gid;
	path->pathrec.pkey      = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	path->query_id =
		ib_sa_path_rec_get(priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}

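/*
 * skb->dst->neighbour->ha holds the 20-byte IPoIB address: the
 * destination QPN in the first four bytes and the GID in the
 * remaining sixteen, which is why the GID lookups below use ha + 4.
 */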
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_head_init(&neigh->queue);
	neigh->neighbour = skb->dst->neighbour;
	*to_ipoib_neigh(skb->dst->neighbour) = neigh;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
		if (!path)
			/* neigh is not yet on path->neigh_list: skip the list_del */
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->pathrec.dlid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
	} else {
		neigh->ah  = NULL;

		/*
		 * Start the path query before queueing the skb, so that
		 * a failed query start can't leave the skb on
		 * neigh->queue and then free it a second time below.
		 */
		if (!path->query && path_rec_start(dev, path))
			goto err;

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock(&priv->lock);
	return;

err:
	list_del(&neigh->list);
err_path:
	*to_ipoib_neigh(skb->dst->neighbour) = NULL;
	neigh->neighbour->ops->destructor = NULL;
	kfree(neigh);

	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}

static void path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

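/*
 * Packets that had no neighbour at hard_header time reach us with the
 * 20-byte destination address still prepended as a pseudoheader (see
 * ipoib_hard_header()); unicast ARP replies are the usual case here.
 */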
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (phdr->hwaddr + 4));
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->pathrec.dlid) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}

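/*
 * With NETIF_F_LLTX set we do our own TX locking: if the trylock
 * fails, returning NETDEV_TX_LOCKED asks the core to requeue and
 * retry the packet.
 */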
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/*
	 * Check if our queue is stopped.  Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb->dst && skb->dst->neighbour) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (likely(neigh->ah)) {
			ipoib_send(dev, skb, neigh->ah,
				   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
		} else {
			/* unicast GID -- should be ARP reply */

			if (be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   be32_to_cpup((__be32 *) phdr->hwaddr),
					   IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %ld\n",
		   jiffies - dev->trans_start);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     void *daddr, void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if (!skb->dst || !skb->dst->neighbour) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	schedule_work(&priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	ipoib_dbg(priv,
		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
		  be32_to_cpup((__be32 *) n->ha),
		  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

	spin_lock_irqsave(&priv->lock, flags);

	neigh = *to_ipoib_neigh(n);
	if (neigh) {
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		*to_ipoib_neigh(n) = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

static int ipoib_neigh_setup(struct neighbour *neigh)
{
	/*
	 * Is this kosher?  I can't find anybody in the kernel that
	 * sets neigh->destructor, so we should be able to set it here
	 * without trouble.
	 */
	neigh->ops->destructor = ipoib_neigh_destructor;

	return 0;
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_setup = ipoib_neigh_setup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */

	priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, IPOIB_RX_RING_SIZE);
		goto out;
	}
	memset(priv->rx_ring, 0,
	       IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf));

	priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, IPOIB_TX_RING_SIZE);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0,
	       IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf));

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_file(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	if (priv->rx_ring) {
		kfree(priv->rx_ring);
		priv->rx_ring = NULL;
	}

	if (priv->tx_ring) {
		kfree(priv->tx_ring);
		priv->tx_ring = NULL;
	}
}

static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open 		 = ipoib_open;
	dev->stop 		 = ipoib_stop;
	dev->change_mtu 	 = ipoib_change_mtu;
	dev->hard_start_xmit 	 = ipoib_start_xmit;
	dev->get_stats 		 = ipoib_get_stats;
	dev->tx_timeout 	 = ipoib_timeout;
	dev->hard_header 	 = ipoib_hard_header;
	dev->set_multicast_list  = ipoib_set_mcast_list;
	dev->neigh_setup         = ipoib_neigh_setup_dev;

	dev->watchdog_timeo 	 = HZ;

	dev->rebuild_header 	 = NULL;
	dev->set_mac_address 	 = NULL;
	dev->header_cache_update = NULL;

	dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len 	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len 		 = INFINIBAND_ALEN;
	dev->type 		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len 	 = IPOIB_TX_RING_SIZE * 2;
	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu 		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu 	 = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	init_MUTEX(&priv->mcast_mutex);
	init_MUTEX(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct ipoib_dev_priv *priv =
		netdev_priv(container_of(cdev, struct net_device, class_dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

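/*
 * Child interfaces (one per additional P_Key) are managed from user
 * space through these sysfs attributes, e.g. something like
 * "echo 0x8001 > /sys/class/net/ib0/create_child" -- the exact path
 * shown here is illustrative.
 */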
static ssize_t create_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
			     pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
				pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return class_device_create_file(&dev->class_dev,
					&class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	if (ipoib_create_debug_file(priv->dev))
		goto debug_failed;

	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_create_child))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_file(priv->dev);

debug_failed:
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

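	/*
	 * Port 0 on a switch is its management port; CAs and routers
	 * number their physical ports starting at 1.
	 */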
	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}
}

static int __init ipoib_init_module(void)
{
	int ret;

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);