xref: /linux/drivers/infiniband/core/multicast.c (revision f3a8b6645dc2e60d11f20c1c23afd964ff4e55ae)
/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[NUM_JOIN_MEMBERSHIP_TYPES];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}
/*
 * A multicast group has four types of members: full member, non member,
 * sendonly non member, and sendonly full member.  We need to keep track
 * of the number of members of each type based on their join state.
 * Adjust the number of members that belong to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
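
/*
 * Illustrative aside (not used by the driver): join_state is a bitmask
 * with one bit per membership type, following the MCMemberRecord layout
 * assumed here:
 *
 *	0x1 - full member
 *	0x2 - non member
 *	0x4 - sendonly non member
 *	0x8 - sendonly full member
 *
 * For example, adjust_membership(group, 0x5, 1) increments members[0]
 * and members[2]: one more full member and one more sendonly non member.
 */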

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to that no longer have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}
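
/*
 * Worked example (illustrative): with members[] = { 0, 1, 0, 0 }, the
 * zero-count types contribute 0x1 | 0x4 | 0x8 = 0xd; masking with a
 * current join_state of 0x3 yields 0x1, so only the full member state
 * still needs to be left with the SA.
 */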

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}
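
/*
 * Example (illustrative): a join request carrying mtu_selector == IB_SA_GT
 * and mtu == IB_MTU_1024 is only compatible with a group whose actual MTU
 * is strictly greater than 1024; check_selector() returns nonzero on a
 * mismatch, which cmp_rec() below turns into -EINVAL.
 */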

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	return (ret > 0) ? 0 : ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID     |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	return (ret > 0) ? 0 : ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}

static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}
/*
 * Fail a join request if it is still active, i.e. still at the head of the
 * pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;

		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof(group->rec.mgid));
		group->rec = *rec;
		if (mgids_changed) {
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof(mgid0));
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
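
/*
 * Usage sketch (hypothetical caller, not part of this file): fill in at
 * least the MGID, port GID, and join state, then join asynchronously and
 * receive the result in the callback.  Returning nonzero from the callback
 * makes the core call ib_sa_free_multicast() on the caller's behalf.
 *
 *	static int my_join_handler(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (status)
 *			pr_err("multicast join failed: %d\n", status);
 *		return status;
 *	}
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_join_handler, my_context);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 */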

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct net_device *ndev,
			     enum ib_gid_type gid_type,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	if (rdma_protocol_roce(device, port_num)) {
		ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
						 gid_type, port_num,
						 ndev,
						 &gid_index);
	} else if (rdma_protocol_ib(device, port_num)) {
		ret = ib_find_cached_gid(device, &rec->port_gid,
					 IB_GID_TYPE_IB, NULL, &p,
					 &gid_index);
	} else {
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
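
/*
 * Sketch (hypothetical caller): once a join completes, the record passed
 * to the callback can be turned into an address handle for posting sends
 * to the group:
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_mcmember(device, port_num, &mc->rec, ndev,
 *				      IB_GID_TYPE_IB, &ah_attr))
 *		ah = ib_create_ah(pd, &ah_attr);
 */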

static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	dev->start_port = rdma_start_port(device);
	dev->end_port = rdma_end_port(device);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device, void *client_data)
{
	struct mcast_device *dev = client_data;
	struct mcast_port *port;
	int i;

	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}