Lines Matching +full:- +full:group
/*
 *	- Redistributions of source code must retain the above ...
 *	- Redistributions in binary form must reproduce the above ...
 */

#include <linux/dma-mapping.h>
struct mcast_member {
	/* ... */
	struct mcast_group	*group;
	/* ... */
};
static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (refcount_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (refcount_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has four types of members: full member, non member,
 * sendonly non member and sendonly full member.  We need to keep track of
 * the number of members of each type based on their join state.  Adjust
 * the number of members that belong to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

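/*
 * Worked example (illustrative, assuming the IBA JoinState bit layout:
 * bit 0 = full member, bit 1 = non member, bits 2-3 = the sendonly types):
 *
 *	adjust_membership(group, 0x1, 1);   // a full member joins
 *	adjust_membership(group, 0x3, 1);   // a full + non member joins
 *	adjust_membership(group, 0x1, -1);  // the first one leaves
 *
 * leaves group->members[] = {1, 1, 0, 0}: one full member and one non
 * member remain, so neither join state can be left at the SA yet.
 */
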
/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}

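/*
 * Worked example, continued (same illustrative bit layout): if the full
 * member above now leaves, members[] becomes {0, 1, 0, 0}.  Types 0, 2
 * and 3 are empty, so leave_state = 0x1 | 0x4 | 0x8 = 0xd; masking with
 * the SA-visible group->rec.join_state of 0x3 yields 0x1, i.e. a leave
 * is sent for the full-member state only, while the remaining non
 * member stays joined.
 */
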
static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */
	return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	return (ret > 0) ? 0 : ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	return (ret > 0) ? 0 : ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		refcount_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}

static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		refcount_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		refcount_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;

		if (ib_find_pkey(group->port->dev->device,
				 group->port->port_num, be16_to_cpu(rec->pkey),
				 &pkey_index))
			pkey_index = MCAST_INVALID_PKEY_INDEX;

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof(group->rec.mgid));
		group->rec = *rec;
		if (mgids_changed) {
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof mgid0);
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		refcount_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u32 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	refcount_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/* Save the return pointer before queuing the join: the user's
	 * callback could free the multicast structure before we return. */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);

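/*
 * A hedged usage sketch (not part of this file), loosely modeled on how
 * IPoIB drives this API.  The helper names my_join_done() and
 * join_group_example() are hypothetical, the comp_mask is one minimal
 * choice, and exact signatures vary across kernel versions.
 */
static int my_join_done(int status, struct ib_sa_multicast *multicast)
{
	if (status)
		pr_warn("SA multicast join failed: %d\n", status);
	/* On success, multicast->rec holds the SA's view of the group
	 * (MLID, rate, MTU, ...).  Returning non-zero tells the core to
	 * release the membership, as the fail/free paths above show. */
	return status;
}

static struct ib_sa_multicast *join_group_example(struct ib_sa_client *client,
						  struct ib_device *device,
						  u32 port_num,
						  union ib_gid *mgid,
						  union ib_gid *port_gid)
{
	struct ib_sa_mcmember_rec rec = {};

	rec.mgid = *mgid;
	rec.port_gid = *port_gid;
	rec.join_state = 0x1;	/* full member */

	return ib_sa_join_multicast(client, device, port_num, &rec,
				    IB_SA_MCMEMBER_REC_MGID |
				    IB_SA_MCMEMBER_REC_PORT_GID |
				    IB_SA_MCMEMBER_REC_JOIN_STATE,
				    GFP_KERNEL, my_join_done, NULL);
}
/*
 * The returned pointer is later handed to ib_sa_free_multicast(), shown
 * next, which is why ib_sa_join_multicast() saves it before queue_join()
 * can trigger the callback.
 */
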
void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

/**
 * ib_init_ah_from_mcmember - Initialize AH attribute from multicast
 *	member record and the GID of the device.
 */
int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct net_device *ndev,
			     enum ib_gid_type gid_type,
			     struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *sgid_attr;

	/* The GID table is not based on the netdevice for the IB link
	 * layer, so ignore ndev during the search. */
	if (rdma_protocol_ib(device, port_num))
		ndev = NULL;
	else if (!rdma_protocol_roce(device, port_num))
		return -EINVAL;

	sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid,
					  gid_type, port_num, ndev);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid));
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);
	rdma_move_grh_sgid_attr(ah_attr, &rec->mgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				sgid_attr);
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);

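/*
 * A hedged usage sketch: once a record has been obtained (for example via
 * ib_sa_get_mcmember_rec() above), it can seed an address handle for sends
 * to the group.  Modeled on how IPoIB consumes this call; the NULL ndev
 * and IB_GID_TYPE_IB arguments assume an IB link layer, the helper name
 * mcast_ah_example() is hypothetical, and signatures differ in older trees.
 */
static struct ib_ah *mcast_ah_example(struct ib_pd *pd,
				      struct ib_device *device, u32 port_num,
				      struct ib_sa_mcmember_rec *rec)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_from_mcmember(device, port_num, rec, NULL,
				       IB_GID_TYPE_IB, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, 0);
	/* ib_init_ah_from_mcmember() moved an sgid_attr reference into
	 * ah_attr; drop it now that the AH holds its own reference. */
	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}
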
static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static int mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
		      GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->start_port = rdma_start_port(device);
	dev->end_port = rdma_end_port(device);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		refcount_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return -EOPNOTSUPP;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
	return 0;
}

static void mcast_remove_one(struct ib_device *device, void *client_data)
{
	struct mcast_device *dev = client_data;
	struct mcast_port *port;
	int i;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
	if (!mcast_wq)
		return -ENOMEM;
	/* ... */
}