Lines matching "group" in drivers/infiniband/hw/mlx4/mcg.c
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
50 #define mcg_warn_group(group, format, arg...) \ argument
51 pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_debug_group(group, format, arg...) \ argument
55 pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
56 (group)->name, (group)->demux->port, ## arg)
58 #define mcg_error_group(group, format, arg...) \ argument
59 pr_err(" %16s: " format, (group)->name, ## arg)
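Aside: the three macros above wrap pr_warn()/pr_debug()/pr_err() so every message carries the calling function, the source line, and the group's name and port. Below is a minimal userspace sketch of the same variadic-macro pattern, not part of the file; demo_group and demo_warn are invented names, and ##__VA_ARGS__ is the GNU extension that the kernel's "## arg" relies on.

#include <stdio.h>

struct demo_group {
	char name[33];	/* 32 hex digits + NUL, matching the "%016llx%016llx" name format used below */
	int port;
};

#define demo_warn(group, fmt, ...) \
	fprintf(stderr, "%s-%d: %16s (port %d): WARNING: " fmt, \
		__func__, __LINE__, (group)->name, (group)->port, ##__VA_ARGS__)

int main(void)
{
	struct demo_group g = { .name = "ff12401b0000000000000000deadbeef", .port = 1 };

	demo_warn(&g, "refcount is %d\n", 3);	/* "main-<line>: <name> (port 1): WARNING: refcount is 3" */
	return 0;
}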
136 struct mcast_group *group; member
144 mcg_warn_group(group, "did not expect to reach zero\n"); \
165 struct rb_node *node = ctx->mcg_table.rb_node; in mcast_find()
166 struct mcast_group *group; in mcast_find() local
170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
171 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
173 return group; in mcast_find()
176 node = node->rb_left; in mcast_find()
178 node = node->rb_right; in mcast_find()
184 struct mcast_group *group) in mcast_insert() argument
186 struct rb_node **link = &ctx->mcg_table.rb_node; in mcast_insert()
195 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
196 sizeof group->rec.mgid); in mcast_insert()
198 link = &(*link)->rb_left; in mcast_insert()
200 link = &(*link)->rb_right; in mcast_insert()
204 rb_link_node(&group->node, parent, link); in mcast_insert()
205 rb_insert_color(&group->node, &ctx->mcg_table); in mcast_insert()
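Aside: mcast_find() and mcast_insert() above keep a port's groups in an rb-tree keyed by the raw 16-byte MGID and ordered by memcmp(): find descends left or right on the comparison result, and insert walks to a leaf, links the new node there, and returns any existing group with the same MGID. Below is a userspace sketch of that memcmp-keyed descent using a plain, unbalanced binary search tree in place of the kernel rb-tree; every name in it is invented for illustration.

#include <stdio.h>
#include <string.h>

struct gid { unsigned char raw[16]; };

struct bst_node {
	struct gid key;
	struct bst_node *left, *right;
};

static struct bst_node *bst_find(struct bst_node *node, const struct gid *mgid)
{
	while (node) {
		int ret = memcmp(mgid->raw, node->key.raw, sizeof(node->key.raw));

		if (!ret)
			return node;
		node = ret < 0 ? node->left : node->right;
	}
	return NULL;
}

/* Returns an existing node with the same key, or NULL after linking the new one. */
static struct bst_node *bst_insert(struct bst_node **root, struct bst_node *new_node)
{
	struct bst_node **link = root;

	while (*link) {
		int ret = memcmp(new_node->key.raw, (*link)->key.raw,
				 sizeof(new_node->key.raw));

		if (!ret)
			return *link;
		link = ret < 0 ? &(*link)->left : &(*link)->right;
	}
	*link = new_node;	/* the driver does rb_link_node() + rb_insert_color() here */
	return NULL;
}

int main(void)
{
	struct bst_node *root = NULL;
	struct bst_node a = { .key.raw = { 0xff, 0x12 } };
	struct bst_node b = { .key.raw = { 0xff, 0x13 } };

	bst_insert(&root, &a);
	bst_insert(&root, &b);
	printf("b found: %s\n", bst_find(root, &b.key) == &b ? "yes" : "no");
	return 0;
}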
211 struct mlx4_ib_dev *dev = ctx->dev; in send_mad_to_wire()
215 spin_lock_irqsave(&dev->sm_lock, flags); in send_mad_to_wire()
216 if (!dev->sm_ah[ctx->port - 1]) { in send_mad_to_wire()
218 spin_unlock_irqrestore(&dev->sm_lock, flags); in send_mad_to_wire()
219 return -EAGAIN; in send_mad_to_wire()
221 mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); in send_mad_to_wire()
222 spin_unlock_irqrestore(&dev->sm_lock, flags); in send_mad_to_wire()
223 return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), in send_mad_to_wire()
224 ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, in send_mad_to_wire()
231 struct mlx4_ib_dev *dev = ctx->dev; in send_mad_to_slave()
232 struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1]; in send_mad_to_slave()
238 return -EAGAIN; in send_mad_to_slave()
240 rdma_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); in send_mad_to_slave()
242 if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index)) in send_mad_to_slave()
243 return -EINVAL; in send_mad_to_slave()
246 wc.port_num = ctx->port; in send_mad_to_slave()
249 return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad); in send_mad_to_slave()
252 static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad) in send_join_to_wire() argument
262 sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0]; in send_join_to_wire()
265 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_join_to_wire()
266 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_join_to_wire()
268 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_join_to_wire()
272 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_join_to_wire()
279 static int send_leave_to_wire(struct mcast_group *group, u8 join_state) in send_leave_to_wire() argument
292 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_leave_to_wire()
293 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_leave_to_wire()
301 *sa_data = group->rec; in send_leave_to_wire()
302 sa_data->scope_join_state = join_state; in send_leave_to_wire()
304 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_leave_to_wire()
306 group->state = MCAST_IDLE; in send_leave_to_wire()
311 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_leave_to_wire()
318 static int send_reply_to_slave(int slave, struct mcast_group *group, in send_reply_to_slave() argument
323 struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data; in send_reply_to_slave()
333 mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid; in send_reply_to_slave()
337 mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key; in send_reply_to_slave()
341 *sa_data = group->rec; in send_reply_to_slave()
344 sa_data->scope_join_state &= 0xf0; in send_reply_to_slave()
345 sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f); in send_reply_to_slave()
346 memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid); in send_reply_to_slave()
348 ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad); in send_reply_to_slave()
386 /* src is group record, dst is request record */ in cmp_rec()
391 if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey) in cmp_rec()
393 if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid) in cmp_rec()
397 src->mtusel_mtu, dst->mtusel_mtu)) in cmp_rec()
400 src->tclass != dst->tclass) in cmp_rec()
402 if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey) in cmp_rec()
406 src->ratesel_rate, dst->ratesel_rate)) in cmp_rec()
411 src->lifetmsel_lifetm, dst->lifetmsel_lifetm)) in cmp_rec()
414 (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) != in cmp_rec()
415 (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000)) in cmp_rec()
418 (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) != in cmp_rec()
419 (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00)) in cmp_rec()
422 (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) != in cmp_rec()
423 (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff)) in cmp_rec()
426 (src->scope_join_state & 0xf0) != in cmp_rec()
427 (dst->scope_join_state & 0xf0)) in cmp_rec()
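Aside: in cmp_rec() above, only the fields selected by the request's component mask are compared (src is the group's record, dst is the slave's request, per the comment), and SL, flow label and hop limit are all carved out of one packed 32-bit big-endian word. Below is a small userspace sketch of that comp_mask-gated, packed-field comparison; the REC_* mask names are invented and ntohl() stands in for be32_to_cpu().

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define REC_SL        (1u << 0)
#define REC_FLOWLABEL (1u << 1)
#define REC_HOPLIMIT  (1u << 2)

static int cmp_packed(uint32_t src_be, uint32_t dst_be, uint32_t comp_mask)
{
	uint32_t src = ntohl(src_be), dst = ntohl(dst_be);

	if ((comp_mask & REC_SL) && (src & 0xf0000000) != (dst & 0xf0000000))
		return -1;	/* SL: top 4 bits */
	if ((comp_mask & REC_FLOWLABEL) && (src & 0x0fffff00) != (dst & 0x0fffff00))
		return -1;	/* flow label: middle 20 bits */
	if ((comp_mask & REC_HOPLIMIT) && (src & 0x000000ff) != (dst & 0x000000ff))
		return -1;	/* hop limit: low 8 bits */
	return 0;		/* every requested component matched */
}

int main(void)
{
	uint32_t a = htonl(0x10123456), b = htonl(0x10123499);

	/* Same SL and flow label, different hop limit: the records match
	 * unless the hop-limit component is part of the mask. */
	printf("%d %d\n", cmp_packed(a, b, REC_SL | REC_FLOWLABEL),
	       cmp_packed(a, b, REC_HOPLIMIT));
	return 0;
}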
435 /* release group, return 1 if this was last release and group is destroyed
437 static int release_group(struct mcast_group *group, int from_timeout_handler) in release_group() argument
439 struct mlx4_ib_demux_ctx *ctx = group->demux; in release_group()
442 mutex_lock(&ctx->mcg_table_lock); in release_group()
443 mutex_lock(&group->lock); in release_group()
444 if (atomic_dec_and_test(&group->refcount)) { in release_group()
446 if (group->state != MCAST_IDLE && in release_group()
447 !cancel_delayed_work(&group->timeout_work)) { in release_group()
448 atomic_inc(&group->refcount); in release_group()
449 mutex_unlock(&group->lock); in release_group()
450 mutex_unlock(&ctx->mcg_table_lock); in release_group()
455 nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0); in release_group()
457 del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in release_group()
458 if (!list_empty(&group->pending_list)) in release_group()
459 mcg_warn_group(group, "releasing a group with non empty pending list\n"); in release_group()
461 rb_erase(&group->node, &ctx->mcg_table); in release_group()
462 list_del_init(&group->mgid0_list); in release_group()
463 mutex_unlock(&group->lock); in release_group()
464 mutex_unlock(&ctx->mcg_table_lock); in release_group()
465 kfree(group); in release_group()
468 mutex_unlock(&group->lock); in release_group()
469 mutex_unlock(&ctx->mcg_table_lock); in release_group()
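Aside: release_group() above is a dec-and-test teardown under the table and group locks: the last reference erases the group from the rb-tree and sysfs and frees it, but if the delayed timeout work is still pending and cannot be cancelled, the reference is re-taken and the timeout handler is left to do the final release. Below is a much-simplified userspace sketch of that pattern; there are no locks, the names are invented, and a plain bool stands in for the delayed work.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	bool timer_pending;	/* stands in for the group's timeout_work */
};

/* Returns 1 if this was the last release and the object was freed. */
static int put_obj(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) != 1)
		return 0;			/* other references remain */

	if (o->timer_pending) {			/* cancel_delayed_work() would have failed */
		atomic_fetch_add(&o->refcount, 1);
		return 0;			/* the timeout handler drops the last reference */
	}
	free(o);
	return 1;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 2);	/* e.g. one ref for a request, one for queued work */

	int first = put_obj(o);		/* 2 -> 1: object survives */
	int second = put_obj(o);	/* 1 -> 0: object freed */

	printf("%d %d\n", first, second);	/* prints "0 1" */
	return 0;
}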
474 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) in adjust_membership() argument
480 group->members[i] += inc; in adjust_membership()
483 static u8 get_leave_state(struct mcast_group *group) in get_leave_state() argument
489 if (!group->members[i]) in get_leave_state()
492 return leave_state & (group->rec.scope_join_state & 0xf); in get_leave_state()
495 static int join_group(struct mcast_group *group, int slave, u8 join_mask) in join_group() argument
501 join_state = join_mask & (~group->func[slave].join_state); in join_group()
502 adjust_membership(group, join_state, 1); in join_group()
503 group->func[slave].join_state |= join_state; in join_group()
504 if (group->func[slave].state != MCAST_MEMBER && join_state) { in join_group()
505 group->func[slave].state = MCAST_MEMBER; in join_group()
511 static int leave_group(struct mcast_group *group, int slave, u8 leave_state) in leave_group() argument
515 adjust_membership(group, leave_state, -1); in leave_group()
516 group->func[slave].join_state &= ~leave_state; in leave_group()
517 if (!group->func[slave].join_state) { in leave_group()
518 group->func[slave].state = MCAST_NOT_MEMBER; in leave_group()
524 static int check_leave(struct mcast_group *group, int slave, u8 leave_mask) in check_leave() argument
526 if (group->func[slave].state != MCAST_MEMBER) in check_leave()
530 if (~group->func[slave].join_state & leave_mask) in check_leave()
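Aside: adjust_membership(), get_leave_state(), join_group() and leave_group() above treat the low nibble of scope_join_state as a set of join flags, with one reference counter per bit in the group's members[] array: joins increment the counters for the newly set bits, leaves decrement them, and a wire-level leave is needed only for bits whose counter has dropped to zero (check_leave() just verifies the slave actually holds the bits it wants to drop). Below is a compact userspace sketch of that bookkeeping; the names are invented, with three join bits as in the members[] dump shown later in sysfs_show_group().

#include <stdio.h>
#include <stdint.h>

#define NUM_JOIN_BITS 3

struct grp {
	int members[NUM_JOIN_BITS];	/* one counter per join bit */
	uint8_t scope_join_state;	/* high nibble: scope, low nibble: join flags */
};

static void adjust_membership(struct grp *g, uint8_t join_state, int inc)
{
	for (int i = 0; i < NUM_JOIN_BITS; i++)
		if (join_state & (1 << i))
			g->members[i] += inc;
}

static uint8_t get_leave_state(const struct grp *g)
{
	uint8_t leave = 0;

	for (int i = 0; i < NUM_JOIN_BITS; i++)
		if (!g->members[i])
			leave |= 1 << i;
	/* only bits actually set in the group record need leaving */
	return leave & (g->scope_join_state & 0xf);
}

int main(void)
{
	struct grp g = { .scope_join_state = 0x21 };	/* scope nibble 2, join bit 0 set */

	adjust_membership(&g, 0x1, 1);			/* one member joins with bit 0 */
	printf("leave mask: 0x%x\n", (unsigned int)get_leave_state(&g));	/* 0x0: still in use */
	adjust_membership(&g, 0x1, -1);			/* it leaves */
	printf("leave mask: 0x%x\n", (unsigned int)get_leave_state(&g));	/* 0x1: send leave for bit 0 */
	return 0;
}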
542 struct mcast_group *group; in mlx4_ib_mcg_timeout_handler() local
545 group = container_of(delay, typeof(*group), timeout_work); in mlx4_ib_mcg_timeout_handler()
547 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
548 if (group->state == MCAST_JOIN_SENT) { in mlx4_ib_mcg_timeout_handler()
549 if (!list_empty(&group->pending_list)) { in mlx4_ib_mcg_timeout_handler()
550 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in mlx4_ib_mcg_timeout_handler()
551 list_del(&req->group_list); in mlx4_ib_mcg_timeout_handler()
552 list_del(&req->func_list); in mlx4_ib_mcg_timeout_handler()
553 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_timeout_handler()
554 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
556 if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { in mlx4_ib_mcg_timeout_handler()
557 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
560 kfree(group); in mlx4_ib_mcg_timeout_handler()
563 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
565 mcg_warn_group(group, "DRIVER BUG\n"); in mlx4_ib_mcg_timeout_handler()
566 } else if (group->state == MCAST_LEAVE_SENT) { in mlx4_ib_mcg_timeout_handler()
567 if (group->rec.scope_join_state & 0xf) in mlx4_ib_mcg_timeout_handler()
568 group->rec.scope_join_state &= 0xf0; in mlx4_ib_mcg_timeout_handler()
569 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
570 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
571 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
573 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
575 mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state)); in mlx4_ib_mcg_timeout_handler()
576 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
577 atomic_inc(&group->refcount); in mlx4_ib_mcg_timeout_handler()
578 if (!queue_work(group->demux->mcg_wq, &group->work)) in mlx4_ib_mcg_timeout_handler()
579 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_timeout_handler()
581 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
584 static int handle_leave_req(struct mcast_group *group, u8 leave_mask, in handle_leave_req() argument
589 if (req->clean) in handle_leave_req()
590 leave_mask = group->func[req->func].join_state; in handle_leave_req()
592 status = check_leave(group, req->func, leave_mask); in handle_leave_req()
594 leave_group(group, req->func, leave_mask); in handle_leave_req()
596 if (!req->clean) in handle_leave_req()
597 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_leave_req()
598 --group->func[req->func].num_pend_reqs; in handle_leave_req()
599 list_del(&req->group_list); in handle_leave_req()
600 list_del(&req->func_list); in handle_leave_req()
605 static int handle_join_req(struct mcast_group *group, u8 join_mask, in handle_join_req() argument
608 u8 group_join_state = group->rec.scope_join_state & 0xf; in handle_join_req()
611 struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; in handle_join_req()
615 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); in handle_join_req()
617 join_group(group, req->func, join_mask); in handle_join_req()
619 --group->func[req->func].num_pend_reqs; in handle_join_req()
620 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_join_req()
621 list_del(&req->group_list); in handle_join_req()
622 list_del(&req->func_list); in handle_join_req()
627 group->prev_state = group->state; in handle_join_req()
628 if (send_join_to_wire(group, &req->sa_mad)) { in handle_join_req()
629 --group->func[req->func].num_pend_reqs; in handle_join_req()
630 list_del(&req->group_list); in handle_join_req()
631 list_del(&req->func_list); in handle_join_req()
634 group->state = group->prev_state; in handle_join_req()
636 group->state = MCAST_JOIN_SENT; in handle_join_req()
644 struct mcast_group *group; in mlx4_ib_mcg_work_handler() local
648 int rc = 1; /* release_count - this is for the scheduled work */ in mlx4_ib_mcg_work_handler()
652 group = container_of(work, typeof(*group), work); in mlx4_ib_mcg_work_handler()
654 mutex_lock(&group->lock); in mlx4_ib_mcg_work_handler()
656 /* First, let's see if a response from SM is waiting regarding this group. in mlx4_ib_mcg_work_handler()
657 * If so, we need to update the group's REC. If this is a bad response, we in mlx4_ib_mcg_work_handler()
660 if (group->state == MCAST_RESP_READY) { in mlx4_ib_mcg_work_handler()
662 cancel_delayed_work(&group->timeout_work); in mlx4_ib_mcg_work_handler()
663 status = be16_to_cpu(group->response_sa_mad.mad_hdr.status); in mlx4_ib_mcg_work_handler()
664 method = group->response_sa_mad.mad_hdr.method; in mlx4_ib_mcg_work_handler()
665 if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) { in mlx4_ib_mcg_work_handler()
666 mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n", in mlx4_ib_mcg_work_handler()
667 be64_to_cpu(group->response_sa_mad.mad_hdr.tid), in mlx4_ib_mcg_work_handler()
668 be64_to_cpu(group->last_req_tid)); in mlx4_ib_mcg_work_handler()
669 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
673 if (!list_empty(&group->pending_list)) in mlx4_ib_mcg_work_handler()
674 req = list_first_entry(&group->pending_list, in mlx4_ib_mcg_work_handler()
678 send_reply_to_slave(req->func, group, &req->sa_mad, status); in mlx4_ib_mcg_work_handler()
679 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_work_handler()
680 list_del(&req->group_list); in mlx4_ib_mcg_work_handler()
681 list_del(&req->func_list); in mlx4_ib_mcg_work_handler()
685 mcg_warn_group(group, "no request for failed join\n"); in mlx4_ib_mcg_work_handler()
686 } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing) in mlx4_ib_mcg_work_handler()
693 group->response_sa_mad.data)->scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
694 cur_join_state = group->rec.scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
699 --rc; in mlx4_ib_mcg_work_handler()
702 memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec); in mlx4_ib_mcg_work_handler()
704 group->state = MCAST_IDLE; in mlx4_ib_mcg_work_handler()
709 while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
710 req = list_first_entry(&group->pending_list, struct mcast_req, in mlx4_ib_mcg_work_handler()
712 sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; in mlx4_ib_mcg_work_handler()
713 req_join_state = sa_data->scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
718 if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE) in mlx4_ib_mcg_work_handler()
719 rc += handle_leave_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
721 rc += handle_join_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
725 if (group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
726 req_join_state = get_leave_state(group); in mlx4_ib_mcg_work_handler()
728 group->rec.scope_join_state &= ~req_join_state; in mlx4_ib_mcg_work_handler()
729 group->prev_state = group->state; in mlx4_ib_mcg_work_handler()
730 if (send_leave_to_wire(group, req_join_state)) { in mlx4_ib_mcg_work_handler()
731 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
734 group->state = MCAST_LEAVE_SENT; in mlx4_ib_mcg_work_handler()
738 if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) in mlx4_ib_mcg_work_handler()
740 mutex_unlock(&group->lock); in mlx4_ib_mcg_work_handler()
742 while (rc--) in mlx4_ib_mcg_work_handler()
743 release_group(group, 0); in mlx4_ib_mcg_work_handler()
750 struct mcast_group *group = NULL, *cur_group, *n; in search_relocate_mgid0_group() local
753 mutex_lock(&ctx->mcg_table_lock); in search_relocate_mgid0_group()
754 list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) { in search_relocate_mgid0_group()
755 mutex_lock(&group->lock); in search_relocate_mgid0_group()
756 if (group->last_req_tid == tid) { in search_relocate_mgid0_group()
758 group->rec.mgid = *new_mgid; in search_relocate_mgid0_group()
759 sprintf(group->name, "%016llx%016llx", in search_relocate_mgid0_group()
760 be64_to_cpu(group->rec.mgid.global.subnet_prefix), in search_relocate_mgid0_group()
761 be64_to_cpu(group->rec.mgid.global.interface_id)); in search_relocate_mgid0_group()
762 list_del_init(&group->mgid0_list); in search_relocate_mgid0_group()
763 cur_group = mcast_insert(ctx, group); in search_relocate_mgid0_group()
766 req = list_first_entry(&group->pending_list, in search_relocate_mgid0_group()
768 --group->func[req->func].num_pend_reqs; in search_relocate_mgid0_group()
769 list_del(&req->group_list); in search_relocate_mgid0_group()
770 list_del(&req->func_list); in search_relocate_mgid0_group()
772 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
773 mutex_unlock(&ctx->mcg_table_lock); in search_relocate_mgid0_group()
774 release_group(group, 0); in search_relocate_mgid0_group()
778 atomic_inc(&group->refcount); in search_relocate_mgid0_group()
779 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in search_relocate_mgid0_group()
780 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
781 mutex_unlock(&ctx->mcg_table_lock); in search_relocate_mgid0_group()
782 return group; in search_relocate_mgid0_group()
786 list_del(&group->mgid0_list); in search_relocate_mgid0_group()
787 if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE) in search_relocate_mgid0_group()
788 cancel_delayed_work_sync(&group->timeout_work); in search_relocate_mgid0_group()
790 list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) { in search_relocate_mgid0_group()
791 list_del(&tmp1->group_list); in search_relocate_mgid0_group()
794 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
795 mutex_unlock(&ctx->mcg_table_lock); in search_relocate_mgid0_group()
796 kfree(group); in search_relocate_mgid0_group()
800 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
802 mutex_unlock(&ctx->mcg_table_lock); in search_relocate_mgid0_group()
813 struct mcast_group *group, *cur_group; in acquire_group() local
819 group = mcast_find(ctx, mgid); in acquire_group()
820 if (group) in acquire_group()
825 return ERR_PTR(-ENOENT); in acquire_group()
827 group = kzalloc(sizeof(*group), GFP_KERNEL); in acquire_group()
828 if (!group) in acquire_group()
829 return ERR_PTR(-ENOMEM); in acquire_group()
831 group->demux = ctx; in acquire_group()
832 group->rec.mgid = *mgid; in acquire_group()
833 INIT_LIST_HEAD(&group->pending_list); in acquire_group()
834 INIT_LIST_HEAD(&group->mgid0_list); in acquire_group()
836 INIT_LIST_HEAD(&group->func[i].pending); in acquire_group()
837 INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); in acquire_group()
838 INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler); in acquire_group()
839 mutex_init(&group->lock); in acquire_group()
840 sprintf(group->name, "%016llx%016llx", in acquire_group()
841 be64_to_cpu(group->rec.mgid.global.subnet_prefix), in acquire_group()
842 be64_to_cpu(group->rec.mgid.global.interface_id)); in acquire_group()
843 sysfs_attr_init(&group->dentry.attr); in acquire_group()
844 group->dentry.show = sysfs_show_group; in acquire_group()
845 group->dentry.store = NULL; in acquire_group()
846 group->dentry.attr.name = group->name; in acquire_group()
847 group->dentry.attr.mode = 0400; in acquire_group()
848 group->state = MCAST_IDLE; in acquire_group()
851 list_add(&group->mgid0_list, &ctx->mcg_mgid0_list); in acquire_group()
855 cur_group = mcast_insert(ctx, group); in acquire_group()
857 mcg_warn("group just showed up %s - confused\n", cur_group->name); in acquire_group()
858 kfree(group); in acquire_group()
859 return ERR_PTR(-EINVAL); in acquire_group()
862 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in acquire_group()
865 atomic_inc(&group->refcount); in acquire_group()
866 return group; in acquire_group()
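Aside: acquire_group() above either finds an existing group in the rb-tree or allocates a new one, names it after the MGID, registers a read-only sysfs attribute for it (mode 0400, show only) and takes a reference. The name is simply the 128-bit MGID printed as two big-endian 64-bit halves. Below is a small sketch of that formatting; be64toh() from <endian.h> (glibc/musl) stands in for be64_to_cpu(), and the MGID bytes are made up.

#include <endian.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char mgid[16] = { 0xff, 0x12, 0x40, 0x1b };	/* remaining bytes are zero */
	uint64_t prefix, iface;
	char name[33];						/* 32 hex digits + NUL */

	memcpy(&prefix, mgid, 8);				/* subnet prefix half */
	memcpy(&iface, mgid + 8, 8);				/* interface id half */
	snprintf(name, sizeof(name), "%016llx%016llx",
		 (unsigned long long)be64toh(prefix),
		 (unsigned long long)be64toh(iface));
	printf("%s\n", name);	/* ff12401b000000000000000000000000 */
	return 0;
}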
871 struct mcast_group *group = req->group; in queue_req() local
873 atomic_inc(&group->refcount); /* for the request */ in queue_req()
874 atomic_inc(&group->refcount); /* for scheduling the work */ in queue_req()
875 list_add_tail(&req->group_list, &group->pending_list); in queue_req()
876 list_add_tail(&req->func_list, &group->func[req->func].pending); in queue_req()
878 if (!queue_work(group->demux->mcg_wq, &group->work)) in queue_req()
879 safe_atomic_dec(&group->refcount); in queue_req()
886 struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data; in mlx4_ib_mcg_demux_handler()
887 struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1]; in mlx4_ib_mcg_demux_handler()
888 struct mcast_group *group; in mlx4_ib_mcg_demux_handler() local
890 switch (mad->mad_hdr.method) { in mlx4_ib_mcg_demux_handler()
893 mutex_lock(&ctx->mcg_table_lock); in mlx4_ib_mcg_demux_handler()
894 group = acquire_group(ctx, &rec->mgid, 0); in mlx4_ib_mcg_demux_handler()
895 mutex_unlock(&ctx->mcg_table_lock); in mlx4_ib_mcg_demux_handler()
896 if (IS_ERR(group)) { in mlx4_ib_mcg_demux_handler()
897 if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) { in mlx4_ib_mcg_demux_handler()
898 __be64 tid = mad->mad_hdr.tid; in mlx4_ib_mcg_demux_handler()
899 *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */ in mlx4_ib_mcg_demux_handler()
900 group = search_relocate_mgid0_group(ctx, tid, &rec->mgid); in mlx4_ib_mcg_demux_handler()
902 group = NULL; in mlx4_ib_mcg_demux_handler()
905 if (!group) in mlx4_ib_mcg_demux_handler()
908 mutex_lock(&group->lock); in mlx4_ib_mcg_demux_handler()
909 group->response_sa_mad = *mad; in mlx4_ib_mcg_demux_handler()
910 group->prev_state = group->state; in mlx4_ib_mcg_demux_handler()
911 group->state = MCAST_RESP_READY; in mlx4_ib_mcg_demux_handler()
913 atomic_inc(&group->refcount); in mlx4_ib_mcg_demux_handler()
914 if (!queue_work(ctx->mcg_wq, &group->work)) in mlx4_ib_mcg_demux_handler()
915 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_demux_handler()
916 mutex_unlock(&group->lock); in mlx4_ib_mcg_demux_handler()
917 release_group(group, 0); in mlx4_ib_mcg_demux_handler()
923 return 0; /* not consumed, pass-through to guest over tunnel */ in mlx4_ib_mcg_demux_handler()
926 port, mad->mad_hdr.method); in mlx4_ib_mcg_demux_handler()
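Aside: in the GET_RESP path of mlx4_ib_mcg_demux_handler() above, a response that matches no known MGID may still belong to a group that was created with a zero MGID; the handler reconstructs the slave-tagged transaction ID by overwriting the first byte of the 64-bit TID with the slave number, matching the modified TID that was kept in the group when the request went out (per the comment above). Below is a tiny sketch of that byte-stuffing; the values are illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tid = 0x1122334455667788ULL;	/* made-up transaction id */
	uint8_t slave = 3;

	*(uint8_t *)&tid = slave;	/* overwrite the first byte in memory, as the handler does on the __be64 TID */
	printf("first byte of tid is now %d\n", *(uint8_t *)&tid);
	return 0;
}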
935 struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data; in mlx4_ib_mcg_multiplex_handler()
936 struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1]; in mlx4_ib_mcg_multiplex_handler()
937 struct mcast_group *group; in mlx4_ib_mcg_multiplex_handler() local
941 if (ctx->flushing) in mlx4_ib_mcg_multiplex_handler()
942 return -EAGAIN; in mlx4_ib_mcg_multiplex_handler()
944 switch (sa_mad->mad_hdr.method) { in mlx4_ib_mcg_multiplex_handler()
951 return -ENOMEM; in mlx4_ib_mcg_multiplex_handler()
953 req->func = slave; in mlx4_ib_mcg_multiplex_handler()
954 req->sa_mad = *sa_mad; in mlx4_ib_mcg_multiplex_handler()
956 mutex_lock(&ctx->mcg_table_lock); in mlx4_ib_mcg_multiplex_handler()
957 group = acquire_group(ctx, &rec->mgid, may_create); in mlx4_ib_mcg_multiplex_handler()
958 mutex_unlock(&ctx->mcg_table_lock); in mlx4_ib_mcg_multiplex_handler()
959 if (IS_ERR(group)) { in mlx4_ib_mcg_multiplex_handler()
961 return PTR_ERR(group); in mlx4_ib_mcg_multiplex_handler()
963 mutex_lock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
964 if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { in mlx4_ib_mcg_multiplex_handler()
965 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
966 mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", in mlx4_ib_mcg_multiplex_handler()
968 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
970 return -ENOMEM; in mlx4_ib_mcg_multiplex_handler()
972 ++group->func[slave].num_pend_reqs; in mlx4_ib_mcg_multiplex_handler()
973 req->group = group; in mlx4_ib_mcg_multiplex_handler()
975 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
976 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
982 return 0; /* not consumed, pass-through */ in mlx4_ib_mcg_multiplex_handler()
985 port, slave, sa_mad->mad_hdr.method); in mlx4_ib_mcg_multiplex_handler()
993 struct mcast_group *group = in sysfs_show_group() local
1002 if (group->state == MCAST_IDLE) in sysfs_show_group()
1004 get_state_string(group->state)); in sysfs_show_group()
1007 get_state_string(group->state), in sysfs_show_group()
1008 be64_to_cpu(group->last_req_tid)); in sysfs_show_group()
1010 if (list_empty(&group->pending_list)) { in sysfs_show_group()
1013 req = list_first_entry(&group->pending_list, struct mcast_req, in sysfs_show_group()
1016 be64_to_cpu(req->sa_mad.mad_hdr.tid)); in sysfs_show_group()
1020 group->rec.scope_join_state & 0xf, in sysfs_show_group()
1021 group->members[2], in sysfs_show_group()
1022 group->members[1], in sysfs_show_group()
1023 group->members[0], in sysfs_show_group()
1024 atomic_read(&group->refcount), in sysfs_show_group()
1029 if (group->func[i].state == MCAST_MEMBER) in sysfs_show_group()
1031 group->func[i].join_state); in sysfs_show_group()
1034 hoplimit = be32_to_cpu(group->rec.sl_flowlabel_hoplimit); in sysfs_show_group()
1037 be16_to_cpu(group->rec.pkey), in sysfs_show_group()
1038 be32_to_cpu(group->rec.qkey), in sysfs_show_group()
1039 (group->rec.mtusel_mtu & 0xc0) >> 6, in sysfs_show_group()
1040 (group->rec.mtusel_mtu & 0x3f), in sysfs_show_group()
1041 group->rec.tclass, in sysfs_show_group()
1042 (group->rec.ratesel_rate & 0xc0) >> 6, in sysfs_show_group()
1043 (group->rec.ratesel_rate & 0x3f), in sysfs_show_group()
1047 group->rec.proxy_join); in sysfs_show_group()
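Aside: the field dump in sysfs_show_group() above also shows how mtusel_mtu and ratesel_rate pack a 2-bit selector into the top bits and a 6-bit value into the rest, alongside the scope/join-state byte that splits into two nibbles. Below is a one-liner sketch of that unpacking; the sample byte is arbitrary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mtusel_mtu = 0x84;	/* arbitrary sample byte */

	printf("selector %d, value %d\n",
	       (mtusel_mtu & 0xc0) >> 6,	/* top 2 bits: selector */
	       mtusel_mtu & 0x3f);		/* low 6 bits: encoded MTU */
	return 0;
}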
1056 atomic_set(&ctx->tid, 0); in mlx4_ib_mcg_port_init()
1057 sprintf(name, "mlx4_ib_mcg%d", ctx->port); in mlx4_ib_mcg_port_init()
1058 ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in mlx4_ib_mcg_port_init()
1059 if (!ctx->mcg_wq) in mlx4_ib_mcg_port_init()
1060 return -ENOMEM; in mlx4_ib_mcg_port_init()
1062 mutex_init(&ctx->mcg_table_lock); in mlx4_ib_mcg_port_init()
1063 ctx->mcg_table = RB_ROOT; in mlx4_ib_mcg_port_init()
1064 INIT_LIST_HEAD(&ctx->mcg_mgid0_list); in mlx4_ib_mcg_port_init()
1065 ctx->flushing = 0; in mlx4_ib_mcg_port_init()
1070 static void force_clean_group(struct mcast_group *group) in force_clean_group() argument
1074 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { in force_clean_group()
1075 list_del(&req->group_list); in force_clean_group()
1078 del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr); in force_clean_group()
1079 rb_erase(&group->node, &group->demux->mcg_table); in force_clean_group()
1080 kfree(group); in force_clean_group()
1087 struct mcast_group *group; in _mlx4_ib_mcg_port_cleanup() local
1097 mutex_lock(&ctx->mcg_table_lock); in _mlx4_ib_mcg_port_cleanup()
1098 for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) in _mlx4_ib_mcg_port_cleanup()
1100 mutex_unlock(&ctx->mcg_table_lock); in _mlx4_ib_mcg_port_cleanup()
1107 flush_workqueue(ctx->mcg_wq); in _mlx4_ib_mcg_port_cleanup()
1109 destroy_workqueue(ctx->mcg_wq); in _mlx4_ib_mcg_port_cleanup()
1111 mutex_lock(&ctx->mcg_table_lock); in _mlx4_ib_mcg_port_cleanup()
1112 while ((p = rb_first(&ctx->mcg_table)) != NULL) { in _mlx4_ib_mcg_port_cleanup()
1113 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1114 if (atomic_read(&group->refcount)) in _mlx4_ib_mcg_port_cleanup()
1115 mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n", in _mlx4_ib_mcg_port_cleanup()
1116 atomic_read(&group->refcount), group); in _mlx4_ib_mcg_port_cleanup()
1118 force_clean_group(group); in _mlx4_ib_mcg_port_cleanup()
1120 mutex_unlock(&ctx->mcg_table_lock); in _mlx4_ib_mcg_port_cleanup()
1133 _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq); in mcg_clean_task()
1134 cw->ctx->flushing = 0; in mcg_clean_task()
1142 if (ctx->flushing) in mlx4_ib_mcg_port_cleanup()
1145 ctx->flushing = 1; in mlx4_ib_mcg_port_cleanup()
1149 ctx->flushing = 0; in mlx4_ib_mcg_port_cleanup()
1155 ctx->flushing = 0; in mlx4_ib_mcg_port_cleanup()
1159 work->ctx = ctx; in mlx4_ib_mcg_port_cleanup()
1160 work->destroy_wq = destroy_wq; in mlx4_ib_mcg_port_cleanup()
1161 INIT_WORK(&work->work, mcg_clean_task); in mlx4_ib_mcg_port_cleanup()
1162 queue_work(clean_wq, &work->work); in mlx4_ib_mcg_port_cleanup()
1167 struct ib_sa_mad *mad = &req->sa_mad; in build_leave_mad()
1169 mad->mad_hdr.method = IB_SA_METHOD_DELETE; in build_leave_mad()
1173 static void clear_pending_reqs(struct mcast_group *group, int vf) in clear_pending_reqs() argument
1179 if (!list_empty(&group->pending_list)) in clear_pending_reqs()
1180 group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list); in clear_pending_reqs()
1182 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { in clear_pending_reqs()
1185 (group->state == MCAST_JOIN_SENT || in clear_pending_reqs()
1186 group->state == MCAST_LEAVE_SENT)) { in clear_pending_reqs()
1187 clear = cancel_delayed_work(&group->timeout_work); in clear_pending_reqs()
1189 group->state = MCAST_IDLE; in clear_pending_reqs()
1192 --group->func[vf].num_pend_reqs; in clear_pending_reqs()
1193 list_del(&req->group_list); in clear_pending_reqs()
1194 list_del(&req->func_list); in clear_pending_reqs()
1196 atomic_dec(&group->refcount); in clear_pending_reqs()
1200 if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) { in clear_pending_reqs()
1201 mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n", in clear_pending_reqs()
1202 list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs); in clear_pending_reqs()
1206 static int push_deleteing_req(struct mcast_group *group, int slave) in push_deleteing_req() argument
1211 if (!group->func[slave].join_state) in push_deleteing_req()
1216 return -ENOMEM; in push_deleteing_req()
1218 if (!list_empty(&group->func[slave].pending)) { in push_deleteing_req()
1219 pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); in push_deleteing_req()
1220 if (pend_req->clean) { in push_deleteing_req()
1226 req->clean = 1; in push_deleteing_req()
1227 req->func = slave; in push_deleteing_req()
1228 req->group = group; in push_deleteing_req()
1229 ++group->func[slave].num_pend_reqs; in push_deleteing_req()
1237 struct mcast_group *group; in clean_vf_mcast() local
1240 mutex_lock(&ctx->mcg_table_lock); in clean_vf_mcast()
1241 for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) { in clean_vf_mcast()
1242 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
1243 mutex_lock(&group->lock); in clean_vf_mcast()
1244 if (atomic_read(&group->refcount)) { in clean_vf_mcast()
1246 clear_pending_reqs(group, slave); in clean_vf_mcast()
1247 push_deleteing_req(group, slave); in clean_vf_mcast()
1249 mutex_unlock(&group->lock); in clean_vf_mcast()
1251 mutex_unlock(&ctx->mcg_table_lock); in clean_vf_mcast()
1259 return -ENOMEM; in mlx4_ib_mcg_init()