/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <dev/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS		80
#define MAX_PEND_REQS_PER_FUNC 4
#define MAD_TIMEOUT_MS	2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
		(group)->name, (group)->demux->port, ## arg)

#define mcg_debug_group(group, format, arg...) \
	pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
		 (group)->name, (group)->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)


static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;

enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state state;
	uint8_t join_state;
	int num_pend_reqs;
	struct list_head pending;
};

struct ib_sa_mcmember_data {
	union ib_gid mgid;
	union ib_gid port_gid;
	__be32 qkey;
	__be16 mlid;
	u8 mtusel_mtu;
	u8 tclass;
	__be16 pkey;
	u8 ratesel_rate;
	u8 lifetmsel_lifetm;
	__be32 sl_flowlabel_hoplimit;
	u8 scope_join_state;
	u8 proxy_join;
	u8 reserved[2];
} __packed __aligned(4);

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node node;
	struct list_head mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member func[MAX_VFS];
	struct mutex lock;
	struct work_struct work;
	struct list_head pending_list;
	int members[3];
	enum mcast_group_state state;
	enum mcast_group_state prev_state;
	struct ib_sa_mad response_sa_mad;
	__be64 last_req_tid;

	char name[33]; /* MGID string */
	struct device_attribute dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work timeout_work;
	struct list_head cleanup_list;
};

struct mcast_req {
	int func;
	struct ib_sa_mad sa_mad;
	struct list_head group_list;
	struct list_head func_list;
	struct mcast_group *group;
	int clean;
};


#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}

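/*
 * Look up a multicast group by MGID in the per-port rb-tree.  Callers
 * in this file take ctx->mcg_table_lock around lookups and insertions.
 */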
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

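/*
 * Insert a group into the rb-tree keyed by MGID.  Returns NULL on
 * success, or the already-existing group if the MGID is a duplicate.
 */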
static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}

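/*
 * Send a MAD from the master function (PF) to the SA on the wire,
 * using the cached SM address handle for this port.  Fails with
 * -EAGAIN while the port is not yet Active and sm_ah is not set up.
 */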
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, 0xffff, mad);
}

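/*
 * Tunnel a MAD down to a slave (VF), faking a work completion so the
 * MAD appears to have arrived on QP1 from the SM's LID.
 */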
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}

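/*
 * Forward a VF's join request to the SA: rewrite the port GID to the
 * physical port GID (slave 0), assign a fresh TID so the response can
 * be matched later, and arm the MAD timeout work.
 */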
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

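/*
 * Build and send a leave (SubnAdmDelete of MCMemberRecord) to the SA
 * for the given join_state bits, and arm the MAD timeout work.
 */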
static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

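/*
 * Answer a VF's join/leave request with a GetResp built from the
 * group record, restoring the VF's own port GID and join_state so the
 * reply looks like it came straight from the SA.
 */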
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}

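/*
 * Validate one selector/value pair (MTU, rate or packet lifetime)
 * from a request against the group record.  Returns 0 if the pair is
 * absent from comp_mask or the group value satisfies the requested
 * selector, nonzero otherwise.
 */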
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;
	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

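/*
 * Compare the fields a VF asked for (per comp_mask) against the
 * existing group record.  Returns 0 on match, or
 * MAD_STATUS_REQ_INVALID if any requested field conflicts.
 */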
static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non-empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	} else {
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
	}
	return 0;
}

static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 0xf);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}

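/*
 * Delayed-work handler invoked when the SA did not answer a join or
 * leave within MAD_TIMEOUT_MS: drop the timed-out request, roll the
 * group back to MCAST_IDLE and reschedule the worker to process
 * whatever is still pending.
 */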
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 0xf)
			group->rec.scope_join_state &= 0xf0;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}

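/*
 * Process one queued leave request: validate it, update the per-slave
 * membership counters, answer the VF immediately (unless this is an
 * internal cleanup request) and free the request.  Returns the number
 * of group references the caller should drop.
 */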
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

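/*
 * Process one queued join request.  If the port already holds all the
 * requested join_state bits, answer the VF from the cached group
 * record; otherwise forward the join to the SA and wait for the
 * response.  Returns the number of group references the caller should
 * drop.
 */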
static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 0xf;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}

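/*
 * Main worker for a multicast group: consume a pending SA response if
 * one is ready, then serve queued join/leave requests while the group
 * is idle, and finally send a leave to the SA for join_state bits no
 * function uses any more.  Takes group->lock internally and drops the
 * accumulated group references on exit.
 */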
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				(long long)be64_to_cpu(
				    group->response_sa_mad.mad_hdr.tid),
				(long long)be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
					group->response_sa_mad.data)->scope_join_state & 0xf;
			cur_join_state = group->rec.scope_join_state & 0xf;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0xf;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}

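/*
 * A join sent with a zero MGID asks the SA to assign one.  When the
 * response arrives, find the matching group on the mgid0 list by TID
 * and move it into the rb-tree under the newly assigned MGID; on a
 * failed or raced join, tear the temporary group down instead.
 */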
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group, *n;
	struct mcast_req *req;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
					(long long)be64_to_cpu(group->rec.mgid.global.subnet_prefix),
					(long long)be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);

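/*
 * Find the group for @mgid and take a reference, optionally creating
 * it (and its sysfs entry) if it does not exist yet.  A zero MGID gets
 * a temporary group on the mgid0 list until the SA assigns a real
 * MGID.  Called with ctx->mcg_table_lock held.
 */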
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
		(long long)be64_to_cpu(
			group->rec.mgid.global.subnet_prefix),
		(long long)be64_to_cpu(
			group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}

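/*
 * Queue a VF request on the group and per-function pending lists and
 * kick the group worker; takes one reference for the request and one
 * for the scheduled work.  Called with group->lock held.
 */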
static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}

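/*
 * Demux an MCMemberRecord MAD arriving from the wire on the PF.  SA
 * responses to joins/leaves issued by this module are absorbed here
 * and handed to the group worker; other methods are passed through to
 * the guest over the tunnel.  Returns nonzero if the MAD was consumed.
 */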
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1;

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

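/*
 * Handle an SA MCMemberRecord MAD issued by a VF (slave).  Join (SET)
 * and leave (DELETE) requests are wrapped in a mcast_req, attached to
 * the group (created on join if needed) and queued for the work
 * handler, with the number of pending requests per function bounded by
 * MAX_PEND_REQS_PER_FUNC.  Other methods are passed through unchanged.
 * Returns 1 if consumed, 0 for pass-through, or a negative errno.
 */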
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
					port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

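/*
 * sysfs read handler for a group entry: prints the join state, member
 * counts, reference count, pending-request and last-request TIDs, the
 * per-VF membership states, and selected fields of the cached
 * MCMemberRecord on a single line.
 */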
static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
			get_state_string(group->state),
			(long long)be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
			(long long)be64_to_cpu(
				req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
		       group->rec.scope_join_state & 0xf,
		       group->members[2], group->members[1], group->members[0],
		       atomic_read(&group->refcount),
		       pending_str,
		       state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
				       f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
		       "%4x %4x %2x %2x)\n",
		       be16_to_cpu(group->rec.pkey),
		       be32_to_cpu(group->rec.qkey),
		       (group->rec.mtusel_mtu & 0xc0) >> 6,
		       group->rec.mtusel_mtu & 0x3f,
		       group->rec.tclass,
		       (group->rec.ratesel_rate & 0xc0) >> 6,
		       group->rec.ratesel_rate & 0x3f,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
		       be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
		       group->rec.proxy_join);

	return len;
}

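/*
 * Per-port initialization of the multicast paravirtualization context:
 * create the ordered MCG workqueue and reset the group table, the
 * MGID-0 list and the TID counter.
 */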
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}

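/*
 * Free a group unconditionally: discard any requests still pending on
 * it, remove its sysfs attribute and unlink it from the port's group
 * table.  Only used during port cleanup, after the table has been
 * given a chance to drain.
 */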
static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}

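/*
 * Tear down all groups on a port: queue leave requests on behalf of
 * every VF, wait (bounded by MAD_TIMEOUT_MS plus a grace period) for
 * the group table to drain, flush (and optionally destroy) the MCG
 * workqueue, then force-free whatever groups remain.
 */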
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

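/*
 * Context for deferred port cleanup: carries the demux context and the
 * destroy_wq flag to mcg_clean_task(), which runs the cleanup on
 * clean_wq and then clears the flushing flag.
 */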
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

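/*
 * Entry point for port cleanup.  When the MCG workqueue is to be
 * destroyed the cleanup runs synchronously; otherwise it is deferred
 * to clean_wq.  The ctx->flushing flag guards against concurrent
 * cleanups and fences off new multiplexed requests.
 */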
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}

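/* Turn a zero-initialized request into a leave (SA DELETE) request. */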
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}

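/*
 * Drop all of a VF's pending requests on a group.  If the VF's request
 * is the one currently outstanding on the wire (group in JOIN_SENT or
 * LEAVE_SENT), it is dropped only if the timeout work could still be
 * cancelled; otherwise it is left on the lists.
 */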
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}

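/*
 * If the VF still has a join state on this group, queue a
 * driver-generated leave ("clean") request on its behalf so the SA
 * membership is released; skipped if the VF's newest pending request
 * is already a clean request.
 */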
static int push_deleteing_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}

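/*
 * Walk every group on the port and, for groups that are still
 * referenced, clear the given VF's pending requests and push a leave
 * request on its behalf.  Called for every VF during port cleanup
 * (and, presumably, whenever a single VF's multicast state must be
 * flushed).
 */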
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleteing_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

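/*
 * Module-level setup and teardown of clean_wq, the ordered workqueue
 * used for deferred port cleanup.
 */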
int mlx4_ib_mcg_init(void)
{
	clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}