1b9c5d6a6SOren Duer /* 2b9c5d6a6SOren Duer * Copyright (c) 2012 Mellanox Technologies. All rights reserved. 3b9c5d6a6SOren Duer * 4b9c5d6a6SOren Duer * This software is available to you under a choice of one of two 5b9c5d6a6SOren Duer * licenses. You may choose to be licensed under the terms of the GNU 6b9c5d6a6SOren Duer * General Public License (GPL) Version 2, available from the file 7b9c5d6a6SOren Duer * COPYING in the main directory of this source tree, or the 8b9c5d6a6SOren Duer * OpenIB.org BSD license below: 9b9c5d6a6SOren Duer * 10b9c5d6a6SOren Duer * Redistribution and use in source and binary forms, with or 11b9c5d6a6SOren Duer * without modification, are permitted provided that the following 12b9c5d6a6SOren Duer * conditions are met: 13b9c5d6a6SOren Duer * 14b9c5d6a6SOren Duer * - Redistributions of source code must retain the above 15b9c5d6a6SOren Duer * copyright notice, this list of conditions and the following 16b9c5d6a6SOren Duer * disclaimer. 17b9c5d6a6SOren Duer * 18b9c5d6a6SOren Duer * - Redistributions in binary form must reproduce the above 19b9c5d6a6SOren Duer * copyright notice, this list of conditions and the following 20b9c5d6a6SOren Duer * disclaimer in the documentation and/or other materials 21b9c5d6a6SOren Duer * provided with the distribution. 22b9c5d6a6SOren Duer * 23b9c5d6a6SOren Duer * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24b9c5d6a6SOren Duer * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25b9c5d6a6SOren Duer * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26b9c5d6a6SOren Duer * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27b9c5d6a6SOren Duer * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28b9c5d6a6SOren Duer * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29b9c5d6a6SOren Duer * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30b9c5d6a6SOren Duer * SOFTWARE. 31b9c5d6a6SOren Duer */ 32b9c5d6a6SOren Duer 33b9c5d6a6SOren Duer #include <rdma/ib_mad.h> 34b9c5d6a6SOren Duer #include <rdma/ib_smi.h> 35b9c5d6a6SOren Duer #include <rdma/ib_cache.h> 36b9c5d6a6SOren Duer #include <rdma/ib_sa.h> 37b9c5d6a6SOren Duer 38b9c5d6a6SOren Duer #include <linux/mlx4/cmd.h> 39b9c5d6a6SOren Duer #include <linux/rbtree.h> 40b9c5d6a6SOren Duer #include <linux/delay.h> 41b9c5d6a6SOren Duer 42b9c5d6a6SOren Duer #include "mlx4_ib.h" 43b9c5d6a6SOren Duer 44b9c5d6a6SOren Duer #define MAX_VFS 80 45b9c5d6a6SOren Duer #define MAX_PEND_REQS_PER_FUNC 4 46b9c5d6a6SOren Duer #define MAD_TIMEOUT_MS 2000 47b9c5d6a6SOren Duer 48b9c5d6a6SOren Duer #define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg) 49b9c5d6a6SOren Duer #define mcg_error(fmt, arg...) pr_err(fmt, ##arg) 50b9c5d6a6SOren Duer #define mcg_warn_group(group, format, arg...) \ 51b9c5d6a6SOren Duer pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\ 52b9c5d6a6SOren Duer (group)->name, group->demux->port, ## arg) 53b9c5d6a6SOren Duer 542cb8e7f8SJack Morgenstein #define mcg_debug_group(group, format, arg...) \ 552cb8e7f8SJack Morgenstein pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\ 562cb8e7f8SJack Morgenstein (group)->name, (group)->demux->port, ## arg) 572cb8e7f8SJack Morgenstein 58b9c5d6a6SOren Duer #define mcg_error_group(group, format, arg...) \ 59b9c5d6a6SOren Duer pr_err(" %16s: " format, (group)->name, ## arg) 60b9c5d6a6SOren Duer 61b9c5d6a6SOren Duer 62b9c5d6a6SOren Duer static union ib_gid mgid0; 63b9c5d6a6SOren Duer 64b9c5d6a6SOren Duer static struct workqueue_struct *clean_wq; 65b9c5d6a6SOren Duer 66b9c5d6a6SOren Duer enum mcast_state { 67b9c5d6a6SOren Duer MCAST_NOT_MEMBER = 0, 68b9c5d6a6SOren Duer MCAST_MEMBER, 69b9c5d6a6SOren Duer }; 70b9c5d6a6SOren Duer 71b9c5d6a6SOren Duer enum mcast_group_state { 72b9c5d6a6SOren Duer MCAST_IDLE, 73b9c5d6a6SOren Duer MCAST_JOIN_SENT, 74b9c5d6a6SOren Duer MCAST_LEAVE_SENT, 75b9c5d6a6SOren Duer MCAST_RESP_READY 76b9c5d6a6SOren Duer }; 77b9c5d6a6SOren Duer 78b9c5d6a6SOren Duer struct mcast_member { 79b9c5d6a6SOren Duer enum mcast_state state; 80b9c5d6a6SOren Duer uint8_t join_state; 81b9c5d6a6SOren Duer int num_pend_reqs; 82b9c5d6a6SOren Duer struct list_head pending; 83b9c5d6a6SOren Duer }; 84b9c5d6a6SOren Duer 85b9c5d6a6SOren Duer struct ib_sa_mcmember_data { 86b9c5d6a6SOren Duer union ib_gid mgid; 87b9c5d6a6SOren Duer union ib_gid port_gid; 88b9c5d6a6SOren Duer __be32 qkey; 89b9c5d6a6SOren Duer __be16 mlid; 90b9c5d6a6SOren Duer u8 mtusel_mtu; 91b9c5d6a6SOren Duer u8 tclass; 92b9c5d6a6SOren Duer __be16 pkey; 93b9c5d6a6SOren Duer u8 ratesel_rate; 94b9c5d6a6SOren Duer u8 lifetmsel_lifetm; 95b9c5d6a6SOren Duer __be32 sl_flowlabel_hoplimit; 96b9c5d6a6SOren Duer u8 scope_join_state; 97b9c5d6a6SOren Duer u8 proxy_join; 98b9c5d6a6SOren Duer u8 reserved[2]; 9904ef0f1aSshamir rabinovitch } __packed __aligned(4); 100b9c5d6a6SOren Duer 101b9c5d6a6SOren Duer struct mcast_group { 102b9c5d6a6SOren Duer struct ib_sa_mcmember_data rec; 103b9c5d6a6SOren Duer struct rb_node node; 104b9c5d6a6SOren Duer struct list_head mgid0_list; 105b9c5d6a6SOren Duer struct mlx4_ib_demux_ctx *demux; 106b9c5d6a6SOren Duer struct mcast_member func[MAX_VFS]; 107b9c5d6a6SOren Duer struct mutex lock; 108b9c5d6a6SOren Duer struct work_struct work; 109b9c5d6a6SOren Duer struct list_head pending_list; 110b9c5d6a6SOren Duer int members[3]; 111b9c5d6a6SOren Duer enum mcast_group_state state; 112b9c5d6a6SOren Duer enum mcast_group_state prev_state; 113b9c5d6a6SOren Duer struct ib_sa_mad response_sa_mad; 114b9c5d6a6SOren Duer __be64 last_req_tid; 115b9c5d6a6SOren Duer 116b9c5d6a6SOren Duer char name[33]; /* MGID string */ 117c1e7e466SJack Morgenstein struct device_attribute dentry; 118b9c5d6a6SOren Duer 119b9c5d6a6SOren Duer /* refcount is the reference count for the following: 120b9c5d6a6SOren Duer 1. Each queued request 121b9c5d6a6SOren Duer 2. Each invocation of the worker thread 122b9c5d6a6SOren Duer 3. Membership of the port at the SA 123b9c5d6a6SOren Duer */ 124b9c5d6a6SOren Duer atomic_t refcount; 125b9c5d6a6SOren Duer 126b9c5d6a6SOren Duer /* delayed work to clean pending SM request */ 127b9c5d6a6SOren Duer struct delayed_work timeout_work; 128b9c5d6a6SOren Duer struct list_head cleanup_list; 129b9c5d6a6SOren Duer }; 130b9c5d6a6SOren Duer 131b9c5d6a6SOren Duer struct mcast_req { 132b9c5d6a6SOren Duer int func; 133b9c5d6a6SOren Duer struct ib_sa_mad sa_mad; 134b9c5d6a6SOren Duer struct list_head group_list; 135b9c5d6a6SOren Duer struct list_head func_list; 136b9c5d6a6SOren Duer struct mcast_group *group; 137b9c5d6a6SOren Duer int clean; 138b9c5d6a6SOren Duer }; 139b9c5d6a6SOren Duer 140b9c5d6a6SOren Duer 141b9c5d6a6SOren Duer #define safe_atomic_dec(ref) \ 142b9c5d6a6SOren Duer do {\ 143b9c5d6a6SOren Duer if (atomic_dec_and_test(ref)) \ 144b9c5d6a6SOren Duer mcg_warn_group(group, "did not expect to reach zero\n"); \ 145b9c5d6a6SOren Duer } while (0) 146b9c5d6a6SOren Duer 147b9c5d6a6SOren Duer static const char *get_state_string(enum mcast_group_state state) 148b9c5d6a6SOren Duer { 149b9c5d6a6SOren Duer switch (state) { 150b9c5d6a6SOren Duer case MCAST_IDLE: 151b9c5d6a6SOren Duer return "MCAST_IDLE"; 152b9c5d6a6SOren Duer case MCAST_JOIN_SENT: 153b9c5d6a6SOren Duer return "MCAST_JOIN_SENT"; 154b9c5d6a6SOren Duer case MCAST_LEAVE_SENT: 155b9c5d6a6SOren Duer return "MCAST_LEAVE_SENT"; 156b9c5d6a6SOren Duer case MCAST_RESP_READY: 157b9c5d6a6SOren Duer return "MCAST_RESP_READY"; 158b9c5d6a6SOren Duer } 159b9c5d6a6SOren Duer return "Invalid State"; 160b9c5d6a6SOren Duer } 161b9c5d6a6SOren Duer 162b9c5d6a6SOren Duer static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx, 163b9c5d6a6SOren Duer union ib_gid *mgid) 164b9c5d6a6SOren Duer { 165b9c5d6a6SOren Duer struct rb_node *node = ctx->mcg_table.rb_node; 166b9c5d6a6SOren Duer struct mcast_group *group; 167b9c5d6a6SOren Duer int ret; 168b9c5d6a6SOren Duer 169b9c5d6a6SOren Duer while (node) { 170b9c5d6a6SOren Duer group = rb_entry(node, struct mcast_group, node); 171b9c5d6a6SOren Duer ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); 172b9c5d6a6SOren Duer if (!ret) 173b9c5d6a6SOren Duer return group; 174b9c5d6a6SOren Duer 175b9c5d6a6SOren Duer if (ret < 0) 176b9c5d6a6SOren Duer node = node->rb_left; 177b9c5d6a6SOren Duer else 178b9c5d6a6SOren Duer node = node->rb_right; 179b9c5d6a6SOren Duer } 180b9c5d6a6SOren Duer return NULL; 181b9c5d6a6SOren Duer } 182b9c5d6a6SOren Duer 183b9c5d6a6SOren Duer static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx, 184b9c5d6a6SOren Duer struct mcast_group *group) 185b9c5d6a6SOren Duer { 186b9c5d6a6SOren Duer struct rb_node **link = &ctx->mcg_table.rb_node; 187b9c5d6a6SOren Duer struct rb_node *parent = NULL; 188b9c5d6a6SOren Duer struct mcast_group *cur_group; 189b9c5d6a6SOren Duer int ret; 190b9c5d6a6SOren Duer 191b9c5d6a6SOren Duer while (*link) { 192b9c5d6a6SOren Duer parent = *link; 193b9c5d6a6SOren Duer cur_group = rb_entry(parent, struct mcast_group, node); 194b9c5d6a6SOren Duer 195b9c5d6a6SOren Duer ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, 196b9c5d6a6SOren Duer sizeof group->rec.mgid); 197b9c5d6a6SOren Duer if (ret < 0) 198b9c5d6a6SOren Duer link = &(*link)->rb_left; 199b9c5d6a6SOren Duer else if (ret > 0) 200b9c5d6a6SOren Duer link = &(*link)->rb_right; 201b9c5d6a6SOren Duer else 202b9c5d6a6SOren Duer return cur_group; 203b9c5d6a6SOren Duer } 204b9c5d6a6SOren Duer rb_link_node(&group->node, parent, link); 205b9c5d6a6SOren Duer rb_insert_color(&group->node, &ctx->mcg_table); 206b9c5d6a6SOren Duer return NULL; 207b9c5d6a6SOren Duer } 208b9c5d6a6SOren Duer 209b9c5d6a6SOren Duer static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) 210b9c5d6a6SOren Duer { 211b9c5d6a6SOren Duer struct mlx4_ib_dev *dev = ctx->dev; 21290898850SDasaratharaman Chandramouli struct rdma_ah_attr ah_attr; 21390c1d8b6SJack Morgenstein unsigned long flags; 214b9c5d6a6SOren Duer 21590c1d8b6SJack Morgenstein spin_lock_irqsave(&dev->sm_lock, flags); 216b9c5d6a6SOren Duer if (!dev->sm_ah[ctx->port - 1]) { 217b9c5d6a6SOren Duer /* port is not yet Active, sm_ah not ready */ 21890c1d8b6SJack Morgenstein spin_unlock_irqrestore(&dev->sm_lock, flags); 219b9c5d6a6SOren Duer return -EAGAIN; 220b9c5d6a6SOren Duer } 221b9c5d6a6SOren Duer mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); 22290c1d8b6SJack Morgenstein spin_unlock_irqrestore(&dev->sm_lock, flags); 2235ea8bbfcSJack Morgenstein return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), 2245ea8bbfcSJack Morgenstein ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, 225dbf727deSMatan Barak &ah_attr, NULL, 0xffff, mad); 226b9c5d6a6SOren Duer } 227b9c5d6a6SOren Duer 228b9c5d6a6SOren Duer static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx, 229b9c5d6a6SOren Duer struct ib_mad *mad) 230b9c5d6a6SOren Duer { 231b9c5d6a6SOren Duer struct mlx4_ib_dev *dev = ctx->dev; 232b9c5d6a6SOren Duer struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1]; 233b9c5d6a6SOren Duer struct ib_wc wc; 23490898850SDasaratharaman Chandramouli struct rdma_ah_attr ah_attr; 235b9c5d6a6SOren Duer 236b9c5d6a6SOren Duer /* Our agent might not yet be registered when mads start to arrive */ 237b9c5d6a6SOren Duer if (!agent) 238b9c5d6a6SOren Duer return -EAGAIN; 239b9c5d6a6SOren Duer 240*bfbfd661SDasaratharaman Chandramouli rdma_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); 241b9c5d6a6SOren Duer 2422c75d2ccSJack Morgenstein if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index)) 2432c75d2ccSJack Morgenstein return -EINVAL; 244b9c5d6a6SOren Duer wc.sl = 0; 245b9c5d6a6SOren Duer wc.dlid_path_bits = 0; 246b9c5d6a6SOren Duer wc.port_num = ctx->port; 247b9c5d6a6SOren Duer wc.slid = ah_attr.dlid; /* opensm lid */ 248b9c5d6a6SOren Duer wc.src_qp = 1; 249b9c5d6a6SOren Duer return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad); 250b9c5d6a6SOren Duer } 251b9c5d6a6SOren Duer 252b9c5d6a6SOren Duer static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad) 253b9c5d6a6SOren Duer { 254b9c5d6a6SOren Duer struct ib_sa_mad mad; 255b9c5d6a6SOren Duer struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data; 256b9c5d6a6SOren Duer int ret; 257b9c5d6a6SOren Duer 258b9c5d6a6SOren Duer /* we rely on a mad request as arrived from a VF */ 259b9c5d6a6SOren Duer memcpy(&mad, sa_mad, sizeof mad); 260b9c5d6a6SOren Duer 261b9c5d6a6SOren Duer /* fix port GID to be the real one (slave 0) */ 262b9c5d6a6SOren Duer sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0]; 263b9c5d6a6SOren Duer 264b9c5d6a6SOren Duer /* assign our own TID */ 265b9c5d6a6SOren Duer mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); 266b9c5d6a6SOren Duer group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ 267b9c5d6a6SOren Duer 268b9c5d6a6SOren Duer ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); 269b9c5d6a6SOren Duer /* set timeout handler */ 270b9c5d6a6SOren Duer if (!ret) { 271b9c5d6a6SOren Duer /* calls mlx4_ib_mcg_timeout_handler */ 272b9c5d6a6SOren Duer queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, 273b9c5d6a6SOren Duer msecs_to_jiffies(MAD_TIMEOUT_MS)); 274b9c5d6a6SOren Duer } 275b9c5d6a6SOren Duer 276b9c5d6a6SOren Duer return ret; 277b9c5d6a6SOren Duer } 278b9c5d6a6SOren Duer 279b9c5d6a6SOren Duer static int send_leave_to_wire(struct mcast_group *group, u8 join_state) 280b9c5d6a6SOren Duer { 281b9c5d6a6SOren Duer struct ib_sa_mad mad; 282b9c5d6a6SOren Duer struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data; 283b9c5d6a6SOren Duer int ret; 284b9c5d6a6SOren Duer 285b9c5d6a6SOren Duer memset(&mad, 0, sizeof mad); 286b9c5d6a6SOren Duer mad.mad_hdr.base_version = 1; 287b9c5d6a6SOren Duer mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; 288b9c5d6a6SOren Duer mad.mad_hdr.class_version = 2; 289b9c5d6a6SOren Duer mad.mad_hdr.method = IB_SA_METHOD_DELETE; 290b9c5d6a6SOren Duer mad.mad_hdr.status = cpu_to_be16(0); 291b9c5d6a6SOren Duer mad.mad_hdr.class_specific = cpu_to_be16(0); 292b9c5d6a6SOren Duer mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); 293b9c5d6a6SOren Duer group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ 294b9c5d6a6SOren Duer mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); 295b9c5d6a6SOren Duer mad.mad_hdr.attr_mod = cpu_to_be32(0); 296b9c5d6a6SOren Duer mad.sa_hdr.sm_key = 0x0; 297b9c5d6a6SOren Duer mad.sa_hdr.attr_offset = cpu_to_be16(7); 298b9c5d6a6SOren Duer mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID | 299b9c5d6a6SOren Duer IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE; 300b9c5d6a6SOren Duer 301b9c5d6a6SOren Duer *sa_data = group->rec; 302b9c5d6a6SOren Duer sa_data->scope_join_state = join_state; 303b9c5d6a6SOren Duer 304b9c5d6a6SOren Duer ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); 305b9c5d6a6SOren Duer if (ret) 306b9c5d6a6SOren Duer group->state = MCAST_IDLE; 307b9c5d6a6SOren Duer 308b9c5d6a6SOren Duer /* set timeout handler */ 309b9c5d6a6SOren Duer if (!ret) { 310b9c5d6a6SOren Duer /* calls mlx4_ib_mcg_timeout_handler */ 311b9c5d6a6SOren Duer queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, 312b9c5d6a6SOren Duer msecs_to_jiffies(MAD_TIMEOUT_MS)); 313b9c5d6a6SOren Duer } 314b9c5d6a6SOren Duer 315b9c5d6a6SOren Duer return ret; 316b9c5d6a6SOren Duer } 317b9c5d6a6SOren Duer 318b9c5d6a6SOren Duer static int send_reply_to_slave(int slave, struct mcast_group *group, 319b9c5d6a6SOren Duer struct ib_sa_mad *req_sa_mad, u16 status) 320b9c5d6a6SOren Duer { 321b9c5d6a6SOren Duer struct ib_sa_mad mad; 322b9c5d6a6SOren Duer struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data; 323b9c5d6a6SOren Duer struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data; 324b9c5d6a6SOren Duer int ret; 325b9c5d6a6SOren Duer 326b9c5d6a6SOren Duer memset(&mad, 0, sizeof mad); 327b9c5d6a6SOren Duer mad.mad_hdr.base_version = 1; 328b9c5d6a6SOren Duer mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; 329b9c5d6a6SOren Duer mad.mad_hdr.class_version = 2; 330b9c5d6a6SOren Duer mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; 331b9c5d6a6SOren Duer mad.mad_hdr.status = cpu_to_be16(status); 332b9c5d6a6SOren Duer mad.mad_hdr.class_specific = cpu_to_be16(0); 333b9c5d6a6SOren Duer mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid; 334b9c5d6a6SOren Duer *(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */ 335b9c5d6a6SOren Duer mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); 336b9c5d6a6SOren Duer mad.mad_hdr.attr_mod = cpu_to_be32(0); 337b9c5d6a6SOren Duer mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key; 338b9c5d6a6SOren Duer mad.sa_hdr.attr_offset = cpu_to_be16(7); 339b9c5d6a6SOren Duer mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */ 340b9c5d6a6SOren Duer 341b9c5d6a6SOren Duer *sa_data = group->rec; 342b9c5d6a6SOren Duer 343b9c5d6a6SOren Duer /* reconstruct VF's requested join_state and port_gid */ 344b9c5d6a6SOren Duer sa_data->scope_join_state &= 0xf0; 345b9c5d6a6SOren Duer sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f); 346b9c5d6a6SOren Duer memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid); 347b9c5d6a6SOren Duer 348b9c5d6a6SOren Duer ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad); 349b9c5d6a6SOren Duer return ret; 350b9c5d6a6SOren Duer } 351b9c5d6a6SOren Duer 352b9c5d6a6SOren Duer static int check_selector(ib_sa_comp_mask comp_mask, 353b9c5d6a6SOren Duer ib_sa_comp_mask selector_mask, 354b9c5d6a6SOren Duer ib_sa_comp_mask value_mask, 355b9c5d6a6SOren Duer u8 src_value, u8 dst_value) 356b9c5d6a6SOren Duer { 357b9c5d6a6SOren Duer int err; 358b9c5d6a6SOren Duer u8 selector = dst_value >> 6; 359b9c5d6a6SOren Duer dst_value &= 0x3f; 360b9c5d6a6SOren Duer src_value &= 0x3f; 361b9c5d6a6SOren Duer 362b9c5d6a6SOren Duer if (!(comp_mask & selector_mask) || !(comp_mask & value_mask)) 363b9c5d6a6SOren Duer return 0; 364b9c5d6a6SOren Duer 365b9c5d6a6SOren Duer switch (selector) { 366b9c5d6a6SOren Duer case IB_SA_GT: 367b9c5d6a6SOren Duer err = (src_value <= dst_value); 368b9c5d6a6SOren Duer break; 369b9c5d6a6SOren Duer case IB_SA_LT: 370b9c5d6a6SOren Duer err = (src_value >= dst_value); 371b9c5d6a6SOren Duer break; 372b9c5d6a6SOren Duer case IB_SA_EQ: 373b9c5d6a6SOren Duer err = (src_value != dst_value); 374b9c5d6a6SOren Duer break; 375b9c5d6a6SOren Duer default: 376b9c5d6a6SOren Duer err = 0; 377b9c5d6a6SOren Duer break; 378b9c5d6a6SOren Duer } 379b9c5d6a6SOren Duer 380b9c5d6a6SOren Duer return err; 381b9c5d6a6SOren Duer } 382b9c5d6a6SOren Duer 383b9c5d6a6SOren Duer static u16 cmp_rec(struct ib_sa_mcmember_data *src, 384b9c5d6a6SOren Duer struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask) 385b9c5d6a6SOren Duer { 386b9c5d6a6SOren Duer /* src is group record, dst is request record */ 387b9c5d6a6SOren Duer /* MGID must already match */ 388b9c5d6a6SOren Duer /* Port_GID we always replace to our Port_GID, so it is a match */ 389b9c5d6a6SOren Duer 390b9c5d6a6SOren Duer #define MAD_STATUS_REQ_INVALID 0x0200 391b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey) 392b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 393b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid) 394b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 395b9c5d6a6SOren Duer if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR, 396b9c5d6a6SOren Duer IB_SA_MCMEMBER_REC_MTU, 397b9c5d6a6SOren Duer src->mtusel_mtu, dst->mtusel_mtu)) 398b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 399b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS && 400b9c5d6a6SOren Duer src->tclass != dst->tclass) 401b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 402b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey) 403b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 404b9c5d6a6SOren Duer if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR, 405b9c5d6a6SOren Duer IB_SA_MCMEMBER_REC_RATE, 406b9c5d6a6SOren Duer src->ratesel_rate, dst->ratesel_rate)) 407b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 408b9c5d6a6SOren Duer if (check_selector(comp_mask, 409b9c5d6a6SOren Duer IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR, 410b9c5d6a6SOren Duer IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME, 411b9c5d6a6SOren Duer src->lifetmsel_lifetm, dst->lifetmsel_lifetm)) 412b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 413b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_SL && 414b9c5d6a6SOren Duer (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) != 415b9c5d6a6SOren Duer (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000)) 416b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 417b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL && 418b9c5d6a6SOren Duer (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) != 419b9c5d6a6SOren Duer (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00)) 420b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 421b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT && 422b9c5d6a6SOren Duer (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) != 423b9c5d6a6SOren Duer (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff)) 424b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 425b9c5d6a6SOren Duer if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && 426b9c5d6a6SOren Duer (src->scope_join_state & 0xf0) != 427b9c5d6a6SOren Duer (dst->scope_join_state & 0xf0)) 428b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 429b9c5d6a6SOren Duer 430b9c5d6a6SOren Duer /* join_state checked separately, proxy_join ignored */ 431b9c5d6a6SOren Duer 432b9c5d6a6SOren Duer return 0; 433b9c5d6a6SOren Duer } 434b9c5d6a6SOren Duer 435b9c5d6a6SOren Duer /* release group, return 1 if this was last release and group is destroyed 436b9c5d6a6SOren Duer * timout work is canceled sync */ 437b9c5d6a6SOren Duer static int release_group(struct mcast_group *group, int from_timeout_handler) 438b9c5d6a6SOren Duer { 439b9c5d6a6SOren Duer struct mlx4_ib_demux_ctx *ctx = group->demux; 440b9c5d6a6SOren Duer int nzgroup; 441b9c5d6a6SOren Duer 442b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 443b9c5d6a6SOren Duer mutex_lock(&group->lock); 444b9c5d6a6SOren Duer if (atomic_dec_and_test(&group->refcount)) { 445b9c5d6a6SOren Duer if (!from_timeout_handler) { 446b9c5d6a6SOren Duer if (group->state != MCAST_IDLE && 447b9c5d6a6SOren Duer !cancel_delayed_work(&group->timeout_work)) { 448b9c5d6a6SOren Duer atomic_inc(&group->refcount); 449b9c5d6a6SOren Duer mutex_unlock(&group->lock); 450b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 451b9c5d6a6SOren Duer return 0; 452b9c5d6a6SOren Duer } 453b9c5d6a6SOren Duer } 454b9c5d6a6SOren Duer 455b9c5d6a6SOren Duer nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0); 456c1e7e466SJack Morgenstein if (nzgroup) 457c1e7e466SJack Morgenstein del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); 458b9c5d6a6SOren Duer if (!list_empty(&group->pending_list)) 459b9c5d6a6SOren Duer mcg_warn_group(group, "releasing a group with non empty pending list\n"); 460b9c5d6a6SOren Duer if (nzgroup) 461b9c5d6a6SOren Duer rb_erase(&group->node, &ctx->mcg_table); 462b9c5d6a6SOren Duer list_del_init(&group->mgid0_list); 463b9c5d6a6SOren Duer mutex_unlock(&group->lock); 464b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 465b9c5d6a6SOren Duer kfree(group); 466b9c5d6a6SOren Duer return 1; 467b9c5d6a6SOren Duer } else { 468b9c5d6a6SOren Duer mutex_unlock(&group->lock); 469b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 470b9c5d6a6SOren Duer } 471b9c5d6a6SOren Duer return 0; 472b9c5d6a6SOren Duer } 473b9c5d6a6SOren Duer 474b9c5d6a6SOren Duer static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) 475b9c5d6a6SOren Duer { 476b9c5d6a6SOren Duer int i; 477b9c5d6a6SOren Duer 478b9c5d6a6SOren Duer for (i = 0; i < 3; i++, join_state >>= 1) 479b9c5d6a6SOren Duer if (join_state & 0x1) 480b9c5d6a6SOren Duer group->members[i] += inc; 481b9c5d6a6SOren Duer } 482b9c5d6a6SOren Duer 483b9c5d6a6SOren Duer static u8 get_leave_state(struct mcast_group *group) 484b9c5d6a6SOren Duer { 485b9c5d6a6SOren Duer u8 leave_state = 0; 486b9c5d6a6SOren Duer int i; 487b9c5d6a6SOren Duer 488b9c5d6a6SOren Duer for (i = 0; i < 3; i++) 489b9c5d6a6SOren Duer if (!group->members[i]) 490b9c5d6a6SOren Duer leave_state |= (1 << i); 491b9c5d6a6SOren Duer 492e5ac40cdSAlex Vesker return leave_state & (group->rec.scope_join_state & 0xf); 493b9c5d6a6SOren Duer } 494b9c5d6a6SOren Duer 495b9c5d6a6SOren Duer static int join_group(struct mcast_group *group, int slave, u8 join_mask) 496b9c5d6a6SOren Duer { 497b9c5d6a6SOren Duer int ret = 0; 498b9c5d6a6SOren Duer u8 join_state; 499b9c5d6a6SOren Duer 500b9c5d6a6SOren Duer /* remove bits that slave is already member of, and adjust */ 501b9c5d6a6SOren Duer join_state = join_mask & (~group->func[slave].join_state); 502b9c5d6a6SOren Duer adjust_membership(group, join_state, 1); 503b9c5d6a6SOren Duer group->func[slave].join_state |= join_state; 504b9c5d6a6SOren Duer if (group->func[slave].state != MCAST_MEMBER && join_state) { 505b9c5d6a6SOren Duer group->func[slave].state = MCAST_MEMBER; 506b9c5d6a6SOren Duer ret = 1; 507b9c5d6a6SOren Duer } 508b9c5d6a6SOren Duer return ret; 509b9c5d6a6SOren Duer } 510b9c5d6a6SOren Duer 511b9c5d6a6SOren Duer static int leave_group(struct mcast_group *group, int slave, u8 leave_state) 512b9c5d6a6SOren Duer { 513b9c5d6a6SOren Duer int ret = 0; 514b9c5d6a6SOren Duer 515b9c5d6a6SOren Duer adjust_membership(group, leave_state, -1); 516b9c5d6a6SOren Duer group->func[slave].join_state &= ~leave_state; 517b9c5d6a6SOren Duer if (!group->func[slave].join_state) { 518b9c5d6a6SOren Duer group->func[slave].state = MCAST_NOT_MEMBER; 519b9c5d6a6SOren Duer ret = 1; 520b9c5d6a6SOren Duer } 521b9c5d6a6SOren Duer return ret; 522b9c5d6a6SOren Duer } 523b9c5d6a6SOren Duer 524b9c5d6a6SOren Duer static int check_leave(struct mcast_group *group, int slave, u8 leave_mask) 525b9c5d6a6SOren Duer { 526b9c5d6a6SOren Duer if (group->func[slave].state != MCAST_MEMBER) 527b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 528b9c5d6a6SOren Duer 529b9c5d6a6SOren Duer /* make sure we're not deleting unset bits */ 530b9c5d6a6SOren Duer if (~group->func[slave].join_state & leave_mask) 531b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 532b9c5d6a6SOren Duer 533b9c5d6a6SOren Duer if (!leave_mask) 534b9c5d6a6SOren Duer return MAD_STATUS_REQ_INVALID; 535b9c5d6a6SOren Duer 536b9c5d6a6SOren Duer return 0; 537b9c5d6a6SOren Duer } 538b9c5d6a6SOren Duer 539b9c5d6a6SOren Duer static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) 540b9c5d6a6SOren Duer { 541b9c5d6a6SOren Duer struct delayed_work *delay = to_delayed_work(work); 542b9c5d6a6SOren Duer struct mcast_group *group; 543b9c5d6a6SOren Duer struct mcast_req *req = NULL; 544b9c5d6a6SOren Duer 545b9c5d6a6SOren Duer group = container_of(delay, typeof(*group), timeout_work); 546b9c5d6a6SOren Duer 547b9c5d6a6SOren Duer mutex_lock(&group->lock); 548b9c5d6a6SOren Duer if (group->state == MCAST_JOIN_SENT) { 549b9c5d6a6SOren Duer if (!list_empty(&group->pending_list)) { 550b9c5d6a6SOren Duer req = list_first_entry(&group->pending_list, struct mcast_req, group_list); 551b9c5d6a6SOren Duer list_del(&req->group_list); 552b9c5d6a6SOren Duer list_del(&req->func_list); 553b9c5d6a6SOren Duer --group->func[req->func].num_pend_reqs; 554b9c5d6a6SOren Duer mutex_unlock(&group->lock); 555b9c5d6a6SOren Duer kfree(req); 556b9c5d6a6SOren Duer if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { 557b9c5d6a6SOren Duer if (release_group(group, 1)) 558b9c5d6a6SOren Duer return; 559b9c5d6a6SOren Duer } else { 560b9c5d6a6SOren Duer kfree(group); 561b9c5d6a6SOren Duer return; 562b9c5d6a6SOren Duer } 563b9c5d6a6SOren Duer mutex_lock(&group->lock); 564b9c5d6a6SOren Duer } else 565b9c5d6a6SOren Duer mcg_warn_group(group, "DRIVER BUG\n"); 566b9c5d6a6SOren Duer } else if (group->state == MCAST_LEAVE_SENT) { 567e5ac40cdSAlex Vesker if (group->rec.scope_join_state & 0xf) 568e5ac40cdSAlex Vesker group->rec.scope_join_state &= 0xf0; 569b9c5d6a6SOren Duer group->state = MCAST_IDLE; 570b9c5d6a6SOren Duer mutex_unlock(&group->lock); 571b9c5d6a6SOren Duer if (release_group(group, 1)) 572b9c5d6a6SOren Duer return; 573b9c5d6a6SOren Duer mutex_lock(&group->lock); 574b9c5d6a6SOren Duer } else 575b9c5d6a6SOren Duer mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state)); 576b9c5d6a6SOren Duer group->state = MCAST_IDLE; 577b9c5d6a6SOren Duer atomic_inc(&group->refcount); 578b9c5d6a6SOren Duer if (!queue_work(group->demux->mcg_wq, &group->work)) 579b9c5d6a6SOren Duer safe_atomic_dec(&group->refcount); 580b9c5d6a6SOren Duer 581b9c5d6a6SOren Duer mutex_unlock(&group->lock); 582b9c5d6a6SOren Duer } 583b9c5d6a6SOren Duer 584b9c5d6a6SOren Duer static int handle_leave_req(struct mcast_group *group, u8 leave_mask, 585b9c5d6a6SOren Duer struct mcast_req *req) 586b9c5d6a6SOren Duer { 587b9c5d6a6SOren Duer u16 status; 588b9c5d6a6SOren Duer 589b9c5d6a6SOren Duer if (req->clean) 590b9c5d6a6SOren Duer leave_mask = group->func[req->func].join_state; 591b9c5d6a6SOren Duer 592b9c5d6a6SOren Duer status = check_leave(group, req->func, leave_mask); 593b9c5d6a6SOren Duer if (!status) 594b9c5d6a6SOren Duer leave_group(group, req->func, leave_mask); 595b9c5d6a6SOren Duer 596b9c5d6a6SOren Duer if (!req->clean) 597b9c5d6a6SOren Duer send_reply_to_slave(req->func, group, &req->sa_mad, status); 598b9c5d6a6SOren Duer --group->func[req->func].num_pend_reqs; 599b9c5d6a6SOren Duer list_del(&req->group_list); 600b9c5d6a6SOren Duer list_del(&req->func_list); 601b9c5d6a6SOren Duer kfree(req); 602b9c5d6a6SOren Duer return 1; 603b9c5d6a6SOren Duer } 604b9c5d6a6SOren Duer 605b9c5d6a6SOren Duer static int handle_join_req(struct mcast_group *group, u8 join_mask, 606b9c5d6a6SOren Duer struct mcast_req *req) 607b9c5d6a6SOren Duer { 608e5ac40cdSAlex Vesker u8 group_join_state = group->rec.scope_join_state & 0xf; 609b9c5d6a6SOren Duer int ref = 0; 610b9c5d6a6SOren Duer u16 status; 611b9c5d6a6SOren Duer struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; 612b9c5d6a6SOren Duer 613b9c5d6a6SOren Duer if (join_mask == (group_join_state & join_mask)) { 614b9c5d6a6SOren Duer /* port's membership need not change */ 615b9c5d6a6SOren Duer status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); 616b9c5d6a6SOren Duer if (!status) 617b9c5d6a6SOren Duer join_group(group, req->func, join_mask); 618b9c5d6a6SOren Duer 619b9c5d6a6SOren Duer --group->func[req->func].num_pend_reqs; 620b9c5d6a6SOren Duer send_reply_to_slave(req->func, group, &req->sa_mad, status); 621b9c5d6a6SOren Duer list_del(&req->group_list); 622b9c5d6a6SOren Duer list_del(&req->func_list); 623b9c5d6a6SOren Duer kfree(req); 624b9c5d6a6SOren Duer ++ref; 625b9c5d6a6SOren Duer } else { 626b9c5d6a6SOren Duer /* port's membership needs to be updated */ 627b9c5d6a6SOren Duer group->prev_state = group->state; 628b9c5d6a6SOren Duer if (send_join_to_wire(group, &req->sa_mad)) { 629b9c5d6a6SOren Duer --group->func[req->func].num_pend_reqs; 630b9c5d6a6SOren Duer list_del(&req->group_list); 631b9c5d6a6SOren Duer list_del(&req->func_list); 632b9c5d6a6SOren Duer kfree(req); 633b9c5d6a6SOren Duer ref = 1; 634b9c5d6a6SOren Duer group->state = group->prev_state; 635b9c5d6a6SOren Duer } else 636b9c5d6a6SOren Duer group->state = MCAST_JOIN_SENT; 637b9c5d6a6SOren Duer } 638b9c5d6a6SOren Duer 639b9c5d6a6SOren Duer return ref; 640b9c5d6a6SOren Duer } 641b9c5d6a6SOren Duer 642b9c5d6a6SOren Duer static void mlx4_ib_mcg_work_handler(struct work_struct *work) 643b9c5d6a6SOren Duer { 644b9c5d6a6SOren Duer struct mcast_group *group; 645b9c5d6a6SOren Duer struct mcast_req *req = NULL; 646b9c5d6a6SOren Duer struct ib_sa_mcmember_data *sa_data; 647b9c5d6a6SOren Duer u8 req_join_state; 648b9c5d6a6SOren Duer int rc = 1; /* release_count - this is for the scheduled work */ 649b9c5d6a6SOren Duer u16 status; 650b9c5d6a6SOren Duer u8 method; 651b9c5d6a6SOren Duer 652b9c5d6a6SOren Duer group = container_of(work, typeof(*group), work); 653b9c5d6a6SOren Duer 654b9c5d6a6SOren Duer mutex_lock(&group->lock); 655b9c5d6a6SOren Duer 656b9c5d6a6SOren Duer /* First, let's see if a response from SM is waiting regarding this group. 657b9c5d6a6SOren Duer * If so, we need to update the group's REC. If this is a bad response, we 658b9c5d6a6SOren Duer * may need to send a bad response to a VF waiting for it. If VF is waiting 659b9c5d6a6SOren Duer * and this is a good response, the VF will be answered later in this func. */ 660b9c5d6a6SOren Duer if (group->state == MCAST_RESP_READY) { 661b9c5d6a6SOren Duer /* cancels mlx4_ib_mcg_timeout_handler */ 662b9c5d6a6SOren Duer cancel_delayed_work(&group->timeout_work); 663b9c5d6a6SOren Duer status = be16_to_cpu(group->response_sa_mad.mad_hdr.status); 664b9c5d6a6SOren Duer method = group->response_sa_mad.mad_hdr.method; 665b9c5d6a6SOren Duer if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) { 666b9c5d6a6SOren Duer mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n", 667b9c5d6a6SOren Duer be64_to_cpu(group->response_sa_mad.mad_hdr.tid), 668b9c5d6a6SOren Duer be64_to_cpu(group->last_req_tid)); 669b9c5d6a6SOren Duer group->state = group->prev_state; 670b9c5d6a6SOren Duer goto process_requests; 671b9c5d6a6SOren Duer } 672b9c5d6a6SOren Duer if (status) { 673b9c5d6a6SOren Duer if (!list_empty(&group->pending_list)) 674b9c5d6a6SOren Duer req = list_first_entry(&group->pending_list, 675b9c5d6a6SOren Duer struct mcast_req, group_list); 676b9c5d6a6SOren Duer if ((method == IB_MGMT_METHOD_GET_RESP)) { 677b9c5d6a6SOren Duer if (req) { 678b9c5d6a6SOren Duer send_reply_to_slave(req->func, group, &req->sa_mad, status); 679b9c5d6a6SOren Duer --group->func[req->func].num_pend_reqs; 680b9c5d6a6SOren Duer list_del(&req->group_list); 681b9c5d6a6SOren Duer list_del(&req->func_list); 682b9c5d6a6SOren Duer kfree(req); 683b9c5d6a6SOren Duer ++rc; 684b9c5d6a6SOren Duer } else 685b9c5d6a6SOren Duer mcg_warn_group(group, "no request for failed join\n"); 686b9c5d6a6SOren Duer } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing) 687b9c5d6a6SOren Duer ++rc; 688b9c5d6a6SOren Duer } else { 689b9c5d6a6SOren Duer u8 resp_join_state; 690b9c5d6a6SOren Duer u8 cur_join_state; 691b9c5d6a6SOren Duer 692b9c5d6a6SOren Duer resp_join_state = ((struct ib_sa_mcmember_data *) 693e5ac40cdSAlex Vesker group->response_sa_mad.data)->scope_join_state & 0xf; 694e5ac40cdSAlex Vesker cur_join_state = group->rec.scope_join_state & 0xf; 695b9c5d6a6SOren Duer 696b9c5d6a6SOren Duer if (method == IB_MGMT_METHOD_GET_RESP) { 697b9c5d6a6SOren Duer /* successfull join */ 698b9c5d6a6SOren Duer if (!cur_join_state && resp_join_state) 699b9c5d6a6SOren Duer --rc; 700b9c5d6a6SOren Duer } else if (!resp_join_state) 701b9c5d6a6SOren Duer ++rc; 702b9c5d6a6SOren Duer memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec); 703b9c5d6a6SOren Duer } 704b9c5d6a6SOren Duer group->state = MCAST_IDLE; 705b9c5d6a6SOren Duer } 706b9c5d6a6SOren Duer 707b9c5d6a6SOren Duer process_requests: 708b9c5d6a6SOren Duer /* We should now go over pending join/leave requests, as long as we are idle. */ 709b9c5d6a6SOren Duer while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) { 710b9c5d6a6SOren Duer req = list_first_entry(&group->pending_list, struct mcast_req, 711b9c5d6a6SOren Duer group_list); 712b9c5d6a6SOren Duer sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; 713e5ac40cdSAlex Vesker req_join_state = sa_data->scope_join_state & 0xf; 714b9c5d6a6SOren Duer 715b9c5d6a6SOren Duer /* For a leave request, we will immediately answer the VF, and 716b9c5d6a6SOren Duer * update our internal counters. The actual leave will be sent 717b9c5d6a6SOren Duer * to SM later, if at all needed. We dequeue the request now. */ 718b9c5d6a6SOren Duer if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE) 719b9c5d6a6SOren Duer rc += handle_leave_req(group, req_join_state, req); 720b9c5d6a6SOren Duer else 721b9c5d6a6SOren Duer rc += handle_join_req(group, req_join_state, req); 722b9c5d6a6SOren Duer } 723b9c5d6a6SOren Duer 724b9c5d6a6SOren Duer /* Handle leaves */ 725b9c5d6a6SOren Duer if (group->state == MCAST_IDLE) { 726b9c5d6a6SOren Duer req_join_state = get_leave_state(group); 727b9c5d6a6SOren Duer if (req_join_state) { 728b9c5d6a6SOren Duer group->rec.scope_join_state &= ~req_join_state; 729b9c5d6a6SOren Duer group->prev_state = group->state; 730b9c5d6a6SOren Duer if (send_leave_to_wire(group, req_join_state)) { 731b9c5d6a6SOren Duer group->state = group->prev_state; 732b9c5d6a6SOren Duer ++rc; 733b9c5d6a6SOren Duer } else 734b9c5d6a6SOren Duer group->state = MCAST_LEAVE_SENT; 735b9c5d6a6SOren Duer } 736b9c5d6a6SOren Duer } 737b9c5d6a6SOren Duer 738b9c5d6a6SOren Duer if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) 739b9c5d6a6SOren Duer goto process_requests; 740b9c5d6a6SOren Duer mutex_unlock(&group->lock); 741b9c5d6a6SOren Duer 742b9c5d6a6SOren Duer while (rc--) 743b9c5d6a6SOren Duer release_group(group, 0); 744b9c5d6a6SOren Duer } 745b9c5d6a6SOren Duer 746b9c5d6a6SOren Duer static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx, 747b9c5d6a6SOren Duer __be64 tid, 748b9c5d6a6SOren Duer union ib_gid *new_mgid) 749b9c5d6a6SOren Duer { 750ee71b968SGeliang Tang struct mcast_group *group = NULL, *cur_group, *n; 751b9c5d6a6SOren Duer struct mcast_req *req; 752b9c5d6a6SOren Duer 753b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 754ee71b968SGeliang Tang list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) { 755b9c5d6a6SOren Duer mutex_lock(&group->lock); 756b9c5d6a6SOren Duer if (group->last_req_tid == tid) { 757b9c5d6a6SOren Duer if (memcmp(new_mgid, &mgid0, sizeof mgid0)) { 758b9c5d6a6SOren Duer group->rec.mgid = *new_mgid; 759b9c5d6a6SOren Duer sprintf(group->name, "%016llx%016llx", 760b9c5d6a6SOren Duer be64_to_cpu(group->rec.mgid.global.subnet_prefix), 761b9c5d6a6SOren Duer be64_to_cpu(group->rec.mgid.global.interface_id)); 762b9c5d6a6SOren Duer list_del_init(&group->mgid0_list); 763b9c5d6a6SOren Duer cur_group = mcast_insert(ctx, group); 764b9c5d6a6SOren Duer if (cur_group) { 765b9c5d6a6SOren Duer /* A race between our code and SM. Silently cleaning the new one */ 766b9c5d6a6SOren Duer req = list_first_entry(&group->pending_list, 767b9c5d6a6SOren Duer struct mcast_req, group_list); 768b9c5d6a6SOren Duer --group->func[req->func].num_pend_reqs; 769b9c5d6a6SOren Duer list_del(&req->group_list); 770b9c5d6a6SOren Duer list_del(&req->func_list); 771b9c5d6a6SOren Duer kfree(req); 772b9c5d6a6SOren Duer mutex_unlock(&group->lock); 773b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 774b9c5d6a6SOren Duer release_group(group, 0); 775b9c5d6a6SOren Duer return NULL; 776b9c5d6a6SOren Duer } 777b9c5d6a6SOren Duer 778b9c5d6a6SOren Duer atomic_inc(&group->refcount); 779c1e7e466SJack Morgenstein add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); 780b9c5d6a6SOren Duer mutex_unlock(&group->lock); 781b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 782b9c5d6a6SOren Duer return group; 783b9c5d6a6SOren Duer } else { 784b9c5d6a6SOren Duer struct mcast_req *tmp1, *tmp2; 785b9c5d6a6SOren Duer 786b9c5d6a6SOren Duer list_del(&group->mgid0_list); 787b9c5d6a6SOren Duer if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE) 788b9c5d6a6SOren Duer cancel_delayed_work_sync(&group->timeout_work); 789b9c5d6a6SOren Duer 790b9c5d6a6SOren Duer list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) { 791b9c5d6a6SOren Duer list_del(&tmp1->group_list); 792b9c5d6a6SOren Duer kfree(tmp1); 793b9c5d6a6SOren Duer } 794b9c5d6a6SOren Duer mutex_unlock(&group->lock); 795b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 796b9c5d6a6SOren Duer kfree(group); 797b9c5d6a6SOren Duer return NULL; 798b9c5d6a6SOren Duer } 799b9c5d6a6SOren Duer } 800b9c5d6a6SOren Duer mutex_unlock(&group->lock); 801b9c5d6a6SOren Duer } 802b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 803b9c5d6a6SOren Duer 804b9c5d6a6SOren Duer return NULL; 805b9c5d6a6SOren Duer } 806b9c5d6a6SOren Duer 807c1e7e466SJack Morgenstein static ssize_t sysfs_show_group(struct device *dev, 808c1e7e466SJack Morgenstein struct device_attribute *attr, char *buf); 809c1e7e466SJack Morgenstein 810b9c5d6a6SOren Duer static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx, 811b9c5d6a6SOren Duer union ib_gid *mgid, int create, 812b9c5d6a6SOren Duer gfp_t gfp_mask) 813b9c5d6a6SOren Duer { 814b9c5d6a6SOren Duer struct mcast_group *group, *cur_group; 815b9c5d6a6SOren Duer int is_mgid0; 816b9c5d6a6SOren Duer int i; 817b9c5d6a6SOren Duer 818b9c5d6a6SOren Duer is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0); 819b9c5d6a6SOren Duer if (!is_mgid0) { 820b9c5d6a6SOren Duer group = mcast_find(ctx, mgid); 821b9c5d6a6SOren Duer if (group) 822b9c5d6a6SOren Duer goto found; 823b9c5d6a6SOren Duer } 824b9c5d6a6SOren Duer 825b9c5d6a6SOren Duer if (!create) 826b9c5d6a6SOren Duer return ERR_PTR(-ENOENT); 827b9c5d6a6SOren Duer 828b9c5d6a6SOren Duer group = kzalloc(sizeof *group, gfp_mask); 829b9c5d6a6SOren Duer if (!group) 830b9c5d6a6SOren Duer return ERR_PTR(-ENOMEM); 831b9c5d6a6SOren Duer 832b9c5d6a6SOren Duer group->demux = ctx; 833b9c5d6a6SOren Duer group->rec.mgid = *mgid; 834b9c5d6a6SOren Duer INIT_LIST_HEAD(&group->pending_list); 835b9c5d6a6SOren Duer INIT_LIST_HEAD(&group->mgid0_list); 836b9c5d6a6SOren Duer for (i = 0; i < MAX_VFS; ++i) 837b9c5d6a6SOren Duer INIT_LIST_HEAD(&group->func[i].pending); 838b9c5d6a6SOren Duer INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); 839b9c5d6a6SOren Duer INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler); 840b9c5d6a6SOren Duer mutex_init(&group->lock); 841b9c5d6a6SOren Duer sprintf(group->name, "%016llx%016llx", 842b9c5d6a6SOren Duer be64_to_cpu(group->rec.mgid.global.subnet_prefix), 843b9c5d6a6SOren Duer be64_to_cpu(group->rec.mgid.global.interface_id)); 844c1e7e466SJack Morgenstein sysfs_attr_init(&group->dentry.attr); 845c1e7e466SJack Morgenstein group->dentry.show = sysfs_show_group; 846c1e7e466SJack Morgenstein group->dentry.store = NULL; 847c1e7e466SJack Morgenstein group->dentry.attr.name = group->name; 848c1e7e466SJack Morgenstein group->dentry.attr.mode = 0400; 849b9c5d6a6SOren Duer group->state = MCAST_IDLE; 850b9c5d6a6SOren Duer 851b9c5d6a6SOren Duer if (is_mgid0) { 852b9c5d6a6SOren Duer list_add(&group->mgid0_list, &ctx->mcg_mgid0_list); 853b9c5d6a6SOren Duer goto found; 854b9c5d6a6SOren Duer } 855b9c5d6a6SOren Duer 856b9c5d6a6SOren Duer cur_group = mcast_insert(ctx, group); 857b9c5d6a6SOren Duer if (cur_group) { 858b9c5d6a6SOren Duer mcg_warn("group just showed up %s - confused\n", cur_group->name); 859b9c5d6a6SOren Duer kfree(group); 860b9c5d6a6SOren Duer return ERR_PTR(-EINVAL); 861b9c5d6a6SOren Duer } 862b9c5d6a6SOren Duer 863c1e7e466SJack Morgenstein add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); 864c1e7e466SJack Morgenstein 865b9c5d6a6SOren Duer found: 866b9c5d6a6SOren Duer atomic_inc(&group->refcount); 867b9c5d6a6SOren Duer return group; 868b9c5d6a6SOren Duer } 869b9c5d6a6SOren Duer 870b9c5d6a6SOren Duer static void queue_req(struct mcast_req *req) 871b9c5d6a6SOren Duer { 872b9c5d6a6SOren Duer struct mcast_group *group = req->group; 873b9c5d6a6SOren Duer 874b9c5d6a6SOren Duer atomic_inc(&group->refcount); /* for the request */ 875b9c5d6a6SOren Duer atomic_inc(&group->refcount); /* for scheduling the work */ 876b9c5d6a6SOren Duer list_add_tail(&req->group_list, &group->pending_list); 877b9c5d6a6SOren Duer list_add_tail(&req->func_list, &group->func[req->func].pending); 878b9c5d6a6SOren Duer /* calls mlx4_ib_mcg_work_handler */ 879b9c5d6a6SOren Duer if (!queue_work(group->demux->mcg_wq, &group->work)) 880b9c5d6a6SOren Duer safe_atomic_dec(&group->refcount); 881b9c5d6a6SOren Duer } 882b9c5d6a6SOren Duer 883b9c5d6a6SOren Duer int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave, 884b9c5d6a6SOren Duer struct ib_sa_mad *mad) 885b9c5d6a6SOren Duer { 886b9c5d6a6SOren Duer struct mlx4_ib_dev *dev = to_mdev(ibdev); 887b9c5d6a6SOren Duer struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data; 888b9c5d6a6SOren Duer struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1]; 889b9c5d6a6SOren Duer struct mcast_group *group; 890b9c5d6a6SOren Duer 891b9c5d6a6SOren Duer switch (mad->mad_hdr.method) { 892b9c5d6a6SOren Duer case IB_MGMT_METHOD_GET_RESP: 893b9c5d6a6SOren Duer case IB_SA_METHOD_DELETE_RESP: 894b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 895b9c5d6a6SOren Duer group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL); 896b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 897b9c5d6a6SOren Duer if (IS_ERR(group)) { 898b9c5d6a6SOren Duer if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) { 899b9c5d6a6SOren Duer __be64 tid = mad->mad_hdr.tid; 900b9c5d6a6SOren Duer *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */ 901b9c5d6a6SOren Duer group = search_relocate_mgid0_group(ctx, tid, &rec->mgid); 902b9c5d6a6SOren Duer } else 903b9c5d6a6SOren Duer group = NULL; 904b9c5d6a6SOren Duer } 905b9c5d6a6SOren Duer 906b9c5d6a6SOren Duer if (!group) 907b9c5d6a6SOren Duer return 1; 908b9c5d6a6SOren Duer 909b9c5d6a6SOren Duer mutex_lock(&group->lock); 910b9c5d6a6SOren Duer group->response_sa_mad = *mad; 911b9c5d6a6SOren Duer group->prev_state = group->state; 912b9c5d6a6SOren Duer group->state = MCAST_RESP_READY; 913b9c5d6a6SOren Duer /* calls mlx4_ib_mcg_work_handler */ 914b9c5d6a6SOren Duer atomic_inc(&group->refcount); 915b9c5d6a6SOren Duer if (!queue_work(ctx->mcg_wq, &group->work)) 916b9c5d6a6SOren Duer safe_atomic_dec(&group->refcount); 917b9c5d6a6SOren Duer mutex_unlock(&group->lock); 918b9c5d6a6SOren Duer release_group(group, 0); 919b9c5d6a6SOren Duer return 1; /* consumed */ 920b9c5d6a6SOren Duer case IB_MGMT_METHOD_SET: 921b9c5d6a6SOren Duer case IB_SA_METHOD_GET_TABLE: 922b9c5d6a6SOren Duer case IB_SA_METHOD_GET_TABLE_RESP: 923b9c5d6a6SOren Duer case IB_SA_METHOD_DELETE: 924b9c5d6a6SOren Duer return 0; /* not consumed, pass-through to guest over tunnel */ 925b9c5d6a6SOren Duer default: 926b9c5d6a6SOren Duer mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n", 927b9c5d6a6SOren Duer port, mad->mad_hdr.method); 928b9c5d6a6SOren Duer return 1; /* consumed */ 929b9c5d6a6SOren Duer } 930b9c5d6a6SOren Duer } 931b9c5d6a6SOren Duer 932b9c5d6a6SOren Duer int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, 933b9c5d6a6SOren Duer int slave, struct ib_sa_mad *sa_mad) 934b9c5d6a6SOren Duer { 935b9c5d6a6SOren Duer struct mlx4_ib_dev *dev = to_mdev(ibdev); 936b9c5d6a6SOren Duer struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data; 937b9c5d6a6SOren Duer struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1]; 938b9c5d6a6SOren Duer struct mcast_group *group; 939b9c5d6a6SOren Duer struct mcast_req *req; 940b9c5d6a6SOren Duer int may_create = 0; 941b9c5d6a6SOren Duer 942b9c5d6a6SOren Duer if (ctx->flushing) 943b9c5d6a6SOren Duer return -EAGAIN; 944b9c5d6a6SOren Duer 945b9c5d6a6SOren Duer switch (sa_mad->mad_hdr.method) { 946b9c5d6a6SOren Duer case IB_MGMT_METHOD_SET: 947b9c5d6a6SOren Duer may_create = 1; 948b9c5d6a6SOren Duer case IB_SA_METHOD_DELETE: 949b9c5d6a6SOren Duer req = kzalloc(sizeof *req, GFP_KERNEL); 950b9c5d6a6SOren Duer if (!req) 951b9c5d6a6SOren Duer return -ENOMEM; 952b9c5d6a6SOren Duer 953b9c5d6a6SOren Duer req->func = slave; 954b9c5d6a6SOren Duer req->sa_mad = *sa_mad; 955b9c5d6a6SOren Duer 956b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 957b9c5d6a6SOren Duer group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL); 958b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 959b9c5d6a6SOren Duer if (IS_ERR(group)) { 960b9c5d6a6SOren Duer kfree(req); 961b9c5d6a6SOren Duer return PTR_ERR(group); 962b9c5d6a6SOren Duer } 963b9c5d6a6SOren Duer mutex_lock(&group->lock); 964b9c5d6a6SOren Duer if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { 965b9c5d6a6SOren Duer mutex_unlock(&group->lock); 9662cb8e7f8SJack Morgenstein mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", 967b9c5d6a6SOren Duer port, slave, MAX_PEND_REQS_PER_FUNC); 968b9c5d6a6SOren Duer release_group(group, 0); 969b9c5d6a6SOren Duer kfree(req); 970b9c5d6a6SOren Duer return -ENOMEM; 971b9c5d6a6SOren Duer } 972b9c5d6a6SOren Duer ++group->func[slave].num_pend_reqs; 973b9c5d6a6SOren Duer req->group = group; 974b9c5d6a6SOren Duer queue_req(req); 975b9c5d6a6SOren Duer mutex_unlock(&group->lock); 976b9c5d6a6SOren Duer release_group(group, 0); 977b9c5d6a6SOren Duer return 1; /* consumed */ 978b9c5d6a6SOren Duer case IB_SA_METHOD_GET_TABLE: 979b9c5d6a6SOren Duer case IB_MGMT_METHOD_GET_RESP: 980b9c5d6a6SOren Duer case IB_SA_METHOD_GET_TABLE_RESP: 981b9c5d6a6SOren Duer case IB_SA_METHOD_DELETE_RESP: 982b9c5d6a6SOren Duer return 0; /* not consumed, pass-through */ 983b9c5d6a6SOren Duer default: 984b9c5d6a6SOren Duer mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n", 985b9c5d6a6SOren Duer port, slave, sa_mad->mad_hdr.method); 986b9c5d6a6SOren Duer return 1; /* consumed */ 987b9c5d6a6SOren Duer } 988b9c5d6a6SOren Duer } 989b9c5d6a6SOren Duer 990c1e7e466SJack Morgenstein static ssize_t sysfs_show_group(struct device *dev, 991c1e7e466SJack Morgenstein struct device_attribute *attr, char *buf) 992c1e7e466SJack Morgenstein { 993c1e7e466SJack Morgenstein struct mcast_group *group = 994c1e7e466SJack Morgenstein container_of(attr, struct mcast_group, dentry); 995c1e7e466SJack Morgenstein struct mcast_req *req = NULL; 996c1e7e466SJack Morgenstein char pending_str[40]; 997c1e7e466SJack Morgenstein char state_str[40]; 998c1e7e466SJack Morgenstein ssize_t len = 0; 999c1e7e466SJack Morgenstein int f; 1000c1e7e466SJack Morgenstein 1001c1e7e466SJack Morgenstein if (group->state == MCAST_IDLE) 1002c1e7e466SJack Morgenstein sprintf(state_str, "%s", get_state_string(group->state)); 1003c1e7e466SJack Morgenstein else 1004c1e7e466SJack Morgenstein sprintf(state_str, "%s(TID=0x%llx)", 1005c1e7e466SJack Morgenstein get_state_string(group->state), 1006c1e7e466SJack Morgenstein be64_to_cpu(group->last_req_tid)); 1007c1e7e466SJack Morgenstein if (list_empty(&group->pending_list)) { 1008c1e7e466SJack Morgenstein sprintf(pending_str, "No"); 1009c1e7e466SJack Morgenstein } else { 1010c1e7e466SJack Morgenstein req = list_first_entry(&group->pending_list, struct mcast_req, group_list); 1011c1e7e466SJack Morgenstein sprintf(pending_str, "Yes(TID=0x%llx)", 1012c1e7e466SJack Morgenstein be64_to_cpu(req->sa_mad.mad_hdr.tid)); 1013c1e7e466SJack Morgenstein } 1014c1e7e466SJack Morgenstein len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ", 1015c1e7e466SJack Morgenstein group->rec.scope_join_state & 0xf, 1016c1e7e466SJack Morgenstein group->members[2], group->members[1], group->members[0], 1017c1e7e466SJack Morgenstein atomic_read(&group->refcount), 1018c1e7e466SJack Morgenstein pending_str, 1019c1e7e466SJack Morgenstein state_str); 1020c1e7e466SJack Morgenstein for (f = 0; f < MAX_VFS; ++f) 1021c1e7e466SJack Morgenstein if (group->func[f].state == MCAST_MEMBER) 1022c1e7e466SJack Morgenstein len += sprintf(buf + len, "%d[%1x] ", 1023c1e7e466SJack Morgenstein f, group->func[f].join_state); 1024c1e7e466SJack Morgenstein 1025c1e7e466SJack Morgenstein len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x " 1026c1e7e466SJack Morgenstein "%4x %4x %2x %2x)\n", 1027c1e7e466SJack Morgenstein be16_to_cpu(group->rec.pkey), 1028c1e7e466SJack Morgenstein be32_to_cpu(group->rec.qkey), 1029c1e7e466SJack Morgenstein (group->rec.mtusel_mtu & 0xc0) >> 6, 1030c1e7e466SJack Morgenstein group->rec.mtusel_mtu & 0x3f, 1031c1e7e466SJack Morgenstein group->rec.tclass, 1032c1e7e466SJack Morgenstein (group->rec.ratesel_rate & 0xc0) >> 6, 1033c1e7e466SJack Morgenstein group->rec.ratesel_rate & 0x3f, 1034c1e7e466SJack Morgenstein (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28, 1035c1e7e466SJack Morgenstein (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8, 1036c1e7e466SJack Morgenstein be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff, 1037c1e7e466SJack Morgenstein group->rec.proxy_join); 1038c1e7e466SJack Morgenstein 1039c1e7e466SJack Morgenstein return len; 1040c1e7e466SJack Morgenstein } 1041c1e7e466SJack Morgenstein 1042b9c5d6a6SOren Duer int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx) 1043b9c5d6a6SOren Duer { 1044b9c5d6a6SOren Duer char name[20]; 1045b9c5d6a6SOren Duer 1046b9c5d6a6SOren Duer atomic_set(&ctx->tid, 0); 1047b9c5d6a6SOren Duer sprintf(name, "mlx4_ib_mcg%d", ctx->port); 1048fcf621ddSBhaktipriya Shridhar ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 1049b9c5d6a6SOren Duer if (!ctx->mcg_wq) 1050b9c5d6a6SOren Duer return -ENOMEM; 1051b9c5d6a6SOren Duer 1052b9c5d6a6SOren Duer mutex_init(&ctx->mcg_table_lock); 1053b9c5d6a6SOren Duer ctx->mcg_table = RB_ROOT; 1054b9c5d6a6SOren Duer INIT_LIST_HEAD(&ctx->mcg_mgid0_list); 1055b9c5d6a6SOren Duer ctx->flushing = 0; 1056b9c5d6a6SOren Duer 1057b9c5d6a6SOren Duer return 0; 1058b9c5d6a6SOren Duer } 1059b9c5d6a6SOren Duer 1060b9c5d6a6SOren Duer static void force_clean_group(struct mcast_group *group) 1061b9c5d6a6SOren Duer { 1062b9c5d6a6SOren Duer struct mcast_req *req, *tmp 1063b9c5d6a6SOren Duer ; 1064b9c5d6a6SOren Duer list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { 1065b9c5d6a6SOren Duer list_del(&req->group_list); 1066b9c5d6a6SOren Duer kfree(req); 1067b9c5d6a6SOren Duer } 1068c1e7e466SJack Morgenstein del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr); 1069b9c5d6a6SOren Duer rb_erase(&group->node, &group->demux->mcg_table); 1070b9c5d6a6SOren Duer kfree(group); 1071b9c5d6a6SOren Duer } 1072b9c5d6a6SOren Duer 1073b9c5d6a6SOren Duer static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq) 1074b9c5d6a6SOren Duer { 1075b9c5d6a6SOren Duer int i; 1076b9c5d6a6SOren Duer struct rb_node *p; 1077b9c5d6a6SOren Duer struct mcast_group *group; 1078b9c5d6a6SOren Duer unsigned long end; 1079b9c5d6a6SOren Duer int count; 1080b9c5d6a6SOren Duer 1081b9c5d6a6SOren Duer for (i = 0; i < MAX_VFS; ++i) 1082b9c5d6a6SOren Duer clean_vf_mcast(ctx, i); 1083b9c5d6a6SOren Duer 1084b9c5d6a6SOren Duer end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000); 1085b9c5d6a6SOren Duer do { 1086b9c5d6a6SOren Duer count = 0; 1087b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 1088b9c5d6a6SOren Duer for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) 1089b9c5d6a6SOren Duer ++count; 1090b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 1091b9c5d6a6SOren Duer if (!count) 1092b9c5d6a6SOren Duer break; 1093b9c5d6a6SOren Duer 1094b9c5d6a6SOren Duer msleep(1); 1095b9c5d6a6SOren Duer } while (time_after(end, jiffies)); 1096b9c5d6a6SOren Duer 1097b9c5d6a6SOren Duer flush_workqueue(ctx->mcg_wq); 1098b9c5d6a6SOren Duer if (destroy_wq) 1099b9c5d6a6SOren Duer destroy_workqueue(ctx->mcg_wq); 1100b9c5d6a6SOren Duer 1101b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 1102b9c5d6a6SOren Duer while ((p = rb_first(&ctx->mcg_table)) != NULL) { 1103b9c5d6a6SOren Duer group = rb_entry(p, struct mcast_group, node); 1104b9c5d6a6SOren Duer if (atomic_read(&group->refcount)) 1105fb7a9174SJack Morgenstein mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n", 1106fb7a9174SJack Morgenstein atomic_read(&group->refcount), group); 1107b9c5d6a6SOren Duer 1108b9c5d6a6SOren Duer force_clean_group(group); 1109b9c5d6a6SOren Duer } 1110b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 1111b9c5d6a6SOren Duer } 1112b9c5d6a6SOren Duer 1113b9c5d6a6SOren Duer struct clean_work { 1114b9c5d6a6SOren Duer struct work_struct work; 1115b9c5d6a6SOren Duer struct mlx4_ib_demux_ctx *ctx; 1116b9c5d6a6SOren Duer int destroy_wq; 1117b9c5d6a6SOren Duer }; 1118b9c5d6a6SOren Duer 1119b9c5d6a6SOren Duer static void mcg_clean_task(struct work_struct *work) 1120b9c5d6a6SOren Duer { 1121b9c5d6a6SOren Duer struct clean_work *cw = container_of(work, struct clean_work, work); 1122b9c5d6a6SOren Duer 1123b9c5d6a6SOren Duer _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq); 1124bef83ed9SEli Cohen cw->ctx->flushing = 0; 1125b9c5d6a6SOren Duer kfree(cw); 1126b9c5d6a6SOren Duer } 1127b9c5d6a6SOren Duer 1128b9c5d6a6SOren Duer void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq) 1129b9c5d6a6SOren Duer { 1130b9c5d6a6SOren Duer struct clean_work *work; 1131b9c5d6a6SOren Duer 1132bef83ed9SEli Cohen if (ctx->flushing) 1133bef83ed9SEli Cohen return; 1134bef83ed9SEli Cohen 1135bef83ed9SEli Cohen ctx->flushing = 1; 1136bef83ed9SEli Cohen 1137b9c5d6a6SOren Duer if (destroy_wq) { 1138b9c5d6a6SOren Duer _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq); 1139bef83ed9SEli Cohen ctx->flushing = 0; 1140b9c5d6a6SOren Duer return; 1141b9c5d6a6SOren Duer } 1142b9c5d6a6SOren Duer 1143b9c5d6a6SOren Duer work = kmalloc(sizeof *work, GFP_KERNEL); 1144b9c5d6a6SOren Duer if (!work) { 1145bef83ed9SEli Cohen ctx->flushing = 0; 1146b9c5d6a6SOren Duer return; 1147b9c5d6a6SOren Duer } 1148b9c5d6a6SOren Duer 1149b9c5d6a6SOren Duer work->ctx = ctx; 1150b9c5d6a6SOren Duer work->destroy_wq = destroy_wq; 1151b9c5d6a6SOren Duer INIT_WORK(&work->work, mcg_clean_task); 1152b9c5d6a6SOren Duer queue_work(clean_wq, &work->work); 1153b9c5d6a6SOren Duer } 1154b9c5d6a6SOren Duer 1155b9c5d6a6SOren Duer static void build_leave_mad(struct mcast_req *req) 1156b9c5d6a6SOren Duer { 1157b9c5d6a6SOren Duer struct ib_sa_mad *mad = &req->sa_mad; 1158b9c5d6a6SOren Duer 1159b9c5d6a6SOren Duer mad->mad_hdr.method = IB_SA_METHOD_DELETE; 1160b9c5d6a6SOren Duer } 1161b9c5d6a6SOren Duer 1162b9c5d6a6SOren Duer 1163b9c5d6a6SOren Duer static void clear_pending_reqs(struct mcast_group *group, int vf) 1164b9c5d6a6SOren Duer { 1165b9c5d6a6SOren Duer struct mcast_req *req, *tmp, *group_first = NULL; 1166b9c5d6a6SOren Duer int clear; 1167b9c5d6a6SOren Duer int pend = 0; 1168b9c5d6a6SOren Duer 1169b9c5d6a6SOren Duer if (!list_empty(&group->pending_list)) 1170b9c5d6a6SOren Duer group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list); 1171b9c5d6a6SOren Duer 1172b9c5d6a6SOren Duer list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { 1173b9c5d6a6SOren Duer clear = 1; 1174b9c5d6a6SOren Duer if (group_first == req && 1175b9c5d6a6SOren Duer (group->state == MCAST_JOIN_SENT || 1176b9c5d6a6SOren Duer group->state == MCAST_LEAVE_SENT)) { 1177b9c5d6a6SOren Duer clear = cancel_delayed_work(&group->timeout_work); 1178b9c5d6a6SOren Duer pend = !clear; 1179b9c5d6a6SOren Duer group->state = MCAST_IDLE; 1180b9c5d6a6SOren Duer } 1181b9c5d6a6SOren Duer if (clear) { 1182b9c5d6a6SOren Duer --group->func[vf].num_pend_reqs; 1183b9c5d6a6SOren Duer list_del(&req->group_list); 1184b9c5d6a6SOren Duer list_del(&req->func_list); 1185b9c5d6a6SOren Duer kfree(req); 1186b9c5d6a6SOren Duer atomic_dec(&group->refcount); 1187b9c5d6a6SOren Duer } 1188b9c5d6a6SOren Duer } 1189b9c5d6a6SOren Duer 1190b9c5d6a6SOren Duer if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) { 1191b9c5d6a6SOren Duer mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n", 1192b9c5d6a6SOren Duer list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs); 1193b9c5d6a6SOren Duer } 1194b9c5d6a6SOren Duer } 1195b9c5d6a6SOren Duer 1196b9c5d6a6SOren Duer static int push_deleteing_req(struct mcast_group *group, int slave) 1197b9c5d6a6SOren Duer { 1198b9c5d6a6SOren Duer struct mcast_req *req; 1199b9c5d6a6SOren Duer struct mcast_req *pend_req; 1200b9c5d6a6SOren Duer 1201b9c5d6a6SOren Duer if (!group->func[slave].join_state) 1202b9c5d6a6SOren Duer return 0; 1203b9c5d6a6SOren Duer 1204b9c5d6a6SOren Duer req = kzalloc(sizeof *req, GFP_KERNEL); 120515d4626eSLeon Romanovsky if (!req) 1206b9c5d6a6SOren Duer return -ENOMEM; 1207b9c5d6a6SOren Duer 1208b9c5d6a6SOren Duer if (!list_empty(&group->func[slave].pending)) { 1209b9c5d6a6SOren Duer pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); 1210b9c5d6a6SOren Duer if (pend_req->clean) { 1211b9c5d6a6SOren Duer kfree(req); 1212b9c5d6a6SOren Duer return 0; 1213b9c5d6a6SOren Duer } 1214b9c5d6a6SOren Duer } 1215b9c5d6a6SOren Duer 1216b9c5d6a6SOren Duer req->clean = 1; 1217b9c5d6a6SOren Duer req->func = slave; 1218b9c5d6a6SOren Duer req->group = group; 1219b9c5d6a6SOren Duer ++group->func[slave].num_pend_reqs; 1220b9c5d6a6SOren Duer build_leave_mad(req); 1221b9c5d6a6SOren Duer queue_req(req); 1222b9c5d6a6SOren Duer return 0; 1223b9c5d6a6SOren Duer } 1224b9c5d6a6SOren Duer 1225b9c5d6a6SOren Duer void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave) 1226b9c5d6a6SOren Duer { 1227b9c5d6a6SOren Duer struct mcast_group *group; 1228b9c5d6a6SOren Duer struct rb_node *p; 1229b9c5d6a6SOren Duer 1230b9c5d6a6SOren Duer mutex_lock(&ctx->mcg_table_lock); 1231b9c5d6a6SOren Duer for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) { 1232b9c5d6a6SOren Duer group = rb_entry(p, struct mcast_group, node); 1233b9c5d6a6SOren Duer mutex_lock(&group->lock); 1234b9c5d6a6SOren Duer if (atomic_read(&group->refcount)) { 1235b9c5d6a6SOren Duer /* clear pending requests of this VF */ 1236b9c5d6a6SOren Duer clear_pending_reqs(group, slave); 1237b9c5d6a6SOren Duer push_deleteing_req(group, slave); 1238b9c5d6a6SOren Duer } 1239b9c5d6a6SOren Duer mutex_unlock(&group->lock); 1240b9c5d6a6SOren Duer } 1241b9c5d6a6SOren Duer mutex_unlock(&ctx->mcg_table_lock); 1242b9c5d6a6SOren Duer } 1243b9c5d6a6SOren Duer 1244b9c5d6a6SOren Duer 1245b9c5d6a6SOren Duer int mlx4_ib_mcg_init(void) 1246b9c5d6a6SOren Duer { 1247fcf621ddSBhaktipriya Shridhar clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM); 1248b9c5d6a6SOren Duer if (!clean_wq) 1249b9c5d6a6SOren Duer return -ENOMEM; 1250b9c5d6a6SOren Duer 1251b9c5d6a6SOren Duer return 0; 1252b9c5d6a6SOren Duer } 1253b9c5d6a6SOren Duer 1254b9c5d6a6SOren Duer void mlx4_ib_mcg_destroy(void) 1255b9c5d6a6SOren Duer { 1256b9c5d6a6SOren Duer destroy_workqueue(clean_wq); 1257b9c5d6a6SOren Duer } 1258