/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <dev/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, group->demux->port, ## arg)

#define mcg_debug_group(group, format, arg...) \
	pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)


static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;

enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state state;
	uint8_t join_state;
	int num_pend_reqs;
	struct list_head pending;
};

struct ib_sa_mcmember_data {
	union ib_gid mgid;
	union ib_gid port_gid;
	__be32 qkey;
	__be16 mlid;
	u8 mtusel_mtu;
	u8 tclass;
	__be16 pkey;
	u8 ratesel_rate;
	u8 lifetmsel_lifetm;
	__be32 sl_flowlabel_hoplimit;
	u8 scope_join_state;
	u8 proxy_join;
	u8 reserved[2];
} __packed __aligned(4);

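/* One mcast_group is kept per MGID per port. The PF tracks the join state of
 * every VF (func[]) and folds them into a single SA membership, described by
 * 'rec', on behalf of the physical port. */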
struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node node;
	struct list_head mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member func[MAX_VFS];
	struct mutex lock;
	struct work_struct work;
	struct list_head pending_list;
	int members[3];
	enum mcast_group_state state;
	enum mcast_group_state prev_state;
	struct ib_sa_mad response_sa_mad;
	__be64 last_req_tid;

	char name[33]; /* MGID string */
	struct device_attribute dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work timeout_work;
	struct list_head cleanup_list;
};

struct mcast_req {
	int func;
	struct ib_sa_mad sa_mad;
	struct list_head group_list;
	struct list_head func_list;
	struct mcast_group *group;
	int clean;
};


#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}

static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}

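/* Send a locally built SA MAD to the subnet SA/SM, using the cached SM
 * address handle for this port. Fails with -EAGAIN until the port is Active
 * and sm_ah is available. */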
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, 0xffff, mad);
}

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid; /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}

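/* Forward a VF's join request to the SA: the request is sent with the PF's
 * port GID and a freshly allocated TID (kept in last_req_tid so the response
 * can be matched), and the timeout handler is armed. */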
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we build on the MAD request exactly as it arrived from the VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}

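/* Compare one group-record value (src) against the corresponding request
 * value (dst) according to the selector encoded in the request's two high
 * bits. Returns nonzero if the group record does not satisfy the request. */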
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;
	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

/* Release a group reference; return 1 if this was the last reference and the
 * group was destroyed. Timeout work is cancelled synchronously. */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	} else {
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
	}
	return 0;
}

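/* members[] holds one counter per join-state bit; adjust_membership() adds
 * 'inc' to the counter of every bit set in join_state. */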
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 0xf);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}

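/* Runs when the SA has not answered a join/leave within MAD_TIMEOUT_MS:
 * drop the stalled request (or the whole MGID0 placeholder group), return to
 * MCAST_IDLE and reschedule the worker to process whatever is still pending. */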
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 0xf)
			group->rec.scope_join_state &= 0xf0;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}

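/* handle_leave_req()/handle_join_req() return the number of group references
 * the caller must drop - one for every request that was completed and freed
 * here (requests left queued, e.g. while waiting for the SA, keep theirs). */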
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

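/* If the requested join-state bits are already covered by the group's SA
 * membership, answer the VF directly; otherwise forward the join to the SA
 * and move to MCAST_JOIN_SENT. */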
static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 0xf;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}

static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from the SM is waiting for this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If a VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				       (long long)be64_to_cpu(
					   group->response_sa_mad.mad_hdr.tid),
				       (long long)be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						       struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
				group->response_sa_mad.data)->scope_join_state & 0xf;
			cur_join_state = group->rec.scope_join_state & 0xf;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0xf;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}

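/* A join that was sent to the SA with a zero MGID (asking the SA to assign
 * one) is parked on mcg_mgid0_list. When the response arrives, find that
 * placeholder group by TID and move it under the newly assigned MGID, or
 * discard it if no MGID was assigned. */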
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group, *n;
	struct mcast_req *req;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
					(long long)be64_to_cpu(group->rec.mgid.global.subnet_prefix),
					(long long)be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and the SM. Silently clean up the new group. */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);

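/* Find the group for an MGID in the per-port rb-tree, optionally creating it
 * (and its sysfs entry) if it does not exist. Returns with an additional
 * reference held on the group. */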
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
		(long long)be64_to_cpu(
		    group->rec.mgid.global.subnet_prefix),
		(long long)be64_to_cpu(
		    group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}

static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}

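/* Demultiplex a MCMemberRecord MAD that arrived from the wire. SA responses
 * (GetResp/DeleteResp) are consumed here and handed to the group worker;
 * anything else is passed through to the guest over the tunnel QP. */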
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1;

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

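/* Multiplex a MCMemberRecord MAD sent by a slave. Set (join) and Delete
 * (leave) requests are queued on the group and served by the worker; other
 * methods are passed through untouched. */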
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
					port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
			get_state_string(group->state),
			(long long)be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
			(long long)be64_to_cpu(
			    req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
		       group->rec.scope_join_state & 0xf,
		       group->members[2], group->members[1], group->members[0],
		       atomic_read(&group->refcount),
		       pending_str,
		       state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
				       f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
		       "%4x %4x %2x %2x)\n",
		       be16_to_cpu(group->rec.pkey),
		       be32_to_cpu(group->rec.qkey),
		       (group->rec.mtusel_mtu & 0xc0) >> 6,
		       group->rec.mtusel_mtu & 0x3f,
		       group->rec.tclass,
		       (group->rec.ratesel_rate & 0xc0) >> 6,
		       group->rec.ratesel_rate & 0x3f,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
		       be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
		       group->rec.proxy_join);

	return len;
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}

static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}

static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}

static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}


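/* Drop every request still queued by this VF on the group. If the first
 * pending request is the one currently outstanding at the SA and its timeout
 * cannot be cancelled, it is left for the timeout handler to finish. */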
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}

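/* Queue a synthetic 'clean' leave request covering everything this slave has
 * joined, so the SA membership is dropped once the VF goes away. */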
static int push_deleteing_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}

void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleteing_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}


int mlx4_ib_mcg_init(void)
{
	clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}