1 /*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
42 #include <linux/sched/mm.h>
43 #include <linux/sched/task.h>
44
45 #include <net/ipv6.h>
46 #include <net/addrconf.h>
47 #include <net/devlink.h>
48
49 #include <rdma/ib_smi.h>
50 #include <rdma/ib_user_verbs.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/ib_cache.h>
53
54 #include <net/bonding.h>
55
56 #include <linux/mlx4/driver.h>
57 #include <linux/mlx4/cmd.h>
58 #include <linux/mlx4/qp.h>
59
60 #include "mlx4_ib.h"
61 #include <rdma/mlx4-abi.h>
62
63 #define DRV_NAME MLX4_IB_DRV_NAME
64 #define DRV_VERSION "4.0-0"
65
66 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
67 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
68 #define MLX4_IB_CARD_REV_A0 0xA0
69
70 MODULE_AUTHOR("Roland Dreier");
71 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
72 MODULE_LICENSE("Dual BSD/GPL");
73
74 int mlx4_ib_sm_guid_assign = 0;
75 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
76 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
77
78 static const char mlx4_ib_version[] =
79 DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
80 DRV_VERSION "\n";
81
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
83 static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
84 u32 port_num);
85 static int mlx4_ib_event(struct notifier_block *this, unsigned long event,
86 void *param);
87
88 static struct workqueue_struct *wq;
89
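/*
 * Device-managed flow steering (DMFS) is usable only when the firmware
 * advertises it for the port types actually present (FS_EN for Ethernet,
 * DMFS_IPOIB for IB), and never for IB ports in a multi-function setup.
 */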
90 static int check_flow_steering_support(struct mlx4_dev *dev)
91 {
92 int eth_num_ports = 0;
93 int ib_num_ports = 0;
94
95 int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
96
97 if (dmfs) {
98 int i;
99 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
100 eth_num_ports++;
101 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
102 ib_num_ports++;
103 dmfs &= (!ib_num_ports ||
104 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
105 (!eth_num_ports ||
106 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
107 if (ib_num_ports && mlx4_is_mfunc(dev)) {
108 pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
109 dmfs = 0;
110 }
111 }
112 return dmfs;
113 }
114
115 static int num_ib_ports(struct mlx4_dev *dev)
116 {
117 int ib_ports = 0;
118 int i;
119
120 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
121 ib_ports++;
122
123 return ib_ports;
124 }
125
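/*
 * Find the netdev backing the given IB port (dev_port is 0-based). For a
 * bonded configuration the bond's active slave is returned instead. A
 * reference is taken on the returned netdev.
 */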
126 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
127 u32 port_num)
128 {
129 struct mlx4_ib_dev *ibdev = to_mdev(device);
130 struct net_device *dev, *ret = NULL;
131
132 rcu_read_lock();
133 for_each_netdev_rcu(&init_net, dev) {
134 if (dev->dev.parent != ibdev->ib_dev.dev.parent ||
135 dev->dev_port + 1 != port_num)
136 continue;
137
138 if (mlx4_is_bonded(ibdev->dev)) {
139 struct net_device *upper;
140
141 upper = netdev_master_upper_dev_get_rcu(dev);
142 if (upper) {
143 struct net_device *active;
144
145 active = bond_option_active_slave_get_rcu(netdev_priv(upper));
146 if (active)
147 dev = active;
148 }
149 }
150
151 dev_hold(dev);
152 ret = dev;
153 break;
154 }
155
156 rcu_read_unlock();
157 return ret;
158 }
159
160 static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
161 struct mlx4_ib_dev *ibdev,
162 u32 port_num)
163 {
164 struct mlx4_cmd_mailbox *mailbox;
165 int err;
166 struct mlx4_dev *dev = ibdev->dev;
167 int i;
168 union ib_gid *gid_tbl;
169
170 mailbox = mlx4_alloc_cmd_mailbox(dev);
171 if (IS_ERR(mailbox))
172 return -ENOMEM;
173
174 gid_tbl = mailbox->buf;
175
176 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
177 memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
178
179 err = mlx4_cmd(dev, mailbox->dma,
180 MLX4_SET_PORT_GID_TABLE << 8 | port_num,
181 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
182 MLX4_CMD_WRAPPED);
183 if (mlx4_is_bonded(dev))
184 err += mlx4_cmd(dev, mailbox->dma,
185 MLX4_SET_PORT_GID_TABLE << 8 | 2,
186 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
187 MLX4_CMD_WRAPPED);
188
189 mlx4_free_cmd_mailbox(dev, mailbox);
190 return err;
191 }
192
193 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
194 struct mlx4_ib_dev *ibdev,
195 u32 port_num)
196 {
197 struct mlx4_cmd_mailbox *mailbox;
198 int err;
199 struct mlx4_dev *dev = ibdev->dev;
200 int i;
201 struct {
202 union ib_gid gid;
203 __be32 rsrvd1[2];
204 __be16 rsrvd2;
205 u8 type;
206 u8 version;
207 __be32 rsrvd3;
208 } *gid_tbl;
209
210 mailbox = mlx4_alloc_cmd_mailbox(dev);
211 if (IS_ERR(mailbox))
212 return -ENOMEM;
213
214 gid_tbl = mailbox->buf;
215 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
216 memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
217 if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
218 gid_tbl[i].version = 2;
219 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
220 gid_tbl[i].type = 1;
221 }
222 }
223
224 err = mlx4_cmd(dev, mailbox->dma,
225 MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
226 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
227 MLX4_CMD_WRAPPED);
228 if (mlx4_is_bonded(dev))
229 err += mlx4_cmd(dev, mailbox->dma,
230 MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
231 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
232 MLX4_CMD_WRAPPED);
233
234 mlx4_free_cmd_mailbox(dev, mailbox);
235 return err;
236 }
237
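/*
 * Push the cached GID table to firmware via SET_PORT, using the RoCE
 * v1/v2 entry layout when the device supports RoCE v2 and the legacy
 * v1 layout otherwise.
 */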
238 static int mlx4_ib_update_gids(struct gid_entry *gids,
239 struct mlx4_ib_dev *ibdev,
240 u32 port_num)
241 {
242 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
243 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
244
245 return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
246 }
247
248 static void free_gid_entry(struct gid_entry *entry)
249 {
250 memset(&entry->gid, 0, sizeof(entry->gid));
251 kfree(entry->ctx);
252 entry->ctx = NULL;
253 }
254
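/*
 * Add a GID to the per-port software cache. Entries are reference
 * counted; only a previously unused slot triggers a firmware update,
 * which is issued after dropping the lock using a snapshot of the table.
 */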
255 static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
256 {
257 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
258 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
259 struct mlx4_port_gid_table *port_gid_table;
260 int free = -1, found = -1;
261 int ret = 0;
262 int hw_update = 0;
263 int i;
264 struct gid_entry *gids;
265 u16 vlan_id = 0xffff;
266 u8 mac[ETH_ALEN];
267
268 if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
269 return -EINVAL;
270
271 if (attr->port_num > MLX4_MAX_PORTS)
272 return -EINVAL;
273
274 if (!context)
275 return -EINVAL;
276
277 ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
278 if (ret)
279 return ret;
280 port_gid_table = &iboe->gids[attr->port_num - 1];
281 spin_lock_bh(&iboe->lock);
282 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
283 if (!memcmp(&port_gid_table->gids[i].gid,
284 &attr->gid, sizeof(attr->gid)) &&
285 port_gid_table->gids[i].gid_type == attr->gid_type &&
286 port_gid_table->gids[i].vlan_id == vlan_id) {
287 found = i;
288 break;
289 }
290 if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
291 free = i; /* HW has space */
292 }
293
294 if (found < 0) {
295 if (free < 0) {
296 ret = -ENOSPC;
297 } else {
298 port_gid_table->gids[free].ctx = kmalloc_obj(*port_gid_table->gids[free].ctx,
299 GFP_ATOMIC);
300 if (!port_gid_table->gids[free].ctx) {
301 ret = -ENOMEM;
302 } else {
303 *context = port_gid_table->gids[free].ctx;
304 port_gid_table->gids[free].gid = attr->gid;
305 port_gid_table->gids[free].gid_type = attr->gid_type;
306 port_gid_table->gids[free].vlan_id = vlan_id;
307 port_gid_table->gids[free].ctx->real_index = free;
308 port_gid_table->gids[free].ctx->refcount = 1;
309 hw_update = 1;
310 }
311 }
312 } else {
313 struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
314 *context = ctx;
315 ctx->refcount++;
316 }
317 if (!ret && hw_update) {
318 gids = kmalloc_objs(*gids, MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
319 if (!gids) {
320 ret = -ENOMEM;
321 *context = NULL;
322 free_gid_entry(&port_gid_table->gids[free]);
323 } else {
324 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
325 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
326 gids[i].gid_type = port_gid_table->gids[i].gid_type;
327 }
328 }
329 }
330 spin_unlock_bh(&iboe->lock);
331
332 if (!ret && hw_update) {
333 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
334 if (ret) {
335 spin_lock_bh(&iboe->lock);
336 *context = NULL;
337 free_gid_entry(&port_gid_table->gids[free]);
338 spin_unlock_bh(&iboe->lock);
339 }
340 kfree(gids);
341 }
342
343 return ret;
344 }
345
346 static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
347 {
348 struct gid_cache_context *ctx = *context;
349 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
350 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
351 struct mlx4_port_gid_table *port_gid_table;
352 int ret = 0;
353 int hw_update = 0;
354 struct gid_entry *gids = NULL;
355
356 if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
357 return -EINVAL;
358
359 if (attr->port_num > MLX4_MAX_PORTS)
360 return -EINVAL;
361
362 port_gid_table = &iboe->gids[attr->port_num - 1];
363 spin_lock_bh(&iboe->lock);
364 if (ctx) {
365 ctx->refcount--;
366 if (!ctx->refcount) {
367 unsigned int real_index = ctx->real_index;
368
369 free_gid_entry(&port_gid_table->gids[real_index]);
370 hw_update = 1;
371 }
372 }
373 if (!ret && hw_update) {
374 int i;
375
376 gids = kmalloc_objs(*gids, MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
377 if (!gids) {
378 ret = -ENOMEM;
379 } else {
380 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
381 memcpy(&gids[i].gid,
382 &port_gid_table->gids[i].gid,
383 sizeof(union ib_gid));
384 gids[i].gid_type =
385 port_gid_table->gids[i].gid_type;
386 }
387 }
388 }
389 spin_unlock_bh(&iboe->lock);
390
391 if (gids)
392 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
393
394 kfree(gids);
395 return ret;
396 }
397
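/*
 * Translate a GID index as seen by the IB core into the index the
 * hardware GID table actually uses for this port.
 */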
398 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
399 const struct ib_gid_attr *attr)
400 {
401 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
402 struct gid_cache_context *ctx = NULL;
403 struct mlx4_port_gid_table *port_gid_table;
404 int real_index = -EINVAL;
405 int i;
406 unsigned long flags;
407 u32 port_num = attr->port_num;
408
409 if (port_num > MLX4_MAX_PORTS)
410 return -EINVAL;
411
412 if (mlx4_is_bonded(ibdev->dev))
413 port_num = 1;
414
415 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
416 return attr->index;
417
418 spin_lock_irqsave(&iboe->lock, flags);
419 port_gid_table = &iboe->gids[port_num - 1];
420
421 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
422 if (!memcmp(&port_gid_table->gids[i].gid,
423 &attr->gid, sizeof(attr->gid)) &&
424 attr->gid_type == port_gid_table->gids[i].gid_type) {
425 ctx = port_gid_table->gids[i].ctx;
426 break;
427 }
428 if (ctx)
429 real_index = ctx->real_index;
430 spin_unlock_irqrestore(&iboe->lock, flags);
431 return real_index;
432 }
433
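/*
 * Fill in struct ib_device_attr from the NODE_INFO MAD and the cached
 * device capabilities. On the extended uverbs path, optional fields
 * (HCA clock offset, max inline recv, RSS and TSO caps) are appended to
 * the response according to the size of the user buffer.
 */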
434 static int mlx4_ib_query_device(struct ib_device *ibdev,
435 struct ib_device_attr *props,
436 struct ib_udata *uhw)
437 {
438 struct mlx4_ib_dev *dev = to_mdev(ibdev);
439 struct ib_smp *in_mad;
440 struct ib_smp *out_mad;
441 int err;
442 int have_ib_ports;
443 struct mlx4_uverbs_ex_query_device cmd;
444 struct mlx4_uverbs_ex_query_device_resp resp = {};
445 struct mlx4_clock_params clock_params;
446
447 if (uhw->inlen) {
448 if (uhw->inlen < sizeof(cmd))
449 return -EINVAL;
450
451 err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
452 if (err)
453 return err;
454
455 if (cmd.comp_mask)
456 return -EINVAL;
457
458 if (cmd.reserved)
459 return -EINVAL;
460 }
461
462 resp.response_length = offsetof(typeof(resp), response_length) +
463 sizeof(resp.response_length);
464 in_mad = kzalloc_obj(*in_mad);
465 out_mad = kmalloc_obj(*out_mad);
466 err = -ENOMEM;
467 if (!in_mad || !out_mad)
468 goto out;
469
470 ib_init_query_mad(in_mad);
471 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
472
473 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
474 1, NULL, NULL, in_mad, out_mad);
475 if (err)
476 goto out;
477
478 memset(props, 0, sizeof *props);
479
480 have_ib_ports = num_ib_ports(dev->dev);
481
482 props->fw_ver = dev->dev->caps.fw_ver;
483 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
484 IB_DEVICE_PORT_ACTIVE_EVENT |
485 IB_DEVICE_SYS_IMAGE_GUID |
486 IB_DEVICE_RC_RNR_NAK_GEN;
487 props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
488 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
489 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
490 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
491 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
492 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
493 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
494 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
495 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
496 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
497 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
498 if (dev->dev->caps.max_gso_sz &&
499 (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
500 (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
501 props->kernel_cap_flags |= IBK_UD_TSO;
502 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
503 props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY;
504 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
505 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
506 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
507 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
508 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
509 props->device_cap_flags |= IB_DEVICE_XRC;
510 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
511 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
512 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
513 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
514 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
515 else
516 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
517 }
518 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
519 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
520
521 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
522
523 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
524 0xffffff;
525 props->vendor_part_id = dev->dev->persist->pdev->device;
526 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
527 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
528
529 props->max_mr_size = ~0ull;
530 props->page_size_cap = dev->dev->caps.page_size_cap;
531 props->max_qp = dev->dev->quotas.qp;
532 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
533 props->max_send_sge =
534 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
535 props->max_recv_sge =
536 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
537 props->max_sge_rd = MLX4_MAX_SGE_RD;
538 props->max_cq = dev->dev->quotas.cq;
539 props->max_cqe = dev->dev->caps.max_cqes;
540 props->max_mr = dev->dev->quotas.mpt;
541 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
542 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
543 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
544 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
545 props->max_srq = dev->dev->quotas.srq;
546 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
547 props->max_srq_sge = dev->dev->caps.max_srq_sge;
548 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
549 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
550 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
551 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
552 props->masked_atomic_cap = props->atomic_cap;
553 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
554 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
555 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
556 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
557 props->max_mcast_grp;
558 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
559 props->timestamp_mask = 0xFFFFFFFFFFFFULL;
560 props->max_ah = INT_MAX;
561
562 if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
563 mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
564 if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
565 props->rss_caps.max_rwq_indirection_tables =
566 props->max_qp;
567 props->rss_caps.max_rwq_indirection_table_size =
568 dev->dev->caps.max_rss_tbl_sz;
569 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
570 props->max_wq_type_rq = props->max_qp;
571 }
572
573 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
574 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
575 }
576
577 props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
578 props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
579
580 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
581 resp.response_length += sizeof(resp.hca_core_clock_offset);
582 if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
583 resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
584 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
585 }
586 }
587
588 if (uhw->outlen >= resp.response_length +
589 sizeof(resp.max_inl_recv_sz)) {
590 resp.response_length += sizeof(resp.max_inl_recv_sz);
591 resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
592 sizeof(struct mlx4_wqe_data_seg);
593 }
594
595 if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
596 if (props->rss_caps.supported_qpts) {
597 resp.rss_caps.rx_hash_function =
598 MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
599
600 resp.rss_caps.rx_hash_fields_mask =
601 MLX4_IB_RX_HASH_SRC_IPV4 |
602 MLX4_IB_RX_HASH_DST_IPV4 |
603 MLX4_IB_RX_HASH_SRC_IPV6 |
604 MLX4_IB_RX_HASH_DST_IPV6 |
605 MLX4_IB_RX_HASH_SRC_PORT_TCP |
606 MLX4_IB_RX_HASH_DST_PORT_TCP |
607 MLX4_IB_RX_HASH_SRC_PORT_UDP |
608 MLX4_IB_RX_HASH_DST_PORT_UDP;
609
610 if (dev->dev->caps.tunnel_offload_mode ==
611 MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
612 resp.rss_caps.rx_hash_fields_mask |=
613 MLX4_IB_RX_HASH_INNER;
614 }
615 resp.response_length = offsetof(typeof(resp), rss_caps) +
616 sizeof(resp.rss_caps);
617 }
618
619 if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
620 if (dev->dev->caps.max_gso_sz &&
621 ((mlx4_ib_port_link_layer(ibdev, 1) ==
622 IB_LINK_LAYER_ETHERNET) ||
623 (mlx4_ib_port_link_layer(ibdev, 2) ==
624 IB_LINK_LAYER_ETHERNET))) {
625 resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
626 resp.tso_caps.supported_qpts |=
627 1 << IB_QPT_RAW_PACKET;
628 }
629 resp.response_length = offsetof(typeof(resp), tso_caps) +
630 sizeof(resp.tso_caps);
631 }
632
633 if (uhw->outlen) {
634 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
635 if (err)
636 goto out;
637 }
638 out:
639 kfree(in_mad);
640 kfree(out_mad);
641
642 return err;
643 }
644
645 static enum rdma_link_layer
646 mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
647 {
648 struct mlx4_dev *dev = to_mdev(device)->dev;
649
650 return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
651 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
652 }
653
654 static int ib_link_query_port(struct ib_device *ibdev, u32 port,
655 struct ib_port_attr *props, int netw_view)
656 {
657 struct ib_smp *in_mad;
658 struct ib_smp *out_mad;
659 int ext_active_speed;
660 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
661 int err = -ENOMEM;
662
663 in_mad = kzalloc_obj(*in_mad);
664 out_mad = kmalloc_obj(*out_mad);
665 if (!in_mad || !out_mad)
666 goto out;
667
668 ib_init_query_mad(in_mad);
669 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
670 in_mad->attr_mod = cpu_to_be32(port);
671
672 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
673 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
674
675 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
676 in_mad, out_mad);
677 if (err)
678 goto out;
679
680
681 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
682 props->lmc = out_mad->data[34] & 0x7;
683 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
684 props->sm_sl = out_mad->data[36] & 0xf;
685 props->state = out_mad->data[32] & 0xf;
686 props->phys_state = out_mad->data[33] >> 4;
687 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
688 if (netw_view)
689 props->gid_tbl_len = out_mad->data[50];
690 else
691 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
692 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
693 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
694 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
695 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
696 props->active_width = out_mad->data[31] & 0xf;
697 props->active_speed = out_mad->data[35] >> 4;
698 props->max_mtu = out_mad->data[41] & 0xf;
699 props->active_mtu = out_mad->data[36] >> 4;
700 props->subnet_timeout = out_mad->data[51] & 0x1f;
701 props->max_vl_num = out_mad->data[37] >> 4;
702 props->init_type_reply = out_mad->data[41] >> 4;
703
704 /* Check if extended speeds (EDR/FDR/...) are supported */
705 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
706 ext_active_speed = out_mad->data[62] >> 4;
707
708 switch (ext_active_speed) {
709 case 1:
710 props->active_speed = IB_SPEED_FDR;
711 break;
712 case 2:
713 props->active_speed = IB_SPEED_EDR;
714 break;
715 }
716 }
717
718 /* If reported active speed is QDR, check if it is FDR-10 */
719 if (props->active_speed == IB_SPEED_QDR) {
720 ib_init_query_mad(in_mad);
721 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
722 in_mad->attr_mod = cpu_to_be32(port);
723
724 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
725 NULL, NULL, in_mad, out_mad);
726 if (err)
727 goto out;
728
729 /* Checking LinkSpeedActive for FDR-10 */
730 if (out_mad->data[15] & 0x1)
731 props->active_speed = IB_SPEED_FDR10;
732 }
733
734 /* Avoid wrong speed value returned by FW if the IB link is down. */
735 if (props->state == IB_PORT_DOWN)
736 props->active_speed = IB_SPEED_SDR;
737
738 out:
739 kfree(in_mad);
740 kfree(out_mad);
741 return err;
742 }
743
744 static u8 state_to_phys_state(enum ib_port_state state)
745 {
746 return state == IB_PORT_ACTIVE ?
747 IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
748 }
749
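/*
 * Build ib_port_attr for an Ethernet (RoCE) port: link width and speed
 * come from the QUERY_PORT command, while state and MTU are derived from
 * the associated netdev (or the bond master when the ports are bonded).
 */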
750 static int eth_link_query_port(struct ib_device *ibdev, u32 port,
751 struct ib_port_attr *props)
752 {
753
754 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
755 struct mlx4_ib_iboe *iboe = &mdev->iboe;
756 struct net_device *ndev;
757 enum ib_mtu tmp;
758 struct mlx4_cmd_mailbox *mailbox;
759 int err = 0;
760 int is_bonded = mlx4_is_bonded(mdev->dev);
761
762 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
763 if (IS_ERR(mailbox))
764 return PTR_ERR(mailbox);
765
766 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
767 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
768 MLX4_CMD_WRAPPED);
769 if (err)
770 goto out;
771
772 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
773 (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
774 IB_WIDTH_4X : IB_WIDTH_1X;
775 props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
776 IB_SPEED_FDR : IB_SPEED_QDR;
777 props->port_cap_flags = IB_PORT_CM_SUP;
778 props->ip_gids = true;
779 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
780 props->max_msg_sz = mdev->dev->caps.max_msg_sz;
781 if (mdev->dev->caps.pkey_table_len[port])
782 props->pkey_tbl_len = 1;
783 props->max_mtu = IB_MTU_4096;
784 props->max_vl_num = 2;
785 props->state = IB_PORT_DOWN;
786 props->phys_state = state_to_phys_state(props->state);
787 props->active_mtu = IB_MTU_256;
788 spin_lock_bh(&iboe->lock);
789 ndev = iboe->netdevs[port - 1];
790 if (ndev && is_bonded) {
791 rcu_read_lock(); /* required to get upper dev */
792 ndev = netdev_master_upper_dev_get_rcu(ndev);
793 rcu_read_unlock();
794 }
795 if (!ndev)
796 goto out_unlock;
797
798 tmp = iboe_get_mtu(ndev->mtu);
799 props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
800
801 props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
802 IB_PORT_ACTIVE : IB_PORT_DOWN;
803 props->phys_state = state_to_phys_state(props->state);
804 out_unlock:
805 spin_unlock_bh(&iboe->lock);
806 out:
807 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
808 return err;
809 }
810
811 int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
812 struct ib_port_attr *props, int netw_view)
813 {
814 int err;
815
816 /* props being zeroed by the caller, avoid zeroing it here */
817
818 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
819 ib_link_query_port(ibdev, port, props, netw_view) :
820 eth_link_query_port(ibdev, port, props);
821
822 return err;
823 }
824
825 static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
826 struct ib_port_attr *props)
827 {
828 /* returns host view */
829 return __mlx4_ib_query_port(ibdev, port, props, 0);
830 }
831
832 int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
833 union ib_gid *gid, int netw_view)
834 {
835 struct ib_smp *in_mad;
836 struct ib_smp *out_mad;
837 int err = -ENOMEM;
838 struct mlx4_ib_dev *dev = to_mdev(ibdev);
839 int clear = 0;
840 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
841
842 in_mad = kzalloc_obj(*in_mad);
843 out_mad = kmalloc_obj(*out_mad);
844 if (!in_mad || !out_mad)
845 goto out;
846
847 ib_init_query_mad(in_mad);
848 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
849 in_mad->attr_mod = cpu_to_be32(port);
850
851 if (mlx4_is_mfunc(dev->dev) && netw_view)
852 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
853
854 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
855 if (err)
856 goto out;
857
858 memcpy(gid->raw, out_mad->data + 8, 8);
859
860 if (mlx4_is_mfunc(dev->dev) && !netw_view) {
861 if (index) {
862 /* For any index > 0, return the null guid */
863 err = 0;
864 clear = 1;
865 goto out;
866 }
867 }
868
869 ib_init_query_mad(in_mad);
870 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
871 in_mad->attr_mod = cpu_to_be32(index / 8);
872
873 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
874 NULL, NULL, in_mad, out_mad);
875 if (err)
876 goto out;
877
878 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
879
880 out:
881 if (clear)
882 memset(gid->raw + 8, 0, 8);
883 kfree(in_mad);
884 kfree(out_mad);
885 return err;
886 }
887
888 static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
889 union ib_gid *gid)
890 {
891 if (rdma_protocol_ib(ibdev, port))
892 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
893 return 0;
894 }
895
896 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
897 u64 *sl2vl_tbl)
898 {
899 union sl2vl_tbl_to_u64 sl2vl64;
900 struct ib_smp *in_mad;
901 struct ib_smp *out_mad;
902 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
903 int err = -ENOMEM;
904 int jj;
905
906 if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
907 *sl2vl_tbl = 0;
908 return 0;
909 }
910
911 in_mad = kzalloc_obj(*in_mad);
912 out_mad = kmalloc_obj(*out_mad);
913 if (!in_mad || !out_mad)
914 goto out;
915
916 ib_init_query_mad(in_mad);
917 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
918 in_mad->attr_mod = 0;
919
920 if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
921 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
922
923 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
924 in_mad, out_mad);
925 if (err)
926 goto out;
927
928 for (jj = 0; jj < 8; jj++)
929 sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
930 *sl2vl_tbl = sl2vl64.sl64;
931
932 out:
933 kfree(in_mad);
934 kfree(out_mad);
935 return err;
936 }
937
938 static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
939 {
940 u64 sl2vl;
941 int i;
942 int err;
943
944 for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
945 if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
946 continue;
947 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
948 if (err) {
949 pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
950 i, err);
951 sl2vl = 0;
952 }
953 atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
954 }
955 }
956
957 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
958 u16 *pkey, int netw_view)
959 {
960 struct ib_smp *in_mad;
961 struct ib_smp *out_mad;
962 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
963 int err = -ENOMEM;
964
965 in_mad = kzalloc_obj(*in_mad);
966 out_mad = kmalloc_obj(*out_mad);
967 if (!in_mad || !out_mad)
968 goto out;
969
970 ib_init_query_mad(in_mad);
971 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
972 in_mad->attr_mod = cpu_to_be32(index / 32);
973
974 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
975 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
976
977 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
978 in_mad, out_mad);
979 if (err)
980 goto out;
981
982 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
983
984 out:
985 kfree(in_mad);
986 kfree(out_mad);
987 return err;
988 }
989
990 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
991 u16 *pkey)
992 {
993 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
994 }
995
996 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
997 struct ib_device_modify *props)
998 {
999 struct mlx4_cmd_mailbox *mailbox;
1000 unsigned long flags;
1001
1002 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1003 return -EOPNOTSUPP;
1004
1005 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1006 return 0;
1007
1008 if (mlx4_is_slave(to_mdev(ibdev)->dev))
1009 return -EOPNOTSUPP;
1010
1011 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
1012 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1013 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
1014
1015 /*
1016 * If possible, pass node desc to FW, so it can generate
1017 * a 144 trap. If cmd fails, just ignore.
1018 */
1019 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
1020 if (IS_ERR(mailbox))
1021 return 0;
1022
1023 memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1024 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
1025 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1026
1027 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
1028
1029 return 0;
1030 }
1031
1032 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
1033 int reset_qkey_viols, u32 cap_mask)
1034 {
1035 struct mlx4_cmd_mailbox *mailbox;
1036 int err;
1037
1038 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
1039 if (IS_ERR(mailbox))
1040 return PTR_ERR(mailbox);
1041
1042 if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1043 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
1044 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
1045 } else {
1046 ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
1047 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
1048 }
1049
1050 err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
1051 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1052 MLX4_CMD_WRAPPED);
1053
1054 mlx4_free_cmd_mailbox(dev->dev, mailbox);
1055 return err;
1056 }
1057
1058 static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1059 struct ib_port_modify *props)
1060 {
1061 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1062 u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
1063 struct ib_port_attr attr;
1064 u32 cap_mask;
1065 int err;
1066
1067 /* return OK if this is RoCE. CM calls ib_modify_port() regardless
1068 * of whether port link layer is ETH or IB. For ETH ports, qkey
1069 * violations and port capabilities are not meaningful.
1070 */
1071 if (is_eth)
1072 return 0;
1073
1074 mutex_lock(&mdev->cap_mask_mutex);
1075
1076 err = ib_query_port(ibdev, port, &attr);
1077 if (err)
1078 goto out;
1079
1080 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
1081 ~props->clr_port_cap_mask;
1082
1083 err = mlx4_ib_SET_PORT(mdev, port,
1084 !!(mask & IB_PORT_RESET_QKEY_CNTR),
1085 cap_mask);
1086
1087 out:
1088 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1089 return err;
1090 }
1091
1092 static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
1093 struct ib_udata *udata)
1094 {
1095 struct ib_device *ibdev = uctx->device;
1096 struct mlx4_ib_dev *dev = to_mdev(ibdev);
1097 struct mlx4_ib_ucontext *context = to_mucontext(uctx);
1098 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
1099 struct mlx4_ib_alloc_ucontext_resp resp;
1100 int err;
1101
1102 if (!dev->ib_active)
1103 return -EAGAIN;
1104
1105 if (ibdev->ops.uverbs_abi_ver ==
1106 MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1107 resp_v3.qp_tab_size = dev->dev->caps.num_qps;
1108 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
1109 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1110 } else {
1111 resp.dev_caps = dev->dev->caps.userspace_caps;
1112 resp.qp_tab_size = dev->dev->caps.num_qps;
1113 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
1114 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1115 resp.cqe_size = dev->dev->caps.cqe_size;
1116 }
1117
1118 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1119 if (err)
1120 return err;
1121
1122 INIT_LIST_HEAD(&context->db_page_list);
1123 mutex_init(&context->db_page_mutex);
1124
1125 INIT_LIST_HEAD(&context->wqn_ranges_list);
1126 mutex_init(&context->wqn_ranges_mutex);
1127
1128 if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1129 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
1130 else
1131 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
1132
1133 if (err) {
1134 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1135 return -EFAULT;
1136 }
1137
1138 return err;
1139 }
1140
1141 static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1142 {
1143 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1144
1145 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
1146 }
1147
1148 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1149 {
1150 }
1151
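/*
 * Userspace mappings: page offset 0 maps the UAR page, offset 1 the blue
 * flame page (only if the device has one), and offset 3 the internal HCA
 * clock page.
 */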
1152 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1153 {
1154 struct mlx4_ib_dev *dev = to_mdev(context->device);
1155
1156 switch (vma->vm_pgoff) {
1157 case 0:
1158 return rdma_user_mmap_io(context, vma,
1159 to_mucontext(context)->uar.pfn,
1160 PAGE_SIZE,
1161 pgprot_noncached(vma->vm_page_prot),
1162 NULL);
1163
1164 case 1:
1165 if (dev->dev->caps.bf_reg_size == 0)
1166 return -EINVAL;
1167 return rdma_user_mmap_io(
1168 context, vma,
1169 to_mucontext(context)->uar.pfn +
1170 dev->dev->caps.num_uars,
1171 PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
1172 NULL);
1173
1174 case 3: {
1175 struct mlx4_clock_params params;
1176 int ret;
1177
1178 ret = mlx4_get_internal_clock_params(dev->dev, &params);
1179 if (ret)
1180 return ret;
1181
1182 return rdma_user_mmap_io(
1183 context, vma,
1184 (pci_resource_start(dev->dev->persist->pdev,
1185 params.bar) +
1186 params.offset) >>
1187 PAGE_SHIFT,
1188 PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
1189 NULL);
1190 }
1191
1192 default:
1193 return -EINVAL;
1194 }
1195 }
1196
1197 static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
1198 {
1199 struct mlx4_ib_pd *pd = to_mpd(ibpd);
1200 struct ib_device *ibdev = ibpd->device;
1201 int err;
1202
1203 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1204 if (err)
1205 return err;
1206
1207 if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
1208 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1209 return -EFAULT;
1210 }
1211 return 0;
1212 }
1213
1214 static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
1215 {
1216 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1217 return 0;
1218 }
1219
1220 static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
1221 {
1222 struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
1223 struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
1224 struct ib_cq_init_attr cq_attr = {};
1225 int err;
1226
1227 if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1228 return -EOPNOTSUPP;
1229
1230 err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
1231 if (err)
1232 return err;
1233
1234 xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
1235 if (IS_ERR(xrcd->pd)) {
1236 err = PTR_ERR(xrcd->pd);
1237 goto err2;
1238 }
1239
1240 cq_attr.cqe = 1;
1241 xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
1242 if (IS_ERR(xrcd->cq)) {
1243 err = PTR_ERR(xrcd->cq);
1244 goto err3;
1245 }
1246
1247 return 0;
1248
1249 err3:
1250 ib_dealloc_pd(xrcd->pd);
1251 err2:
1252 mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
1253 return err;
1254 }
1255
1256 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
1257 {
1258 ib_destroy_cq(to_mxrcd(xrcd)->cq);
1259 ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1260 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1261 return 0;
1262 }
1263
1264 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1265 {
1266 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1267 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1268 struct mlx4_ib_gid_entry *ge;
1269
1270 ge = kzalloc_obj(*ge);
1271 if (!ge)
1272 return -ENOMEM;
1273
1274 ge->gid = *gid;
1275 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1276 ge->port = mqp->port;
1277 ge->added = 1;
1278 }
1279
1280 mutex_lock(&mqp->mutex);
1281 list_add_tail(&ge->list, &mqp->gid_list);
1282 mutex_unlock(&mqp->mutex);
1283
1284 return 0;
1285 }
1286
1287 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1288 struct mlx4_ib_counters *ctr_table)
1289 {
1290 struct counter_index *counter, *tmp_count;
1291
1292 mutex_lock(&ctr_table->mutex);
1293 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1294 list) {
1295 if (counter->allocated)
1296 mlx4_counter_free(ibdev->dev, counter->index);
1297 list_del(&counter->list);
1298 kfree(counter);
1299 }
1300 mutex_unlock(&ctr_table->mutex);
1301 }
1302
1303 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1304 union ib_gid *gid)
1305 {
1306 struct net_device *ndev;
1307 int ret = 0;
1308
1309 if (!mqp->port)
1310 return 0;
1311
1312 spin_lock_bh(&mdev->iboe.lock);
1313 ndev = mdev->iboe.netdevs[mqp->port - 1];
1314 dev_hold(ndev);
1315 spin_unlock_bh(&mdev->iboe.lock);
1316
1317 if (ndev) {
1318 ret = 1;
1319 dev_put(ndev);
1320 }
1321
1322 return ret;
1323 }
1324
1325 struct mlx4_ib_steering {
1326 struct list_head list;
1327 struct mlx4_flow_reg_id reg_id;
1328 union ib_gid gid;
1329 };
1330
1331 #define LAST_ETH_FIELD vlan_tag
1332 #define LAST_IB_FIELD sl
1333 #define LAST_IPV4_FIELD dst_ip
1334 #define LAST_TCP_UDP_FIELD src_port
1335
1336 /* Field is the last supported field */
1337 #define FIELDS_NOT_SUPPORTED(filter, field)\
1338 memchr_inv((void *)&filter.field +\
1339 sizeof(filter.field), 0,\
1340 sizeof(filter) -\
1341 offsetof(typeof(filter), field) -\
1342 sizeof(filter.field))
1343
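/*
 * Convert one ib_flow_spec into the hardware rule segment pointed to by
 * mlx4_spec. Returns the number of bytes consumed in the rule buffer, or
 * a negative errno if the spec type or its mask is not supported.
 */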
1344 static int parse_flow_attr(struct mlx4_dev *dev,
1345 u32 qp_num,
1346 union ib_flow_spec *ib_spec,
1347 struct _rule_hw *mlx4_spec)
1348 {
1349 enum mlx4_net_trans_rule_id type;
1350
1351 switch (ib_spec->type) {
1352 case IB_FLOW_SPEC_ETH:
1353 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1354 return -ENOTSUPP;
1355
1356 type = MLX4_NET_TRANS_RULE_ID_ETH;
1357 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1358 ETH_ALEN);
1359 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1360 ETH_ALEN);
1361 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1362 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1363 break;
1364 case IB_FLOW_SPEC_IB:
1365 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1366 return -ENOTSUPP;
1367
1368 type = MLX4_NET_TRANS_RULE_ID_IB;
1369 mlx4_spec->ib.l3_qpn =
1370 cpu_to_be32(qp_num);
1371 mlx4_spec->ib.qpn_mask =
1372 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1373 break;
1374
1375
1376 case IB_FLOW_SPEC_IPV4:
1377 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1378 return -ENOTSUPP;
1379
1380 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1381 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1382 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1383 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1384 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1385 break;
1386
1387 case IB_FLOW_SPEC_TCP:
1388 case IB_FLOW_SPEC_UDP:
1389 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1390 return -ENOTSUPP;
1391
1392 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1393 MLX4_NET_TRANS_RULE_ID_TCP :
1394 MLX4_NET_TRANS_RULE_ID_UDP;
1395 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1396 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1397 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1398 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1399 break;
1400
1401 default:
1402 return -EINVAL;
1403 }
1404 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1405 mlx4_hw_rule_sz(dev, type) < 0)
1406 return -EINVAL;
1407 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1408 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1409 return mlx4_hw_rule_sz(dev, type);
1410 }
1411
1412 struct default_rules {
1413 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1414 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1415 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1416 __u8 link_layer;
1417 };
1418 static const struct default_rules default_table[] = {
1419 {
1420 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1421 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1422 .rules_create_list = {IB_FLOW_SPEC_IB},
1423 .link_layer = IB_LINK_LAYER_INFINIBAND
1424 }
1425 };
1426
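/*
 * Check whether the flow attribute matches an entry in default_table:
 * all mandatory specs must be present and none of the "mandatory not"
 * specs may appear. Returns the table index on match, -1 otherwise.
 */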
1427 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1428 struct ib_flow_attr *flow_attr)
1429 {
1430 int i, j, k;
1431 void *ib_flow;
1432 const struct default_rules *pdefault_rules = default_table;
1433 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1434
1435 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1436 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1437 memset(&field_types, 0, sizeof(field_types));
1438
1439 if (link_layer != pdefault_rules->link_layer)
1440 continue;
1441
1442 ib_flow = flow_attr + 1;
1443 /* we assume the specs are sorted */
1444 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1445 j < flow_attr->num_of_specs; k++) {
1446 union ib_flow_spec *current_flow =
1447 (union ib_flow_spec *)ib_flow;
1448
1449 /* same layer but different type */
1450 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1451 (pdefault_rules->mandatory_fields[k] &
1452 IB_FLOW_SPEC_LAYER_MASK)) &&
1453 (current_flow->type !=
1454 pdefault_rules->mandatory_fields[k]))
1455 goto out;
1456
1457 /* same layer, try match next one */
1458 if (current_flow->type ==
1459 pdefault_rules->mandatory_fields[k]) {
1460 j++;
1461 ib_flow +=
1462 ((union ib_flow_spec *)ib_flow)->size;
1463 }
1464 }
1465
1466 ib_flow = flow_attr + 1;
1467 for (j = 0; j < flow_attr->num_of_specs;
1468 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1469 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1470 /* same layer and same type */
1471 if (((union ib_flow_spec *)ib_flow)->type ==
1472 pdefault_rules->mandatory_not_fields[k])
1473 goto out;
1474
1475 return i;
1476 }
1477 out:
1478 return -1;
1479 }
1480
1481 static int __mlx4_ib_create_default_rules(
1482 struct mlx4_ib_dev *mdev,
1483 struct ib_qp *qp,
1484 const struct default_rules *pdefault_rules,
1485 struct _rule_hw *mlx4_spec) {
1486 int size = 0;
1487 int i;
1488
1489 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1490 union ib_flow_spec ib_spec = {};
1491 int ret;
1492
1493 switch (pdefault_rules->rules_create_list[i]) {
1494 case 0:
1495 /* no rule */
1496 continue;
1497 case IB_FLOW_SPEC_IB:
1498 ib_spec.type = IB_FLOW_SPEC_IB;
1499 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1500
1501 break;
1502 default:
1503 /* invalid rule */
1504 return -EINVAL;
1505 }
1506 /* We must put an empty rule; the qpn is ignored */
1507 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1508 mlx4_spec);
1509 if (ret < 0) {
1510 pr_info("invalid parsing\n");
1511 return -EINVAL;
1512 }
1513
1514 mlx4_spec = (void *)mlx4_spec + ret;
1515 size += ret;
1516 }
1517 return size;
1518 }
1519
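/*
 * Build a complete steering rule (control segment, optional default
 * specs, then the user-supplied specs) in a command mailbox and attach
 * it with QP_FLOW_STEERING_ATTACH. On success *reg_id holds the
 * registration id needed to detach the rule later.
 */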
1520 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1521 int domain,
1522 enum mlx4_net_trans_promisc_mode flow_type,
1523 u64 *reg_id)
1524 {
1525 int ret, i;
1526 int size = 0;
1527 void *ib_flow;
1528 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1529 struct mlx4_cmd_mailbox *mailbox;
1530 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1531 int default_flow;
1532
1533 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1534 pr_err("Invalid priority value %d\n", flow_attr->priority);
1535 return -EINVAL;
1536 }
1537
1538 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1539 return -EINVAL;
1540
1541 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1542 if (IS_ERR(mailbox))
1543 return PTR_ERR(mailbox);
1544 ctrl = mailbox->buf;
1545
1546 ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
1547 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1548 ctrl->port = flow_attr->port;
1549 ctrl->qpn = cpu_to_be32(qp->qp_num);
1550
1551 ib_flow = flow_attr + 1;
1552 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1553 /* Add default flows */
1554 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1555 if (default_flow >= 0) {
1556 ret = __mlx4_ib_create_default_rules(
1557 mdev, qp, default_table + default_flow,
1558 mailbox->buf + size);
1559 if (ret < 0) {
1560 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1561 return -EINVAL;
1562 }
1563 size += ret;
1564 }
1565 for (i = 0; i < flow_attr->num_of_specs; i++) {
1566 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1567 mailbox->buf + size);
1568 if (ret < 0) {
1569 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1570 return -EINVAL;
1571 }
1572 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1573 size += ret;
1574 }
1575
1576 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1577 flow_attr->num_of_specs == 1) {
1578 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1579 enum ib_flow_spec_type header_spec =
1580 ((union ib_flow_spec *)(flow_attr + 1))->type;
1581
1582 if (header_spec == IB_FLOW_SPEC_ETH)
1583 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1584 }
1585
1586 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1587 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1588 MLX4_CMD_NATIVE);
1589 if (ret == -ENOMEM)
1590 pr_err("mcg table is full. Fail to register network rule.\n");
1591 else if (ret == -ENXIO)
1592 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1593 else if (ret)
1594 pr_err("Invalid argument. Fail to register network rule.\n");
1595
1596 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1597 return ret;
1598 }
1599
1600 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1601 {
1602 int err;
1603 err = mlx4_cmd(dev, reg_id, 0, 0,
1604 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1605 MLX4_CMD_NATIVE);
1606 if (err)
1607 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1608 reg_id);
1609 return err;
1610 }
1611
1612 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1613 u64 *reg_id)
1614 {
1615 void *ib_flow;
1616 union ib_flow_spec *ib_spec;
1617 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1618 int err = 0;
1619
1620 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1621 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1622 return 0; /* do nothing */
1623
1624 ib_flow = flow_attr + 1;
1625 ib_spec = (union ib_flow_spec *)ib_flow;
1626
1627 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1628 return 0; /* do nothing */
1629
1630 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1631 flow_attr->port, qp->qp_num,
1632 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1633 reg_id);
1634 return err;
1635 }
1636
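/*
 * Map a "don't trap" flow attribute to sniffer steering mode(s): an empty
 * spec or an all-zero MAC mask sniffs both multicast and unicast;
 * otherwise only the multicast bit may be masked, and the value's
 * multicast bit selects MC or UC sniffing.
 */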
1637 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1638 struct ib_flow_attr *flow_attr,
1639 enum mlx4_net_trans_promisc_mode *type)
1640 {
1641 int err = 0;
1642
1643 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1644 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1645 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1646 return -EOPNOTSUPP;
1647 }
1648
1649 if (flow_attr->num_of_specs == 0) {
1650 type[0] = MLX4_FS_MC_SNIFFER;
1651 type[1] = MLX4_FS_UC_SNIFFER;
1652 } else {
1653 union ib_flow_spec *ib_spec;
1654
1655 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1656 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1657 return -EINVAL;
1658
1659 /* if all is zero then MC and UC */
1660 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1661 type[0] = MLX4_FS_MC_SNIFFER;
1662 type[1] = MLX4_FS_UC_SNIFFER;
1663 } else {
1664 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1665 ib_spec->eth.mask.dst_mac[1],
1666 ib_spec->eth.mask.dst_mac[2],
1667 ib_spec->eth.mask.dst_mac[3],
1668 ib_spec->eth.mask.dst_mac[4],
1669 ib_spec->eth.mask.dst_mac[5]};
1670
1671 /* The xor above only flipped the MC bit; a non-empty mask is valid
1672 * only if that bit is set and the rest are zero.
1673 */
1674 if (!is_zero_ether_addr(&mac[0]))
1675 return -EINVAL;
1676
1677 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1678 type[0] = MLX4_FS_MC_SNIFFER;
1679 else
1680 type[0] = MLX4_FS_UC_SNIFFER;
1681 }
1682 }
1683
1684 return err;
1685 }
1686
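/*
 * ib_create_flow entry point: translate the attribute type into one or
 * two mlx4 steering modes, create the rule(s), and on a bonded device
 * also install mirror rules on the second port.
 */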
1687 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1688 struct ib_flow_attr *flow_attr,
1689 struct ib_udata *udata)
1690 {
1691 int err = 0, i = 0, j = 0;
1692 struct mlx4_ib_flow *mflow;
1693 enum mlx4_net_trans_promisc_mode type[2];
1694 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1695 int is_bonded = mlx4_is_bonded(dev);
1696
1697 if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1698 return ERR_PTR(-EOPNOTSUPP);
1699
1700 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1701 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1702 return ERR_PTR(-EOPNOTSUPP);
1703
1704 if (udata &&
1705 udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1706 return ERR_PTR(-EOPNOTSUPP);
1707
1708 memset(type, 0, sizeof(type));
1709
1710 mflow = kzalloc_obj(*mflow);
1711 if (!mflow) {
1712 err = -ENOMEM;
1713 goto err_free;
1714 }
1715
1716 switch (flow_attr->type) {
1717 case IB_FLOW_ATTR_NORMAL:
1718 /* If the dont-trap flag (continue match) is set, then under
1719 * specific conditions traffic is replicated to the given qp
1720 * without being stolen from the flow
1721 */
1722 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1723 err = mlx4_ib_add_dont_trap_rule(dev,
1724 flow_attr,
1725 type);
1726 if (err)
1727 goto err_free;
1728 } else {
1729 type[0] = MLX4_FS_REGULAR;
1730 }
1731 break;
1732
1733 case IB_FLOW_ATTR_ALL_DEFAULT:
1734 type[0] = MLX4_FS_ALL_DEFAULT;
1735 break;
1736
1737 case IB_FLOW_ATTR_MC_DEFAULT:
1738 type[0] = MLX4_FS_MC_DEFAULT;
1739 break;
1740
1741 case IB_FLOW_ATTR_SNIFFER:
1742 type[0] = MLX4_FS_MIRROR_RX_PORT;
1743 type[1] = MLX4_FS_MIRROR_SX_PORT;
1744 break;
1745
1746 default:
1747 err = -EINVAL;
1748 goto err_free;
1749 }
1750
1751 while (i < ARRAY_SIZE(type) && type[i]) {
1752 err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
1753 type[i], &mflow->reg_id[i].id);
1754 if (err)
1755 goto err_create_flow;
1756 if (is_bonded) {
1757 /* Application always sees one port so the mirror rule
1758 * must be on port #2
1759 */
1760 flow_attr->port = 2;
1761 err = __mlx4_ib_create_flow(qp, flow_attr,
1762 MLX4_DOMAIN_UVERBS, type[j],
1763 &mflow->reg_id[j].mirror);
1764 flow_attr->port = 1;
1765 if (err)
1766 goto err_create_flow;
1767 j++;
1768 }
1769
1770 i++;
1771 }
1772
1773 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1774 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1775 &mflow->reg_id[i].id);
1776 if (err)
1777 goto err_create_flow;
1778
1779 if (is_bonded) {
1780 flow_attr->port = 2;
1781 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1782 &mflow->reg_id[j].mirror);
1783 flow_attr->port = 1;
1784 if (err)
1785 goto err_create_flow;
1786 j++;
1787 }
1788 		/* account for the tunnel steering rule stored in reg_id[i] */
1789 i++;
1790 }
1791
1792 return &mflow->ibflow;
1793
1794 err_create_flow:
1795 while (i) {
1796 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1797 mflow->reg_id[i].id);
1798 i--;
1799 }
1800
1801 while (j) {
1802 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1803 mflow->reg_id[j].mirror);
1804 j--;
1805 }
1806 err_free:
1807 kfree(mflow);
1808 return ERR_PTR(err);
1809 }
1810
1811 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1812 {
1813 int err, ret = 0;
1814 int i = 0;
1815 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1816 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1817
1818 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1819 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1820 if (err)
1821 ret = err;
1822 if (mflow->reg_id[i].mirror) {
1823 err = __mlx4_ib_destroy_flow(mdev->dev,
1824 mflow->reg_id[i].mirror);
1825 if (err)
1826 ret = err;
1827 }
1828 i++;
1829 }
1830
1831 kfree(mflow);
1832 return ret;
1833 }
1834
1835 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1836 {
1837 int err;
1838 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1839 struct mlx4_dev *dev = mdev->dev;
1840 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1841 struct mlx4_ib_steering *ib_steering = NULL;
1842 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1843 struct mlx4_flow_reg_id reg_id;
1844
1845 if (mdev->dev->caps.steering_mode ==
1846 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1847 ib_steering = kmalloc_obj(*ib_steering);
1848 if (!ib_steering)
1849 return -ENOMEM;
1850 }
1851
1852 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1853 !!(mqp->flags &
1854 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1855 prot, &reg_id.id);
1856 if (err) {
1857 pr_err("multicast attach op failed, err %d\n", err);
1858 goto err_malloc;
1859 }
1860
1861 reg_id.mirror = 0;
1862 if (mlx4_is_bonded(dev)) {
1863 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1864 (mqp->port == 1) ? 2 : 1,
1865 !!(mqp->flags &
1866 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1867 prot, &reg_id.mirror);
1868 if (err)
1869 goto err_add;
1870 }
1871
1872 err = add_gid_entry(ibqp, gid);
1873 if (err)
1874 goto err_add;
1875
1876 if (ib_steering) {
1877 memcpy(ib_steering->gid.raw, gid->raw, 16);
1878 ib_steering->reg_id = reg_id;
1879 mutex_lock(&mqp->mutex);
1880 list_add(&ib_steering->list, &mqp->steering_rules);
1881 mutex_unlock(&mqp->mutex);
1882 }
1883 return 0;
1884
1885 err_add:
1886 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1887 prot, reg_id.id);
1888 if (reg_id.mirror)
1889 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1890 prot, reg_id.mirror);
1891 err_malloc:
1892 kfree(ib_steering);
1893
1894 return err;
1895 }
1896
1897 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1898 {
1899 struct mlx4_ib_gid_entry *ge;
1900 struct mlx4_ib_gid_entry *tmp;
1901 struct mlx4_ib_gid_entry *ret = NULL;
1902
1903 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1904 if (!memcmp(raw, ge->gid.raw, 16)) {
1905 ret = ge;
1906 break;
1907 }
1908 }
1909
1910 return ret;
1911 }
1912
1913 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1914 {
1915 int err;
1916 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1917 struct mlx4_dev *dev = mdev->dev;
1918 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1919 struct net_device *ndev;
1920 struct mlx4_ib_gid_entry *ge;
1921 struct mlx4_flow_reg_id reg_id = {0, 0};
1922 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1923
1924 if (mdev->dev->caps.steering_mode ==
1925 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1926 struct mlx4_ib_steering *ib_steering;
1927
1928 mutex_lock(&mqp->mutex);
1929 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1930 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1931 list_del(&ib_steering->list);
1932 break;
1933 }
1934 }
1935 mutex_unlock(&mqp->mutex);
1936 if (&ib_steering->list == &mqp->steering_rules) {
1937 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1938 return -EINVAL;
1939 }
1940 reg_id = ib_steering->reg_id;
1941 kfree(ib_steering);
1942 }
1943
1944 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1945 prot, reg_id.id);
1946 if (err)
1947 return err;
1948
1949 if (mlx4_is_bonded(dev)) {
1950 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1951 prot, reg_id.mirror);
1952 if (err)
1953 return err;
1954 }
1955
1956 mutex_lock(&mqp->mutex);
1957 ge = find_gid_entry(mqp, gid->raw);
1958 if (ge) {
1959 spin_lock_bh(&mdev->iboe.lock);
1960 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1961 dev_hold(ndev);
1962 spin_unlock_bh(&mdev->iboe.lock);
1963 dev_put(ndev);
1964 list_del(&ge->list);
1965 kfree(ge);
1966 } else
1967 pr_warn("could not find mgid entry\n");
1968
1969 mutex_unlock(&mqp->mutex);
1970
1971 return 0;
1972 }
1973
1974 static int init_node_data(struct mlx4_ib_dev *dev)
1975 {
1976 struct ib_smp *in_mad;
1977 struct ib_smp *out_mad;
1978 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1979 int err = -ENOMEM;
1980
1981 in_mad = kzalloc_obj(*in_mad);
1982 out_mad = kmalloc_obj(*out_mad);
1983 if (!in_mad || !out_mad)
1984 goto out;
1985
1986 ib_init_query_mad(in_mad);
1987 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1988 if (mlx4_is_master(dev->dev))
1989 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1990
1991 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1992 if (err)
1993 goto out;
1994
1995 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
1996
1997 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1998
1999 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2000 if (err)
2001 goto out;
2002
2003 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2004 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2005
2006 out:
2007 kfree(in_mad);
2008 kfree(out_mad);
2009 return err;
2010 }
2011
2012 static ssize_t hca_type_show(struct device *device,
2013 struct device_attribute *attr, char *buf)
2014 {
2015 struct mlx4_ib_dev *dev =
2016 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2017
2018 return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
2019 }
2020 static DEVICE_ATTR_RO(hca_type);
2021
2022 static ssize_t hw_rev_show(struct device *device,
2023 struct device_attribute *attr, char *buf)
2024 {
2025 struct mlx4_ib_dev *dev =
2026 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2027
2028 return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
2029 }
2030 static DEVICE_ATTR_RO(hw_rev);
2031
2032 static ssize_t board_id_show(struct device *device,
2033 struct device_attribute *attr, char *buf)
2034 {
2035 struct mlx4_ib_dev *dev =
2036 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2037
2038 return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
2039 }
2040 static DEVICE_ATTR_RO(board_id);
2041
2042 static struct attribute *mlx4_class_attributes[] = {
2043 &dev_attr_hw_rev.attr,
2044 &dev_attr_hca_type.attr,
2045 &dev_attr_board_id.attr,
2046 NULL
2047 };
2048
2049 static const struct attribute_group mlx4_attr_group = {
2050 .attrs = mlx4_class_attributes,
2051 };
2052
2053 struct diag_counter {
2054 const char *name;
2055 u32 offset;
2056 };
2057
2058 #define DIAG_COUNTER(_name, _offset) \
2059 { .name = #_name, .offset = _offset }
2060
2061 static const struct diag_counter diag_basic[] = {
2062 DIAG_COUNTER(rq_num_lle, 0x00),
2063 DIAG_COUNTER(sq_num_lle, 0x04),
2064 DIAG_COUNTER(rq_num_lqpoe, 0x08),
2065 DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2066 DIAG_COUNTER(rq_num_lpe, 0x18),
2067 DIAG_COUNTER(sq_num_lpe, 0x1C),
2068 DIAG_COUNTER(rq_num_wrfe, 0x20),
2069 DIAG_COUNTER(sq_num_wrfe, 0x24),
2070 DIAG_COUNTER(sq_num_mwbe, 0x2C),
2071 DIAG_COUNTER(sq_num_bre, 0x34),
2072 DIAG_COUNTER(sq_num_rire, 0x44),
2073 DIAG_COUNTER(rq_num_rire, 0x48),
2074 DIAG_COUNTER(sq_num_rae, 0x4C),
2075 DIAG_COUNTER(rq_num_rae, 0x50),
2076 DIAG_COUNTER(sq_num_roe, 0x54),
2077 DIAG_COUNTER(sq_num_tree, 0x5C),
2078 DIAG_COUNTER(sq_num_rree, 0x64),
2079 DIAG_COUNTER(rq_num_rnr, 0x68),
2080 DIAG_COUNTER(sq_num_rnr, 0x6C),
2081 DIAG_COUNTER(rq_num_oos, 0x100),
2082 DIAG_COUNTER(sq_num_oos, 0x104),
2083 };
2084
2085 static const struct diag_counter diag_ext[] = {
2086 DIAG_COUNTER(rq_num_dup, 0x130),
2087 DIAG_COUNTER(sq_num_to, 0x134),
2088 };
2089
2090 static const struct diag_counter diag_device_only[] = {
2091 DIAG_COUNTER(num_cqovf, 0x1A0),
2092 DIAG_COUNTER(rq_num_udsdprd, 0x118),
2093 };
2094
2095 static struct rdma_hw_stats *
2096 mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
2097 {
2098 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2099 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2100
2101 if (!diag[0].descs)
2102 return NULL;
2103
2104 return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
2105 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2106 }
2107
2108 static struct rdma_hw_stats *
2109 mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
2110 {
2111 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2112 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2113
2114 if (!diag[1].descs)
2115 return NULL;
2116
2117 return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
2118 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2119 }
2120
2121 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2122 struct rdma_hw_stats *stats,
2123 u32 port, int index)
2124 {
2125 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2126 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2127 u32 hw_value[ARRAY_SIZE(diag_device_only) +
2128 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2129 int ret;
2130 int i;
2131
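	/* diag[0] describes the device-wide counter set and diag[1] the
	 * per-port set, so !!port selects the matching offsets and count.
	 */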
2132 ret = mlx4_query_diag_counters(dev->dev,
2133 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2134 diag[!!port].offset, hw_value,
2135 diag[!!port].num_counters, port);
2136
2137 if (ret)
2138 return ret;
2139
2140 for (i = 0; i < diag[!!port].num_counters; i++)
2141 stats->value[i] = hw_value[i];
2142
2143 return diag[!!port].num_counters;
2144 }
2145
2146 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2147 struct rdma_stat_desc **pdescs,
2148 u32 **offset, u32 *num, bool port)
2149 {
2150 u32 num_counters;
2151
2152 num_counters = ARRAY_SIZE(diag_basic);
2153
2154 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2155 num_counters += ARRAY_SIZE(diag_ext);
2156
2157 if (!port)
2158 num_counters += ARRAY_SIZE(diag_device_only);
2159
2160 *pdescs = kzalloc_objs(struct rdma_stat_desc, num_counters);
2161 if (!*pdescs)
2162 return -ENOMEM;
2163
2164 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2165 if (!*offset)
2166 goto err;
2167
2168 *num = num_counters;
2169
2170 return 0;
2171
2172 err:
2173 kfree(*pdescs);
2174 return -ENOMEM;
2175 }
2176
2177 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2178 struct rdma_stat_desc *descs,
2179 u32 *offset, bool port)
2180 {
2181 int i;
2182 int j;
2183
2184 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2185 descs[i].name = diag_basic[i].name;
2186 offset[i] = diag_basic[i].offset;
2187 }
2188
2189 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2190 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2191 descs[j].name = diag_ext[i].name;
2192 offset[j] = diag_ext[i].offset;
2193 }
2194 }
2195
2196 if (!port) {
2197 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2198 descs[j].name = diag_device_only[i].name;
2199 offset[j] = diag_device_only[i].offset;
2200 }
2201 }
2202 }
2203
2204 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2205 .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
2206 .alloc_hw_port_stats = mlx4_ib_alloc_hw_port_stats,
2207 .get_hw_stats = mlx4_ib_get_hw_stats,
2208 };
2209
2210 static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = {
2211 .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
2212 .get_hw_stats = mlx4_ib_get_hw_stats,
2213 };
2214
2215 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2216 {
2217 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2218 int i;
2219 int ret;
2220 bool per_port = !!(ibdev->dev->caps.flags2 &
2221 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2222
2223 if (mlx4_is_slave(ibdev->dev))
2224 return 0;
2225
2226 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2227 		/*
2228 		 * i == 1 means we are building the port counters; without
2229 		 * per-port support, register the stats ops that lack the
2230 		 * port callbacks and stop here.
2230 		 */
2231 if (i && !per_port) {
2232 ib_set_device_ops(&ibdev->ib_dev,
2233 &mlx4_ib_hw_stats_ops1);
2234
2235 return 0;
2236 }
2237
2238 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
2239 &diag[i].offset,
2240 &diag[i].num_counters, i);
2241 if (ret)
2242 goto err_alloc;
2243
2244 mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
2245 diag[i].offset, i);
2246 }
2247
2248 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2249
2250 return 0;
2251
2252 err_alloc:
2253 if (i) {
2254 kfree(diag[i - 1].descs);
2255 kfree(diag[i - 1].offset);
2256 }
2257
2258 return ret;
2259 }
2260
2261 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2262 {
2263 int i;
2264
2265 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2266 kfree(ibdev->diag_counters[i].offset);
2267 kfree(ibdev->diag_counters[i].descs);
2268 }
2269 }
2270
2271 #define MLX4_IB_INVALID_MAC ((u64)-1)
2272 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2273 struct net_device *dev,
2274 int port)
2275 {
2276 u64 new_smac = 0;
2277 u64 release_mac = MLX4_IB_INVALID_MAC;
2278 struct mlx4_ib_qp *qp;
2279
2280 new_smac = ether_addr_to_u64(dev->dev_addr);
2281 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2282
2283 	/* no need to update QP1 or register a MAC in non-SRIOV mode */
2284 if (!mlx4_is_mfunc(ibdev->dev))
2285 return;
2286
2287 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2288 qp = ibdev->qp1_proxy[port - 1];
2289 if (qp) {
2290 int new_smac_index;
2291 u64 old_smac;
2292 struct mlx4_update_qp_params update_params;
2293
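		/* Register the new source MAC, repoint the proxy QP1 at it,
		 * and release whichever MAC becomes unused once we unlock.
		 */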
2294 mutex_lock(&qp->mutex);
2295 old_smac = qp->pri.smac;
2296 if (new_smac == old_smac)
2297 goto unlock;
2298
2299 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2300
2301 if (new_smac_index < 0)
2302 goto unlock;
2303
2304 update_params.smac_index = new_smac_index;
2305 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2306 &update_params)) {
2307 release_mac = new_smac;
2308 goto unlock;
2309 }
2310 /* if old port was zero, no mac was yet registered for this QP */
2311 if (qp->pri.smac_port)
2312 release_mac = old_smac;
2313 qp->pri.smac = new_smac;
2314 qp->pri.smac_port = port;
2315 qp->pri.smac_index = new_smac_index;
2316 }
2317
2318 unlock:
2319 if (release_mac != MLX4_IB_INVALID_MAC)
2320 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2321 if (qp)
2322 mutex_unlock(&qp->mutex);
2323 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2324 }
2325
2326 static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
2327 struct net_device *dev,
2328 unsigned long event)
2329
2330 {
2331 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
2332
2333 ASSERT_RTNL();
2334
2335 if (dev->dev.parent != ibdev->ib_dev.dev.parent)
2336 return;
2337
2338 spin_lock_bh(&iboe->lock);
2339
2340 iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
2341
2342 spin_unlock_bh(&iboe->lock);
2343
2344 if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
2345 mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
2346 }
2347
2348 static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev,
2349 unsigned long event)
2350 {
2351 struct mlx4_ib_dev *mlx4_ibdev =
2352 container_of(ibdev, struct mlx4_ib_dev, ib_dev);
2353 struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe;
2354
2355 if (!net_eq(dev_net(ndev), &init_net))
2356 return;
2357
2358 ASSERT_RTNL();
2359
2360 if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent)
2361 return;
2362
2363 spin_lock_bh(&iboe->lock);
2364
2365 iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
2366
2367 if (event == NETDEV_UP || event == NETDEV_DOWN)
2368 ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
2369
2370 spin_unlock_bh(&iboe->lock);
2371
2372 if (event == NETDEV_UP || event == NETDEV_CHANGE)
2373 mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
2374 }
2375
2376 static int mlx4_ib_netdev_event(struct notifier_block *this,
2377 unsigned long event, void *ptr)
2378 {
2379 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2380 struct mlx4_ib_dev *ibdev;
2381
2382 if (!net_eq(dev_net(dev), &init_net))
2383 return NOTIFY_DONE;
2384
2385 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2386 mlx4_ib_scan_netdev(ibdev, dev, event);
2387
2388 return NOTIFY_DONE;
2389 }
2390
2391 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2392 {
2393 int port;
2394 int slave;
2395 int i;
2396
2397 if (mlx4_is_master(ibdev->dev)) {
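		/* The master gets an identity virt-to-phys pkey mapping; every
		 * other slave's entries (except index 0) point at the last
		 * pkey in the physical table.
		 */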
2398 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2399 ++slave) {
2400 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2401 for (i = 0;
2402 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2403 ++i) {
2404 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2405 /* master has the identity virt2phys pkey mapping */
2406 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2407 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2408 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2409 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2410 }
2411 }
2412 }
2413 /* initialize pkey cache */
2414 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2415 for (i = 0;
2416 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2417 ++i)
2418 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2419 (i) ? 0 : 0xFFFF;
2420 }
2421 }
2422 }
2423
2424 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2425 {
2426 int i, j, eq = 0, total_eqs = 0;
2427
2428 ibdev->eq_table = kzalloc_objs(ibdev->eq_table[0],
2429 dev->caps.num_comp_vectors);
2430 if (!ibdev->eq_table)
2431 return;
2432
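	/* Spread the completion vectors over the ports: request each port's
	 * EQs from the core driver and record those we are granted; slots
	 * that cannot be assigned are marked with -1.
	 */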
2433 for (i = 1; i <= dev->caps.num_ports; i++) {
2434 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2435 j++, total_eqs++) {
2436 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2437 continue;
2438 ibdev->eq_table[eq] = total_eqs;
2439 if (!mlx4_assign_eq(dev, i,
2440 &ibdev->eq_table[eq]))
2441 eq++;
2442 else
2443 ibdev->eq_table[eq] = -1;
2444 }
2445 }
2446
2447 for (i = eq; i < dev->caps.num_comp_vectors;
2448 ibdev->eq_table[i++] = -1)
2449 ;
2450
2451 /* Advertise the new number of EQs to clients */
2452 ibdev->ib_dev.num_comp_vectors = eq;
2453 }
2454
2455 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2456 {
2457 int i;
2458 int total_eqs = ibdev->ib_dev.num_comp_vectors;
2459
2460 /* no eqs were allocated */
2461 if (!ibdev->eq_table)
2462 return;
2463
2464 /* Reset the advertised EQ number */
2465 ibdev->ib_dev.num_comp_vectors = 0;
2466
2467 for (i = 0; i < total_eqs; i++)
2468 mlx4_release_eq(dev, ibdev->eq_table[i]);
2469
2470 kfree(ibdev->eq_table);
2471 ibdev->eq_table = NULL;
2472 }
2473
2474 static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
2475 struct ib_port_immutable *immutable)
2476 {
2477 struct ib_port_attr attr;
2478 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2479 int err;
2480
2481 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2482 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2483 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2484 } else {
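		/* Ethernet port: advertise RoCE v1 (and RoCE v2 when the
		 * device supports it) together with raw packet QP support.
		 */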
2485 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2486 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2487 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2488 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2489 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2490 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2491 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2492 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2493 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2494 }
2495
2496 err = ib_query_port(ibdev, port_num, &attr);
2497 if (err)
2498 return err;
2499
2500 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2501 immutable->gid_tbl_len = attr.gid_tbl_len;
2502
2503 return 0;
2504 }
2505
2506 static void get_fw_ver_str(struct ib_device *device, char *str)
2507 {
2508 struct mlx4_ib_dev *dev =
2509 container_of(device, struct mlx4_ib_dev, ib_dev);
2510 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2511 (int) (dev->dev->caps.fw_ver >> 32),
2512 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2513 (int) dev->dev->caps.fw_ver & 0xffff);
2514 }
2515
2516 static const struct ib_device_ops mlx4_ib_dev_ops = {
2517 .owner = THIS_MODULE,
2518 .driver_id = RDMA_DRIVER_MLX4,
2519 .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2520
2521 .add_gid = mlx4_ib_add_gid,
2522 .alloc_mr = mlx4_ib_alloc_mr,
2523 .alloc_pd = mlx4_ib_alloc_pd,
2524 .alloc_ucontext = mlx4_ib_alloc_ucontext,
2525 .attach_mcast = mlx4_ib_mcg_attach,
2526 .create_ah = mlx4_ib_create_ah,
2527 .create_cq = mlx4_ib_create_cq,
2528 .create_qp = mlx4_ib_create_qp,
2529 .create_srq = mlx4_ib_create_srq,
2530 .dealloc_pd = mlx4_ib_dealloc_pd,
2531 .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2532 .del_gid = mlx4_ib_del_gid,
2533 .dereg_mr = mlx4_ib_dereg_mr,
2534 .destroy_ah = mlx4_ib_destroy_ah,
2535 .destroy_cq = mlx4_ib_destroy_cq,
2536 .destroy_qp = mlx4_ib_destroy_qp,
2537 .destroy_srq = mlx4_ib_destroy_srq,
2538 .detach_mcast = mlx4_ib_mcg_detach,
2539 .device_group = &mlx4_attr_group,
2540 .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2541 .drain_rq = mlx4_ib_drain_rq,
2542 .drain_sq = mlx4_ib_drain_sq,
2543 .get_dev_fw_str = get_fw_ver_str,
2544 .get_dma_mr = mlx4_ib_get_dma_mr,
2545 .get_link_layer = mlx4_ib_port_link_layer,
2546 .get_netdev = mlx4_ib_get_netdev,
2547 .get_port_immutable = mlx4_port_immutable,
2548 .map_mr_sg = mlx4_ib_map_mr_sg,
2549 .mmap = mlx4_ib_mmap,
2550 .modify_cq = mlx4_ib_modify_cq,
2551 .modify_device = mlx4_ib_modify_device,
2552 .modify_port = mlx4_ib_modify_port,
2553 .modify_qp = mlx4_ib_modify_qp,
2554 .modify_srq = mlx4_ib_modify_srq,
2555 .poll_cq = mlx4_ib_poll_cq,
2556 .post_recv = mlx4_ib_post_recv,
2557 .post_send = mlx4_ib_post_send,
2558 .post_srq_recv = mlx4_ib_post_srq_recv,
2559 .process_mad = mlx4_ib_process_mad,
2560 .query_ah = mlx4_ib_query_ah,
2561 .query_device = mlx4_ib_query_device,
2562 .query_gid = mlx4_ib_query_gid,
2563 .query_pkey = mlx4_ib_query_pkey,
2564 .query_port = mlx4_ib_query_port,
2565 .query_qp = mlx4_ib_query_qp,
2566 .query_srq = mlx4_ib_query_srq,
2567 .reg_user_mr = mlx4_ib_reg_user_mr,
2568 .req_notify_cq = mlx4_ib_arm_cq,
2569 .rereg_user_mr = mlx4_ib_rereg_user_mr,
2570 .resize_cq = mlx4_ib_resize_cq,
2571 .report_port_event = mlx4_ib_port_event,
2572
2573 INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2574 INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2575 INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2576 INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp),
2577 INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2578 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2579 };
2580
2581 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2582 .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2583 .create_wq = mlx4_ib_create_wq,
2584 .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2585 .destroy_wq = mlx4_ib_destroy_wq,
2586 .modify_wq = mlx4_ib_modify_wq,
2587
2588 INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
2589 ib_rwq_ind_tbl),
2590 };
2591
2592 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2593 .alloc_mw = mlx4_ib_alloc_mw,
2594 .dealloc_mw = mlx4_ib_dealloc_mw,
2595
2596 INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
2597 };
2598
2599 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2600 .alloc_xrcd = mlx4_ib_alloc_xrcd,
2601 .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2602
2603 INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
2604 };
2605
2606 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2607 .create_flow = mlx4_ib_create_flow,
2608 .destroy_flow = mlx4_ib_destroy_flow,
2609 };
2610
2611 static int mlx4_ib_probe(struct auxiliary_device *adev,
2612 const struct auxiliary_device_id *id)
2613 {
2614 struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
2615 struct mlx4_dev *dev = madev->mdev;
2616 struct mlx4_ib_dev *ibdev;
2617 int num_ports = 0;
2618 int i, j;
2619 int err;
2620 struct mlx4_ib_iboe *iboe;
2621 int ib_num_ports = 0;
2622 int num_req_counters;
2623 int allocated;
2624 u32 counter_index;
2625 struct counter_index *new_counter_index;
2626
2627 pr_info_once("%s", mlx4_ib_version);
2628
2629 num_ports = 0;
2630 mlx4_foreach_ib_transport_port(i, dev)
2631 num_ports++;
2632
2633 /* No point in registering a device with no ports... */
2634 if (num_ports == 0)
2635 return -ENODEV;
2636
2637 ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2638 if (!ibdev) {
2639 dev_err(&dev->persist->pdev->dev,
2640 "Device struct alloc failed\n");
2641 return -ENOMEM;
2642 }
2643
2644 iboe = &ibdev->iboe;
2645
2646 err = mlx4_pd_alloc(dev, &ibdev->priv_pdn);
2647 if (err)
2648 goto err_dealloc;
2649
2650 err = mlx4_uar_alloc(dev, &ibdev->priv_uar);
2651 if (err)
2652 goto err_pd;
2653
2654 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2655 PAGE_SIZE);
2656 if (!ibdev->uar_map) {
2657 err = -ENOMEM;
2658 goto err_uar;
2659 }
2660 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2661
2662 ibdev->dev = dev;
2663 ibdev->bond_next_port = 0;
2664
2665 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2666 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2667 ibdev->num_ports = num_ports;
2668 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2669 1 : ibdev->num_ports;
2670 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2671 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
2672
2673 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2674
2675 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2676 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2677 IB_LINK_LAYER_ETHERNET) ||
2678 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2679 IB_LINK_LAYER_ETHERNET)))
2680 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2681
2682 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2683 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2684 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2685
2686 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2687 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2688 }
2689
2690 if (check_flow_steering_support(dev)) {
2691 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2692 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2693 }
2694
2695 if (!dev->caps.userspace_caps)
2696 ibdev->ib_dev.ops.uverbs_abi_ver =
2697 MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2698
2699 mlx4_ib_alloc_eqs(dev, ibdev);
2700
2701 spin_lock_init(&iboe->lock);
2702
2703 err = init_node_data(ibdev);
2704 if (err)
2705 goto err_map;
2706 mlx4_init_sl2vl_tbl(ibdev);
2707
2708 for (i = 0; i < ibdev->num_ports; ++i) {
2709 mutex_init(&ibdev->counters_table[i].mutex);
2710 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2711 iboe->last_port_state[i] = IB_PORT_DOWN;
2712 }
2713
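	/* Allocate one flow counter per port (a single shared one when the
	 * ports are bonded); on failure fall back to the default counter.
	 */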
2714 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2715 for (i = 0; i < num_req_counters; ++i) {
2716 mutex_init(&ibdev->qp1_proxy_lock[i]);
2717 allocated = 0;
2718 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2719 IB_LINK_LAYER_ETHERNET) {
2720 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2721 MLX4_RES_USAGE_DRIVER);
2722 /* if failed to allocate a new counter, use default */
2723 if (err)
2724 counter_index =
2725 mlx4_get_default_counter_index(dev,
2726 i + 1);
2727 else
2728 allocated = 1;
2729 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2730 counter_index = mlx4_get_default_counter_index(dev,
2731 i + 1);
2732 }
2733 new_counter_index = kmalloc_obj(*new_counter_index);
2734 if (!new_counter_index) {
2735 err = -ENOMEM;
2736 if (allocated)
2737 mlx4_counter_free(ibdev->dev, counter_index);
2738 goto err_counter;
2739 }
2740 new_counter_index->index = counter_index;
2741 new_counter_index->allocated = allocated;
2742 list_add_tail(&new_counter_index->list,
2743 &ibdev->counters_table[i].counters_list);
2744 ibdev->counters_table[i].default_counter = counter_index;
2745 pr_info("counter index %d for port %d allocated %d\n",
2746 counter_index, i + 1, allocated);
2747 }
2748 if (mlx4_is_bonded(dev))
2749 for (i = 1; i < ibdev->num_ports ; ++i) {
2750 new_counter_index =
2751 kmalloc_obj(struct counter_index);
2752 if (!new_counter_index) {
2753 err = -ENOMEM;
2754 goto err_counter;
2755 }
2756 new_counter_index->index = counter_index;
2757 new_counter_index->allocated = 0;
2758 list_add_tail(&new_counter_index->list,
2759 &ibdev->counters_table[i].counters_list);
2760 ibdev->counters_table[i].default_counter =
2761 counter_index;
2762 }
2763
2764 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2765 ib_num_ports++;
2766
2767 spin_lock_init(&ibdev->sm_lock);
2768 mutex_init(&ibdev->cap_mask_mutex);
2769 INIT_LIST_HEAD(&ibdev->qp_list);
2770 spin_lock_init(&ibdev->reset_flow_resource_lock);
2771
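	/* With device-managed flow steering, reserve a dedicated QPN range
	 * for IB unicast steering and track its usage in a bitmap.
	 */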
2772 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2773 ib_num_ports) {
2774 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2775 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2776 MLX4_IB_UC_STEER_QPN_ALIGN,
2777 &ibdev->steer_qpn_base, 0,
2778 MLX4_RES_USAGE_DRIVER);
2779 if (err)
2780 goto err_counter;
2781
2782 ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
2783 GFP_KERNEL);
2784 if (!ibdev->ib_uc_qpns_bitmap) {
2785 err = -ENOMEM;
2786 goto err_steer_qp_release;
2787 }
2788
2789 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2790 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2791 ibdev->steer_qpn_count);
2792 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2793 dev, ibdev->steer_qpn_base,
2794 ibdev->steer_qpn_base +
2795 ibdev->steer_qpn_count - 1);
2796 if (err)
2797 goto err_steer_free_bitmap;
2798 } else {
2799 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2800 ibdev->steer_qpn_count);
2801 }
2802 }
2803
2804 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2805 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2806
2807 err = mlx4_ib_alloc_diag_counters(ibdev);
2808 if (err)
2809 goto err_steer_free_bitmap;
2810
2811 err = ib_register_device(&ibdev->ib_dev, "mlx4_%d",
2812 &dev->persist->pdev->dev);
2813 if (err)
2814 goto err_diag_counters;
2815
2816 err = mlx4_ib_mad_init(ibdev);
2817 if (err)
2818 goto err_reg;
2819
2820 err = mlx4_ib_init_sriov(ibdev);
2821 if (err)
2822 goto err_mad;
2823
2824 if (!iboe->nb.notifier_call) {
2825 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2826 err = register_netdevice_notifier(&iboe->nb);
2827 if (err) {
2828 iboe->nb.notifier_call = NULL;
2829 goto err_notif;
2830 }
2831 }
2832 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2833 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2834 if (err)
2835 goto err_notif;
2836 }
2837
2838 ibdev->ib_active = true;
2839 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2840 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2841 &ibdev->ib_dev);
2842
2843 if (mlx4_is_mfunc(ibdev->dev))
2844 init_pkeys(ibdev);
2845
2846 /* create paravirt contexts for any VFs which are active */
2847 if (mlx4_is_master(ibdev->dev)) {
2848 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2849 if (j == mlx4_master_func_num(ibdev->dev))
2850 continue;
2851 if (mlx4_is_slave_active(ibdev->dev, j))
2852 do_slave_init(ibdev, j, 1);
2853 }
2854 }
2855
2856 /* register mlx4 core notifier */
2857 ibdev->mlx_nb.notifier_call = mlx4_ib_event;
2858 err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb);
2859 WARN(err, "failed to register mlx4 event notifier (%d)", err);
2860
2861 auxiliary_set_drvdata(adev, ibdev);
2862 return 0;
2863
2864 err_notif:
2865 if (ibdev->iboe.nb.notifier_call) {
2866 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2867 pr_warn("failure unregistering notifier\n");
2868 ibdev->iboe.nb.notifier_call = NULL;
2869 }
2870 flush_workqueue(wq);
2871
2872 mlx4_ib_close_sriov(ibdev);
2873
2874 err_mad:
2875 mlx4_ib_mad_cleanup(ibdev);
2876
2877 err_reg:
2878 ib_unregister_device(&ibdev->ib_dev);
2879
2880 err_diag_counters:
2881 mlx4_ib_diag_cleanup(ibdev);
2882
2883 err_steer_free_bitmap:
2884 bitmap_free(ibdev->ib_uc_qpns_bitmap);
2885
2886 err_steer_qp_release:
2887 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2888 ibdev->steer_qpn_count);
2889 err_counter:
2890 for (i = 0; i < ibdev->num_ports; ++i)
2891 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2892
2893 err_map:
2894 mlx4_ib_free_eqs(dev, ibdev);
2895 iounmap(ibdev->uar_map);
2896
2897 err_uar:
2898 mlx4_uar_free(dev, &ibdev->priv_uar);
2899
2900 err_pd:
2901 mlx4_pd_free(dev, ibdev->priv_pdn);
2902
2903 err_dealloc:
2904 ib_dealloc_device(&ibdev->ib_dev);
2905
2906 return err;
2907 }
2908
2909 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2910 {
2911 int offset;
2912
2913 WARN_ON(!dev->ib_uc_qpns_bitmap);
2914
2915 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2916 dev->steer_qpn_count,
2917 get_count_order(count));
2918 if (offset < 0)
2919 return offset;
2920
2921 *qpn = dev->steer_qpn_base + offset;
2922 return 0;
2923 }
2924
2925 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2926 {
2927 if (!qpn ||
2928 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2929 return;
2930
2931 if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2932 qpn, dev->steer_qpn_base))
2933 /* not supposed to be here */
2934 return;
2935
2936 bitmap_release_region(dev->ib_uc_qpns_bitmap,
2937 qpn - dev->steer_qpn_base,
2938 get_count_order(count));
2939 }
2940
2941 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2942 int is_attach)
2943 {
2944 int err;
2945 size_t flow_size;
2946 struct ib_flow_attr *flow;
2947 struct ib_flow_spec_ib *ib_spec;
2948
2949 if (is_attach) {
2950 flow_size = sizeof(struct ib_flow_attr) +
2951 sizeof(struct ib_flow_spec_ib);
2952 flow = kzalloc(flow_size, GFP_KERNEL);
2953 if (!flow)
2954 return -ENOMEM;
2955 flow->port = mqp->port;
2956 flow->num_of_specs = 1;
2957 flow->size = flow_size;
2958 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2959 ib_spec->type = IB_FLOW_SPEC_IB;
2960 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2961 /* Add an empty rule for IB L2 */
2962 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2963
2964 err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
2965 MLX4_FS_REGULAR, &mqp->reg_id);
2966 kfree(flow);
2967 return err;
2968 }
2969
2970 return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2971 }
2972
2973 static void mlx4_ib_remove(struct auxiliary_device *adev)
2974 {
2975 struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
2976 struct mlx4_dev *dev = madev->mdev;
2977 struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev);
2978 int p;
2979 int i;
2980
2981 mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb);
2982
2983 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2984 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
2985 ibdev->ib_active = false;
2986 flush_workqueue(wq);
2987
2988 if (ibdev->iboe.nb.notifier_call) {
2989 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2990 pr_warn("failure unregistering notifier\n");
2991 ibdev->iboe.nb.notifier_call = NULL;
2992 }
2993
2994 mlx4_ib_close_sriov(ibdev);
2995 mlx4_ib_mad_cleanup(ibdev);
2996 ib_unregister_device(&ibdev->ib_dev);
2997 mlx4_ib_diag_cleanup(ibdev);
2998
2999 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3000 ibdev->steer_qpn_count);
3001 bitmap_free(ibdev->ib_uc_qpns_bitmap);
3002
3003 iounmap(ibdev->uar_map);
3004 for (p = 0; p < ibdev->num_ports; ++p)
3005 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3006
3007 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
3008 mlx4_CLOSE_PORT(dev, p);
3009
3010 mlx4_ib_free_eqs(dev, ibdev);
3011
3012 mlx4_uar_free(dev, &ibdev->priv_uar);
3013 mlx4_pd_free(dev, ibdev->priv_pdn);
3014 ib_dealloc_device(&ibdev->ib_dev);
3015 }
3016
3017 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3018 {
3019 struct mlx4_ib_demux_work **dm;
3020 struct mlx4_dev *dev = ibdev->dev;
3021 int i;
3022 unsigned long flags;
3023 struct mlx4_active_ports actv_ports;
3024 unsigned int ports;
3025 unsigned int first_port;
3026
3027 if (!mlx4_is_master(dev))
3028 return;
3029
3030 actv_ports = mlx4_get_active_ports(dev, slave);
3031 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3032 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3033
3034 dm = kzalloc_objs(*dm, ports, GFP_ATOMIC);
3035 if (!dm)
3036 return;
3037
3038 for (i = 0; i < ports; i++) {
3039 dm[i] = kmalloc_obj(struct mlx4_ib_demux_work, GFP_ATOMIC);
3040 if (!dm[i]) {
3041 while (--i >= 0)
3042 kfree(dm[i]);
3043 goto out;
3044 }
3045 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3046 dm[i]->port = first_port + i + 1;
3047 dm[i]->slave = slave;
3048 dm[i]->do_init = do_init;
3049 dm[i]->dev = ibdev;
3050 }
3051 /* initialize or tear down tunnel QPs for the slave */
3052 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3053 if (!ibdev->sriov.is_going_down) {
3054 for (i = 0; i < ports; i++)
3055 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3056 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3057 } else {
3058 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3059 for (i = 0; i < ports; i++)
3060 kfree(dm[i]);
3061 }
3062 out:
3063 kfree(dm);
3064 return;
3065 }
3066
3067 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3068 {
3069 struct mlx4_ib_qp *mqp;
3070 unsigned long flags_qp;
3071 unsigned long flags_cq;
3072 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3073 struct list_head cq_notify_list;
3074 struct mlx4_cq *mcq;
3075 unsigned long flags;
3076
3077 pr_warn("mlx4_ib_handle_catas_error was started\n");
3078 INIT_LIST_HEAD(&cq_notify_list);
3079
3080 	/* Walk the QP list residing on this ibdev, synchronized with QP create/destroy. */
3081 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3082
3083 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3084 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3085 if (mqp->sq.tail != mqp->sq.head) {
3086 send_mcq = to_mcq(mqp->ibqp.send_cq);
3087 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3088 if (send_mcq->mcq.comp &&
3089 mqp->ibqp.send_cq->comp_handler) {
3090 if (!send_mcq->mcq.reset_notify_added) {
3091 send_mcq->mcq.reset_notify_added = 1;
3092 list_add_tail(&send_mcq->mcq.reset_notify,
3093 &cq_notify_list);
3094 }
3095 }
3096 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3097 }
3098 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3099 /* Now, handle the QP's receive queue */
3100 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3101 /* no handling is needed for SRQ */
3102 if (!mqp->ibqp.srq) {
3103 if (mqp->rq.tail != mqp->rq.head) {
3104 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3105 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3106 if (recv_mcq->mcq.comp &&
3107 mqp->ibqp.recv_cq->comp_handler) {
3108 if (!recv_mcq->mcq.reset_notify_added) {
3109 recv_mcq->mcq.reset_notify_added = 1;
3110 list_add_tail(&recv_mcq->mcq.reset_notify,
3111 &cq_notify_list);
3112 }
3113 }
3114 spin_unlock_irqrestore(&recv_mcq->lock,
3115 flags_cq);
3116 }
3117 }
3118 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3119 }
3120
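	/* Invoke the completion handler of every CQ that has outstanding
	 * work so its consumers can observe the fatal error.
	 */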
3121 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3122 mcq->comp(mcq);
3123 }
3124 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3125 pr_warn("mlx4_ib_handle_catas_error ended\n");
3126 }
3127
3128 static void handle_bonded_port_state_event(struct work_struct *work)
3129 {
3130 struct ib_event_work *ew =
3131 container_of(work, struct ib_event_work, work);
3132 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3133 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3134 int i;
3135 struct ib_event ibev;
3136
3137 kfree(ew);
3138 spin_lock_bh(&ibdev->iboe.lock);
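	/* The bond is reported ACTIVE if any member netdev is running with
	 * carrier, otherwise DOWN; the event is raised on the single
	 * logical port seen by consumers.
	 */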
3139 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3140 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3141 enum ib_port_state curr_port_state;
3142
3143 if (!curr_netdev)
3144 continue;
3145
3146 curr_port_state =
3147 (netif_running(curr_netdev) &&
3148 netif_carrier_ok(curr_netdev)) ?
3149 IB_PORT_ACTIVE : IB_PORT_DOWN;
3150
3151 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3152 curr_port_state : IB_PORT_ACTIVE;
3153 }
3154 spin_unlock_bh(&ibdev->iboe.lock);
3155
3156 ibev.device = &ibdev->ib_dev;
3157 ibev.element.port_num = 1;
3158 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3159 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3160
3161 ib_dispatch_event(&ibev);
3162 }
3163
3164 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3165 {
3166 u64 sl2vl;
3167 int err;
3168
3169 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3170 if (err) {
3171 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3172 port, err);
3173 sl2vl = 0;
3174 }
3175 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3176 }
3177
3178 static void ib_sl2vl_update_work(struct work_struct *work)
3179 {
3180 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3181 struct mlx4_ib_dev *mdev = ew->ib_dev;
3182 int port = ew->port;
3183
3184 mlx4_ib_sl2vl_update(mdev, port);
3185
3186 kfree(ew);
3187 }
3188
3189 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3190 int port)
3191 {
3192 struct ib_event_work *ew;
3193
3194 ew = kmalloc_obj(*ew, GFP_ATOMIC);
3195 if (ew) {
3196 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3197 ew->port = port;
3198 ew->ib_dev = ibdev;
3199 queue_work(wq, &ew->work);
3200 }
3201 }
3202
3203 static int mlx4_ib_event(struct notifier_block *this, unsigned long event,
3204 void *param)
3205 {
3206 struct mlx4_ib_dev *ibdev =
3207 container_of(this, struct mlx4_ib_dev, mlx_nb);
3208 struct mlx4_dev *dev = ibdev->dev;
3209 struct ib_event ibev;
3210 struct mlx4_eqe *eqe = NULL;
3211 struct ib_event_work *ew;
3212 int p = 0;
3213
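	/* On a bonded device, defer port up/down events to a work item that
	 * recomputes the aggregate state of the single exposed port.
	 */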
3214 if (mlx4_is_bonded(dev) &&
3215 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3216 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3217 ew = kmalloc_obj(*ew, GFP_ATOMIC);
3218 if (!ew)
3219 return NOTIFY_DONE;
3220 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3221 ew->ib_dev = ibdev;
3222 queue_work(wq, &ew->work);
3223 return NOTIFY_DONE;
3224 }
3225
3226 switch (event) {
3227 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3228 break;
3229 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3230 eqe = (struct mlx4_eqe *)param;
3231 break;
3232 default:
3233 p = *(int *)param;
3234 break;
3235 }
3236
3237 switch (event) {
3238 case MLX4_DEV_EVENT_PORT_UP:
3239 if (p > ibdev->num_ports)
3240 return NOTIFY_DONE;
3241 if (!mlx4_is_slave(dev) &&
3242 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3243 IB_LINK_LAYER_INFINIBAND) {
3244 if (mlx4_is_master(dev))
3245 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3246 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3247 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3248 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3249 }
3250 ibev.event = IB_EVENT_PORT_ACTIVE;
3251 break;
3252
3253 case MLX4_DEV_EVENT_PORT_DOWN:
3254 if (p > ibdev->num_ports)
3255 return NOTIFY_DONE;
3256 ibev.event = IB_EVENT_PORT_ERR;
3257 break;
3258
3259 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3260 ibdev->ib_active = false;
3261 ibev.event = IB_EVENT_DEVICE_FATAL;
3262 mlx4_ib_handle_catas_error(ibdev);
3263 break;
3264
3265 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3266 ew = kmalloc_obj(*ew, GFP_ATOMIC);
3267 if (!ew)
3268 return NOTIFY_DONE;
3269
3270 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3271 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3272 ew->ib_dev = ibdev;
3273 /* need to queue only for port owner, which uses GEN_EQE */
3274 if (mlx4_is_master(dev))
3275 queue_work(wq, &ew->work);
3276 else
3277 handle_port_mgmt_change_event(&ew->work);
3278 return NOTIFY_DONE;
3279
3280 case MLX4_DEV_EVENT_SLAVE_INIT:
3281 /* here, p is the slave id */
3282 do_slave_init(ibdev, p, 1);
3283 if (mlx4_is_master(dev)) {
3284 int i;
3285
3286 for (i = 1; i <= ibdev->num_ports; i++) {
3287 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3288 == IB_LINK_LAYER_INFINIBAND)
3289 mlx4_ib_slave_alias_guid_event(ibdev,
3290 p, i,
3291 1);
3292 }
3293 }
3294 return NOTIFY_DONE;
3295
3296 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3297 if (mlx4_is_master(dev)) {
3298 int i;
3299
3300 for (i = 1; i <= ibdev->num_ports; i++) {
3301 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3302 == IB_LINK_LAYER_INFINIBAND)
3303 mlx4_ib_slave_alias_guid_event(ibdev,
3304 p, i,
3305 0);
3306 }
3307 }
3308 /* here, p is the slave id */
3309 do_slave_init(ibdev, p, 0);
3310 return NOTIFY_DONE;
3311
3312 default:
3313 return NOTIFY_DONE;
3314 }
3315
3316 ibev.device = &ibdev->ib_dev;
3317 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3318
3319 ib_dispatch_event(&ibev);
3320 return NOTIFY_DONE;
3321 }
3322
3323 static const struct auxiliary_device_id mlx4_ib_id_table[] = {
3324 { .name = MLX4_ADEV_NAME ".ib" },
3325 {},
3326 };
3327
3328 MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table);
3329
3330 static struct mlx4_adrv mlx4_ib_adrv = {
3331 .adrv = {
3332 .name = "ib",
3333 .probe = mlx4_ib_probe,
3334 .remove = mlx4_ib_remove,
3335 .id_table = mlx4_ib_id_table,
3336 },
3337 .protocol = MLX4_PROT_IB_IPV6,
3338 .flags = MLX4_INTFF_BONDING
3339 };
3340
3341 static int __init mlx4_ib_init(void)
3342 {
3343 int err;
3344
3345 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3346 if (!wq)
3347 return -ENOMEM;
3348
3349 err = mlx4_ib_qp_event_init();
3350 if (err)
3351 goto clean_qp_event;
3352
3353 err = mlx4_ib_cm_init();
3354 if (err)
3355 goto clean_wq;
3356
3357 err = mlx4_ib_mcg_init();
3358 if (err)
3359 goto clean_cm;
3360
3361 err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv);
3362 if (err)
3363 goto clean_mcg;
3364
3365 return 0;
3366
3367 clean_mcg:
3368 mlx4_ib_mcg_destroy();
3369
3370 clean_cm:
3371 mlx4_ib_cm_destroy();
3372
3373 clean_wq:
3374 mlx4_ib_qp_event_cleanup();
3375
3376 clean_qp_event:
3377 destroy_workqueue(wq);
3378 return err;
3379 }
3380
3381 static void __exit mlx4_ib_cleanup(void)
3382 {
3383 mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv);
3384 mlx4_ib_mcg_destroy();
3385 mlx4_ib_cm_destroy();
3386 mlx4_ib_qp_event_cleanup();
3387 destroy_workqueue(wq);
3388 }
3389
3390 module_init(mlx4_ib_init);
3391 module_exit(mlx4_ib_cleanup);
3392