1 /*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
44 #include <linux/in.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
48
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/rw.h>
53 #include <rdma/lag.h>
54
55 #include "core_priv.h"
56 #include <trace/events/rdma_core.h>
57
58 static int ib_resolve_eth_dmac(struct ib_device *device,
59 struct rdma_ah_attr *ah_attr);
60
61 static const char * const ib_events[] = {
62 [IB_EVENT_CQ_ERR] = "CQ error",
63 [IB_EVENT_QP_FATAL] = "QP fatal error",
64 [IB_EVENT_QP_REQ_ERR] = "QP request error",
65 [IB_EVENT_QP_ACCESS_ERR] = "QP access error",
66 [IB_EVENT_COMM_EST] = "communication established",
67 [IB_EVENT_SQ_DRAINED] = "send queue drained",
68 [IB_EVENT_PATH_MIG] = "path migration successful",
69 [IB_EVENT_PATH_MIG_ERR] = "path migration error",
70 [IB_EVENT_DEVICE_FATAL] = "device fatal error",
71 [IB_EVENT_PORT_ACTIVE] = "port active",
72 [IB_EVENT_PORT_ERR] = "port error",
73 [IB_EVENT_LID_CHANGE] = "LID change",
74 [IB_EVENT_PKEY_CHANGE] = "P_key change",
75 [IB_EVENT_SM_CHANGE] = "SM change",
76 [IB_EVENT_SRQ_ERR] = "SRQ error",
77 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
78 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
79 [IB_EVENT_CLIENT_REREGISTER] = "client reregister",
80 [IB_EVENT_GID_CHANGE] = "GID changed",
81 [IB_EVENT_DEVICE_SPEED_CHANGE] = "device speed change"
82 };
83
84 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
85 {
86 size_t index = event;
87
88 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
89 ib_events[index] : "unrecognized event";
90 }
91 EXPORT_SYMBOL(ib_event_msg);
92
93 static const char * const wc_statuses[] = {
94 [IB_WC_SUCCESS] = "success",
95 [IB_WC_LOC_LEN_ERR] = "local length error",
96 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
97 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
98 [IB_WC_LOC_PROT_ERR] = "local protection error",
99 [IB_WC_WR_FLUSH_ERR] = "WR flushed",
100 [IB_WC_MW_BIND_ERR] = "memory bind operation error",
101 [IB_WC_BAD_RESP_ERR] = "bad response error",
102 [IB_WC_LOC_ACCESS_ERR] = "local access error",
103 [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
104 [IB_WC_REM_ACCESS_ERR] = "remote access error",
105 [IB_WC_REM_OP_ERR] = "remote operation error",
106 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
107 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
108 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
109 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
110 [IB_WC_REM_ABORT_ERR] = "operation aborted",
111 [IB_WC_INV_EECN_ERR] = "invalid EE context number",
112 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
113 [IB_WC_FATAL_ERR] = "fatal error",
114 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
115 [IB_WC_GENERAL_ERR] = "general error",
116 };
117
118 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
119 {
120 size_t index = status;
121
122 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
123 wc_statuses[index] : "unrecognized status";
124 }
125 EXPORT_SYMBOL(ib_wc_status_msg);
126
127 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
128 {
129 switch (rate) {
130 case IB_RATE_2_5_GBPS: return 1;
131 case IB_RATE_5_GBPS: return 2;
132 case IB_RATE_10_GBPS: return 4;
133 case IB_RATE_20_GBPS: return 8;
134 case IB_RATE_30_GBPS: return 12;
135 case IB_RATE_40_GBPS: return 16;
136 case IB_RATE_60_GBPS: return 24;
137 case IB_RATE_80_GBPS: return 32;
138 case IB_RATE_120_GBPS: return 48;
139 case IB_RATE_14_GBPS: return 6;
140 case IB_RATE_56_GBPS: return 22;
141 case IB_RATE_112_GBPS: return 45;
142 case IB_RATE_168_GBPS: return 67;
143 case IB_RATE_25_GBPS: return 10;
144 case IB_RATE_100_GBPS: return 40;
145 case IB_RATE_200_GBPS: return 80;
146 case IB_RATE_300_GBPS: return 120;
147 case IB_RATE_28_GBPS: return 11;
148 case IB_RATE_50_GBPS: return 20;
149 case IB_RATE_400_GBPS: return 160;
150 case IB_RATE_600_GBPS: return 240;
151 case IB_RATE_800_GBPS: return 320;
152 case IB_RATE_1600_GBPS: return 640;
153 default: return -1;
154 }
155 }
156 EXPORT_SYMBOL(ib_rate_to_mult);
157
158 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
159 {
160 switch (mult) {
161 case 1: return IB_RATE_2_5_GBPS;
162 case 2: return IB_RATE_5_GBPS;
163 case 4: return IB_RATE_10_GBPS;
164 case 8: return IB_RATE_20_GBPS;
165 case 12: return IB_RATE_30_GBPS;
166 case 16: return IB_RATE_40_GBPS;
167 case 24: return IB_RATE_60_GBPS;
168 case 32: return IB_RATE_80_GBPS;
169 case 48: return IB_RATE_120_GBPS;
170 case 6: return IB_RATE_14_GBPS;
171 case 22: return IB_RATE_56_GBPS;
172 case 45: return IB_RATE_112_GBPS;
173 case 67: return IB_RATE_168_GBPS;
174 case 10: return IB_RATE_25_GBPS;
175 case 40: return IB_RATE_100_GBPS;
176 case 80: return IB_RATE_200_GBPS;
177 case 120: return IB_RATE_300_GBPS;
178 case 11: return IB_RATE_28_GBPS;
179 case 20: return IB_RATE_50_GBPS;
180 case 160: return IB_RATE_400_GBPS;
181 case 240: return IB_RATE_600_GBPS;
182 case 320: return IB_RATE_800_GBPS;
183 case 640: return IB_RATE_1600_GBPS;
184 default: return IB_RATE_PORT_CURRENT;
185 }
186 }
187 EXPORT_SYMBOL(mult_to_ib_rate);
188
189 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
190 {
191 switch (rate) {
192 case IB_RATE_2_5_GBPS: return 2500;
193 case IB_RATE_5_GBPS: return 5000;
194 case IB_RATE_10_GBPS: return 10000;
195 case IB_RATE_20_GBPS: return 20000;
196 case IB_RATE_30_GBPS: return 30000;
197 case IB_RATE_40_GBPS: return 40000;
198 case IB_RATE_60_GBPS: return 60000;
199 case IB_RATE_80_GBPS: return 80000;
200 case IB_RATE_120_GBPS: return 120000;
201 case IB_RATE_14_GBPS: return 14062;
202 case IB_RATE_56_GBPS: return 56250;
203 case IB_RATE_112_GBPS: return 112500;
204 case IB_RATE_168_GBPS: return 168750;
205 case IB_RATE_25_GBPS: return 25781;
206 case IB_RATE_100_GBPS: return 103125;
207 case IB_RATE_200_GBPS: return 206250;
208 case IB_RATE_300_GBPS: return 309375;
209 case IB_RATE_28_GBPS: return 28125;
210 case IB_RATE_50_GBPS: return 53125;
211 case IB_RATE_400_GBPS: return 425000;
212 case IB_RATE_600_GBPS: return 637500;
213 case IB_RATE_800_GBPS: return 850000;
214 case IB_RATE_1600_GBPS: return 1700000;
215 default: return -1;
216 }
217 }
218 EXPORT_SYMBOL(ib_rate_to_mbps);
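/*
 * Usage sketch (illustrative only, not taken from an in-tree caller):
 * round-tripping the IBTA rate encoding through the 2.5 Gb/s multiplier
 * and reading the approximate signalling rate in Mb/s.
 *
 *	enum ib_rate rate = IB_RATE_100_GBPS;
 *	int mult = ib_rate_to_mult(rate);	(returns 40)
 *	int mbps = ib_rate_to_mbps(rate);	(returns 103125)
 *
 *	if (mult > 0 && mult_to_ib_rate(mult) == rate)
 *		pr_debug("rate %d is about %d Mb/s\n", rate, mbps);
 */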
219
220 struct ib_speed_attr {
221 const char *str;
222 int speed;
223 };
224
225 #define IB_SPEED_ATTR(speed_type, _str, _speed) \
226 [speed_type] = {.str = _str, .speed = _speed}
227
228 static const struct ib_speed_attr ib_speed_attrs[] = {
229 IB_SPEED_ATTR(IB_SPEED_SDR, " SDR", 25),
230 IB_SPEED_ATTR(IB_SPEED_DDR, " DDR", 50),
231 IB_SPEED_ATTR(IB_SPEED_QDR, " QDR", 100),
232 IB_SPEED_ATTR(IB_SPEED_FDR10, " FDR10", 100),
233 IB_SPEED_ATTR(IB_SPEED_FDR, " FDR", 140),
234 IB_SPEED_ATTR(IB_SPEED_EDR, " EDR", 250),
235 IB_SPEED_ATTR(IB_SPEED_HDR, " HDR", 500),
236 IB_SPEED_ATTR(IB_SPEED_NDR, " NDR", 1000),
237 IB_SPEED_ATTR(IB_SPEED_XDR, " XDR", 2000),
238 };
239
240 int ib_port_attr_to_speed_info(struct ib_port_attr *attr,
241 struct ib_port_speed_info *speed_info)
242 {
243 int speed_idx = attr->active_speed;
244
245 switch (attr->active_speed) {
246 case IB_SPEED_DDR:
247 case IB_SPEED_QDR:
248 case IB_SPEED_FDR10:
249 case IB_SPEED_FDR:
250 case IB_SPEED_EDR:
251 case IB_SPEED_HDR:
252 case IB_SPEED_NDR:
253 case IB_SPEED_XDR:
254 case IB_SPEED_SDR:
255 break;
256 default:
257 speed_idx = IB_SPEED_SDR; /* Default to SDR for invalid rates */
258 break;
259 }
260
261 speed_info->str = ib_speed_attrs[speed_idx].str;
262 speed_info->rate = ib_speed_attrs[speed_idx].speed;
263 speed_info->rate *= ib_width_enum_to_int(attr->active_width);
264 if (speed_info->rate < 0)
265 return -EINVAL;
266
267 return 0;
268 }
269 EXPORT_SYMBOL(ib_port_attr_to_speed_info);
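/*
 * Usage sketch (illustrative; assumes a valid "device" and "port_num"):
 * deriving the sysfs-style rate string for a port from its queried
 * attributes.
 *
 *	struct ib_port_attr attr;
 *	struct ib_port_speed_info sinfo;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    !ib_port_attr_to_speed_info(&attr, &sinfo))
 *		pr_debug("%d Gb/sec (%dX%s)\n", sinfo.rate / 10,
 *			 ib_width_enum_to_int(attr.active_width), sinfo.str);
 */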
270
271 __attribute_const__ enum rdma_transport_type
272 rdma_node_get_transport(unsigned int node_type)
273 {
274
275 if (node_type == RDMA_NODE_USNIC)
276 return RDMA_TRANSPORT_USNIC;
277 if (node_type == RDMA_NODE_USNIC_UDP)
278 return RDMA_TRANSPORT_USNIC_UDP;
279 if (node_type == RDMA_NODE_RNIC)
280 return RDMA_TRANSPORT_IWARP;
281 if (node_type == RDMA_NODE_UNSPECIFIED)
282 return RDMA_TRANSPORT_UNSPECIFIED;
283
284 return RDMA_TRANSPORT_IB;
285 }
286 EXPORT_SYMBOL(rdma_node_get_transport);
287
288 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
289 u32 port_num)
290 {
291 enum rdma_transport_type lt;
292 if (device->ops.get_link_layer)
293 return device->ops.get_link_layer(device, port_num);
294
295 lt = rdma_node_get_transport(device->node_type);
296 if (lt == RDMA_TRANSPORT_IB)
297 return IB_LINK_LAYER_INFINIBAND;
298
299 return IB_LINK_LAYER_ETHERNET;
300 }
301 EXPORT_SYMBOL(rdma_port_get_link_layer);
302
303 /* Protection domains */
304
305 /**
306 * __ib_alloc_pd - Allocates an unused protection domain.
307 * @device: The device on which to allocate the protection domain.
308 * @flags: protection domain flags
309 * @caller: caller's build-time module name
310 *
311 * A protection domain object provides an association between QPs, shared
312 * receive queues, address handles, memory regions, and memory windows.
313 *
314 * Every PD has a local_dma_lkey which can be used as the lkey value for local
315 * memory operations.
316 */
317 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
318 const char *caller)
319 {
320 struct ib_pd *pd;
321 int mr_access_flags = 0;
322 int ret;
323
324 pd = rdma_zalloc_drv_obj(device, ib_pd);
325 if (!pd)
326 return ERR_PTR(-ENOMEM);
327
328 pd->device = device;
329 pd->flags = flags;
330
331 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
332 rdma_restrack_set_name(&pd->res, caller);
333
334 ret = device->ops.alloc_pd(pd, NULL);
335 if (ret) {
336 rdma_restrack_put(&pd->res);
337 kfree(pd);
338 return ERR_PTR(ret);
339 }
340 rdma_restrack_add(&pd->res);
341
342 if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
343 pd->local_dma_lkey = device->local_dma_lkey;
344 else
345 mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
346
347 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
348 pr_warn("%s: enabling unsafe global rkey\n", caller);
349 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
350 }
351
352 if (mr_access_flags) {
353 struct ib_mr *mr;
354
355 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
356 if (IS_ERR(mr)) {
357 ib_dealloc_pd(pd);
358 return ERR_CAST(mr);
359 }
360
361 mr->device = pd->device;
362 mr->pd = pd;
363 mr->type = IB_MR_TYPE_DMA;
364 mr->uobject = NULL;
365 mr->need_inval = false;
366
367 pd->__internal_mr = mr;
368
369 if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
370 pd->local_dma_lkey = pd->__internal_mr->lkey;
371
372 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
373 pd->unsafe_global_rkey = pd->__internal_mr->rkey;
374 }
375
376 return pd;
377 }
378 EXPORT_SYMBOL(__ib_alloc_pd);
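/*
 * Minimal kernel usage sketch (illustrative, error handling abbreviated).
 * In-kernel users normally go through the ib_alloc_pd() wrapper, which
 * passes KBUILD_MODNAME as @caller; "device" is assumed to come from an
 * ib_client add callback.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	(use pd->local_dma_lkey for local SGEs, then ib_dealloc_pd(pd))
 */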
379
380 /**
381 * ib_dealloc_pd_user - Deallocates a protection domain.
382 * @pd: The protection domain to deallocate.
383 * @udata: Valid user data or NULL for kernel object
384 *
385 * It is an error to call this function while any resources in the pd still
386 * exist. The caller is responsible to synchronously destroy them and
387 * guarantee no new allocations will happen.
388 */
389 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
390 {
391 int ret;
392
393 if (pd->__internal_mr) {
394 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
395 WARN_ON(ret);
396 pd->__internal_mr = NULL;
397 }
398
399 ret = pd->device->ops.dealloc_pd(pd, udata);
400 if (ret)
401 return ret;
402
403 rdma_restrack_del(&pd->res);
404 kfree(pd);
405 return ret;
406 }
407 EXPORT_SYMBOL(ib_dealloc_pd_user);
408
409 /* Address handles */
410
411 /**
412 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
413 * @dest: Pointer to destination ah_attr. Contents of the destination
414 * pointer are assumed to be invalid and the attributes are overwritten.
415 * @src: Pointer to source ah_attr.
416 */
417 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
418 const struct rdma_ah_attr *src)
419 {
420 *dest = *src;
421 if (dest->grh.sgid_attr)
422 rdma_hold_gid_attr(dest->grh.sgid_attr);
423 }
424 EXPORT_SYMBOL(rdma_copy_ah_attr);
425
426 /**
427 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
428 * @old: Pointer to existing ah_attr which needs to be replaced.
429 * old is assumed to be valid or zero'd
430 * @new: Pointer to the new ah_attr.
431 *
432 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
433 * the old ah_attr is valid; after that it copies the new attributes and holds
434 * a reference to the replaced ah_attr.
435 */
436 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
437 const struct rdma_ah_attr *new)
438 {
439 rdma_destroy_ah_attr(old);
440 *old = *new;
441 if (old->grh.sgid_attr)
442 rdma_hold_gid_attr(old->grh.sgid_attr);
443 }
444 EXPORT_SYMBOL(rdma_replace_ah_attr);
445
446 /**
447 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
448 * @dest: Pointer to destination ah_attr to copy to.
449 * dest is assumed to be valid or zero'd
450 * @src: Pointer to the new ah_attr.
451 *
452 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
453 * if it is valid. This also transfers ownership of internal references from
454 * src to dest, making src invalid in the process. No new reference of the src
455 * ah_attr is taken.
456 */
457 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
458 {
459 rdma_destroy_ah_attr(dest);
460 *dest = *src;
461 src->grh.sgid_attr = NULL;
462 }
463 EXPORT_SYMBOL(rdma_move_ah_attr);
464
465 /*
466 * Validate that the rdma_ah_attr is valid for the device before passing it
467 * off to the driver.
468 */
469 static int rdma_check_ah_attr(struct ib_device *device,
470 struct rdma_ah_attr *ah_attr)
471 {
472 if (!rdma_is_port_valid(device, ah_attr->port_num))
473 return -EINVAL;
474
475 if ((rdma_is_grh_required(device, ah_attr->port_num) ||
476 ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
477 !(ah_attr->ah_flags & IB_AH_GRH))
478 return -EINVAL;
479
480 if (ah_attr->grh.sgid_attr) {
481 /*
482 * Make sure the passed sgid_attr is consistent with the
483 * parameters
484 */
485 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
486 ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
487 return -EINVAL;
488 }
489 return 0;
490 }
491
492 /*
493 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
494 * On success the caller is responsible to call rdma_unfill_sgid_attr().
495 */
496 static int rdma_fill_sgid_attr(struct ib_device *device,
497 struct rdma_ah_attr *ah_attr,
498 const struct ib_gid_attr **old_sgid_attr)
499 {
500 const struct ib_gid_attr *sgid_attr;
501 struct ib_global_route *grh;
502 int ret;
503
504 *old_sgid_attr = ah_attr->grh.sgid_attr;
505
506 ret = rdma_check_ah_attr(device, ah_attr);
507 if (ret)
508 return ret;
509
510 if (!(ah_attr->ah_flags & IB_AH_GRH))
511 return 0;
512
513 grh = rdma_ah_retrieve_grh(ah_attr);
514 if (grh->sgid_attr)
515 return 0;
516
517 sgid_attr =
518 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
519 if (IS_ERR(sgid_attr))
520 return PTR_ERR(sgid_attr);
521
522 /* Move ownership of the kref into the ah_attr */
523 grh->sgid_attr = sgid_attr;
524 return 0;
525 }
526
527 static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
528 const struct ib_gid_attr *old_sgid_attr)
529 {
530 /*
531 * Fill didn't change anything, the caller retains ownership of
532 * whatever it passed
533 */
534 if (ah_attr->grh.sgid_attr == old_sgid_attr)
535 return;
536
537 /*
538 * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the caller
539 * doesn't see any change in the rdma_ah_attr. If we get here
540 * old_sgid_attr is NULL.
541 */
542 rdma_destroy_ah_attr(ah_attr);
543 }
544
545 static const struct ib_gid_attr *
546 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
547 const struct ib_gid_attr *old_attr)
548 {
549 if (old_attr)
550 rdma_put_gid_attr(old_attr);
551 if (ah_attr->ah_flags & IB_AH_GRH) {
552 rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
553 return ah_attr->grh.sgid_attr;
554 }
555 return NULL;
556 }
557
558 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
559 struct rdma_ah_attr *ah_attr,
560 u32 flags,
561 struct ib_udata *udata,
562 struct net_device *xmit_slave)
563 {
564 struct rdma_ah_init_attr init_attr = {};
565 struct ib_device *device = pd->device;
566 struct ib_ah *ah;
567 int ret;
568
569 might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
570
571 if (!udata && !device->ops.create_ah)
572 return ERR_PTR(-EOPNOTSUPP);
573
574 ah = rdma_zalloc_drv_obj_gfp(
575 device, ib_ah,
576 (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
577 if (!ah)
578 return ERR_PTR(-ENOMEM);
579
580 ah->device = device;
581 ah->pd = pd;
582 ah->type = ah_attr->type;
583 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
584 init_attr.ah_attr = ah_attr;
585 init_attr.flags = flags;
586 init_attr.xmit_slave = xmit_slave;
587
588 if (udata)
589 ret = device->ops.create_user_ah(ah, &init_attr, udata);
590 else
591 ret = device->ops.create_ah(ah, &init_attr, NULL);
592 if (ret) {
593 if (ah->sgid_attr)
594 rdma_put_gid_attr(ah->sgid_attr);
595 kfree(ah);
596 return ERR_PTR(ret);
597 }
598
599 atomic_inc(&pd->usecnt);
600 return ah;
601 }
602
603 /**
604 * rdma_create_ah - Creates an address handle for the
605 * given address vector.
606 * @pd: The protection domain associated with the address handle.
607 * @ah_attr: The attributes of the address vector.
608 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
609 *
610 * It returns a newly allocated ib_ah on success and an ERR_PTR on failure.
611 * The address handle is used to reference a local or global destination
612 * in all UD QP post sends.
613 */
614 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
615 u32 flags)
616 {
617 const struct ib_gid_attr *old_sgid_attr;
618 struct net_device *slave;
619 struct ib_ah *ah;
620 int ret;
621
622 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
623 if (ret)
624 return ERR_PTR(ret);
625 slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
626 (flags & RDMA_CREATE_AH_SLEEPABLE) ?
627 GFP_KERNEL : GFP_ATOMIC);
628 if (IS_ERR(slave)) {
629 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
630 return ERR_CAST(slave);
631 }
632 ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
633 rdma_lag_put_ah_roce_slave(slave);
634 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
635 return ah;
636 }
637 EXPORT_SYMBOL(rdma_create_ah);
638
639 /**
640 * rdma_create_user_ah - Creates an address handle for the
641 * given address vector.
642 * It resolves destination mac address for ah attribute of RoCE type.
643 * @pd: The protection domain associated with the address handle.
644 * @ah_attr: The attributes of the address vector.
645 * @udata: pointer to the user's input/output buffer information needed by
646 * the provider driver.
647 *
648 * It returns a newly allocated ib_ah on success and an ERR_PTR on failure.
649 * The address handle is used to reference a local or global destination
650 * in all UD QP post sends.
651 */
652 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
653 struct rdma_ah_attr *ah_attr,
654 struct ib_udata *udata)
655 {
656 const struct ib_gid_attr *old_sgid_attr;
657 struct ib_ah *ah;
658 int err;
659
660 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
661 if (err)
662 return ERR_PTR(err);
663
664 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
665 err = ib_resolve_eth_dmac(pd->device, ah_attr);
666 if (err) {
667 ah = ERR_PTR(err);
668 goto out;
669 }
670 }
671
672 ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
673 udata, NULL);
674
675 out:
676 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
677 return ah;
678 }
679 EXPORT_SYMBOL(rdma_create_user_ah);
680
681 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
682 {
683 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
684 struct iphdr ip4h_checked;
685 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
686
687 /* If it's IPv6, the version must be 6, otherwise, the first
688 * 20 bytes (before the IPv4 header) are garbled.
689 */
690 if (ip6h->version != 6)
691 return (ip4h->version == 4) ? 4 : 0;
692 /* version may be 6 or 4 because the first 20 bytes could be garbled */
693
694 /* RoCE v2 requires no options, thus header length
695 * must be 5 words
696 */
697 if (ip4h->ihl != 5)
698 return 6;
699
700 /* Verify checksum.
701 * We can't write on scattered buffers so we need to copy to
702 * temp buffer.
703 */
704 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
705 ip4h_checked.check = 0;
706 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
707 /* if IPv4 header checksum is OK, believe it */
708 if (ip4h->check == ip4h_checked.check)
709 return 4;
710 return 6;
711 }
712 EXPORT_SYMBOL(ib_get_rdma_header_version);
713
714 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
715 u32 port_num,
716 const struct ib_grh *grh)
717 {
718 int grh_version;
719
720 if (rdma_protocol_ib(device, port_num))
721 return RDMA_NETWORK_IB;
722
723 grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
724
725 if (grh_version == 4)
726 return RDMA_NETWORK_IPV4;
727
728 if (grh->next_hdr == IPPROTO_UDP)
729 return RDMA_NETWORK_IPV6;
730
731 return RDMA_NETWORK_ROCE_V1;
732 }
733
734 struct find_gid_index_context {
735 u16 vlan_id;
736 enum ib_gid_type gid_type;
737 };
738
739 static bool find_gid_index(const union ib_gid *gid,
740 const struct ib_gid_attr *gid_attr,
741 void *context)
742 {
743 struct find_gid_index_context *ctx = context;
744 u16 vlan_id = 0xffff;
745 int ret;
746
747 if (ctx->gid_type != gid_attr->gid_type)
748 return false;
749
750 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
751 if (ret)
752 return false;
753
754 return ctx->vlan_id == vlan_id;
755 }
756
757 static const struct ib_gid_attr *
758 get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
759 u16 vlan_id, const union ib_gid *sgid,
760 enum ib_gid_type gid_type)
761 {
762 struct find_gid_index_context context = {.vlan_id = vlan_id,
763 .gid_type = gid_type};
764
765 return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
766 &context);
767 }
768
769 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
770 enum rdma_network_type net_type,
771 union ib_gid *sgid, union ib_gid *dgid)
772 {
773 struct sockaddr_in src_in;
774 struct sockaddr_in dst_in;
775 __be32 src_saddr, dst_saddr;
776
777 if (!sgid || !dgid)
778 return -EINVAL;
779
780 if (net_type == RDMA_NETWORK_IPV4) {
781 memcpy(&src_in.sin_addr.s_addr,
782 &hdr->roce4grh.saddr, 4);
783 memcpy(&dst_in.sin_addr.s_addr,
784 &hdr->roce4grh.daddr, 4);
785 src_saddr = src_in.sin_addr.s_addr;
786 dst_saddr = dst_in.sin_addr.s_addr;
787 ipv6_addr_set_v4mapped(src_saddr,
788 (struct in6_addr *)sgid);
789 ipv6_addr_set_v4mapped(dst_saddr,
790 (struct in6_addr *)dgid);
791 return 0;
792 } else if (net_type == RDMA_NETWORK_IPV6 ||
793 net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) {
794 *dgid = hdr->ibgrh.dgid;
795 *sgid = hdr->ibgrh.sgid;
796 return 0;
797 } else {
798 return -EINVAL;
799 }
800 }
801 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
802
803 /* Resolve destination mac address and hop limit for unicast destination
804 * GID entry, considering the source GID entry as well.
805 * ah_attribute must have valid port_num, sgid_index.
806 */
807 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
808 struct rdma_ah_attr *ah_attr)
809 {
810 struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
811 const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
812 int hop_limit = 0xff;
813 int ret = 0;
814
815 /* If destination is link local and source GID is RoCEv1,
816 * IP stack is not used.
817 */
818 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
819 sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
820 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
821 ah_attr->roce.dmac);
822 return ret;
823 }
824
825 ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
826 ah_attr->roce.dmac,
827 sgid_attr, &hop_limit);
828
829 grh->hop_limit = hop_limit;
830 return ret;
831 }
832
833 /*
834 * This function initializes address handle attributes from the incoming packet.
835 * The incoming packet carries the dgid of the receiver node on which this
836 * code is executing, and its sgid contains the GID of the sender.
837 *
838 * When resolving the destination mac address, the arriving dgid is used
839 * as the sgid and the sgid is used as the dgid, because the sgid holds the
840 * destination GID to respond to.
841 *
842 * On success the caller is responsible to call rdma_destroy_ah_attr on the
843 * attr.
844 */
845 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
846 const struct ib_wc *wc, const struct ib_grh *grh,
847 struct rdma_ah_attr *ah_attr)
848 {
849 u32 flow_class;
850 int ret;
851 enum rdma_network_type net_type = RDMA_NETWORK_IB;
852 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
853 const struct ib_gid_attr *sgid_attr;
854 int hoplimit = 0xff;
855 union ib_gid dgid;
856 union ib_gid sgid;
857
858 might_sleep();
859
860 memset(ah_attr, 0, sizeof *ah_attr);
861 ah_attr->type = rdma_ah_find_type(device, port_num);
862 if (rdma_cap_eth_ah(device, port_num)) {
863 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
864 net_type = wc->network_hdr_type;
865 else
866 net_type = ib_get_net_type_by_grh(device, port_num, grh);
867 gid_type = ib_network_to_gid_type(net_type);
868 }
869 ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
870 &sgid, &dgid);
871 if (ret)
872 return ret;
873
874 rdma_ah_set_sl(ah_attr, wc->sl);
875 rdma_ah_set_port_num(ah_attr, port_num);
876
877 if (rdma_protocol_roce(device, port_num)) {
878 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
879 wc->vlan_id : 0xffff;
880
881 if (!(wc->wc_flags & IB_WC_GRH))
882 return -EPROTOTYPE;
883
884 sgid_attr = get_sgid_attr_from_eth(device, port_num,
885 vlan_id, &dgid,
886 gid_type);
887 if (IS_ERR(sgid_attr))
888 return PTR_ERR(sgid_attr);
889
890 flow_class = be32_to_cpu(grh->version_tclass_flow);
891 rdma_move_grh_sgid_attr(ah_attr,
892 &sgid,
893 flow_class & 0xFFFFF,
894 hoplimit,
895 (flow_class >> 20) & 0xFF,
896 sgid_attr);
897
898 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
899 if (ret)
900 rdma_destroy_ah_attr(ah_attr);
901
902 return ret;
903 } else {
904 rdma_ah_set_dlid(ah_attr, wc->slid);
905 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
906
907 if ((wc->wc_flags & IB_WC_GRH) == 0)
908 return 0;
909
910 if (dgid.global.interface_id !=
911 cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
912 sgid_attr = rdma_find_gid_by_port(
913 device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
914 } else
915 sgid_attr = rdma_get_gid_attr(device, port_num, 0);
916
917 if (IS_ERR(sgid_attr))
918 return PTR_ERR(sgid_attr);
919 flow_class = be32_to_cpu(grh->version_tclass_flow);
920 rdma_move_grh_sgid_attr(ah_attr,
921 &sgid,
922 flow_class & 0xFFFFF,
923 hoplimit,
924 (flow_class >> 20) & 0xFF,
925 sgid_attr);
926
927 return 0;
928 }
929 }
930 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
931
932 /**
933 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
934 * of the reference
935 *
936 * @attr: Pointer to AH attribute structure
937 * @dgid: Destination GID
938 * @flow_label: Flow label
939 * @hop_limit: Hop limit
940 * @traffic_class: traffic class
941 * @sgid_attr: Pointer to SGID attribute
942 *
943 * This takes ownership of the sgid_attr reference. The caller must ensure
944 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
945 * calling this function.
946 */
947 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
948 u32 flow_label, u8 hop_limit, u8 traffic_class,
949 const struct ib_gid_attr *sgid_attr)
950 {
951 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
952 traffic_class);
953 attr->grh.sgid_attr = sgid_attr;
954 }
955 EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
956
957 /**
958 * rdma_destroy_ah_attr - Release reference to SGID attribute of
959 * ah attribute.
960 * @ah_attr: Pointer to ah attribute
961 *
962 * Release reference to the SGID attribute of the ah attribute if it is
963 * non NULL. It is safe to call this multiple times, and safe to call it on
964 * a zero initialized ah_attr.
965 */
966 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
967 {
968 if (ah_attr->grh.sgid_attr) {
969 rdma_put_gid_attr(ah_attr->grh.sgid_attr);
970 ah_attr->grh.sgid_attr = NULL;
971 }
972 }
973 EXPORT_SYMBOL(rdma_destroy_ah_attr);
974
975 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
976 const struct ib_grh *grh, u32 port_num)
977 {
978 struct rdma_ah_attr ah_attr;
979 struct ib_ah *ah;
980 int ret;
981
982 ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
983 if (ret)
984 return ERR_PTR(ret);
985
986 ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
987
988 rdma_destroy_ah_attr(&ah_attr);
989 return ah;
990 }
991 EXPORT_SYMBOL(ib_create_ah_from_wc);
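/*
 * Typical responder-side sketch (illustrative only): a UD service that
 * received "wc"/"grh" on "port_num" can reply through an address handle
 * built from the incoming packet.
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	(post the UD send referencing ah, then)
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */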
992
993 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
994 {
995 const struct ib_gid_attr *old_sgid_attr;
996 int ret;
997
998 if (ah->type != ah_attr->type)
999 return -EINVAL;
1000
1001 ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
1002 if (ret)
1003 return ret;
1004
1005 ret = ah->device->ops.modify_ah ?
1006 ah->device->ops.modify_ah(ah, ah_attr) :
1007 -EOPNOTSUPP;
1008
1009 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
1010 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
1011 return ret;
1012 }
1013 EXPORT_SYMBOL(rdma_modify_ah);
1014
1015 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
1016 {
1017 ah_attr->grh.sgid_attr = NULL;
1018
1019 return ah->device->ops.query_ah ?
1020 ah->device->ops.query_ah(ah, ah_attr) :
1021 -EOPNOTSUPP;
1022 }
1023 EXPORT_SYMBOL(rdma_query_ah);
1024
1025 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
1026 {
1027 const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
1028 struct ib_pd *pd;
1029 int ret;
1030
1031 might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
1032
1033 pd = ah->pd;
1034
1035 ret = ah->device->ops.destroy_ah(ah, flags);
1036 if (ret)
1037 return ret;
1038
1039 atomic_dec(&pd->usecnt);
1040 if (sgid_attr)
1041 rdma_put_gid_attr(sgid_attr);
1042
1043 kfree(ah);
1044 return ret;
1045 }
1046 EXPORT_SYMBOL(rdma_destroy_ah_user);
1047
1048 /* Shared receive queues */
1049
1050 /**
1051 * ib_create_srq_user - Creates a SRQ associated with the specified protection
1052 * domain.
1053 * @pd: The protection domain associated with the SRQ.
1054 * @srq_init_attr: A list of initial attributes required to create the
1055 * SRQ. If SRQ creation succeeds, then the attributes are updated to
1056 * the actual capabilities of the created SRQ.
1057 * @uobject: uobject pointer if this is not a kernel SRQ
1058 * @udata: udata pointer if this is not a kernel SRQ
1059 *
1060 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1061 * requested size of the SRQ, and set to the actual values allocated
1062 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1063 * will always be at least as large as the requested values.
1064 */
1065 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1066 struct ib_srq_init_attr *srq_init_attr,
1067 struct ib_usrq_object *uobject,
1068 struct ib_udata *udata)
1069 {
1070 struct ib_srq *srq;
1071 int ret;
1072
1073 srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1074 if (!srq)
1075 return ERR_PTR(-ENOMEM);
1076
1077 srq->device = pd->device;
1078 srq->pd = pd;
1079 srq->event_handler = srq_init_attr->event_handler;
1080 srq->srq_context = srq_init_attr->srq_context;
1081 srq->srq_type = srq_init_attr->srq_type;
1082 srq->uobject = uobject;
1083
1084 if (ib_srq_has_cq(srq->srq_type)) {
1085 srq->ext.cq = srq_init_attr->ext.cq;
1086 atomic_inc(&srq->ext.cq->usecnt);
1087 }
1088 if (srq->srq_type == IB_SRQT_XRC) {
1089 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1090 if (srq->ext.xrc.xrcd)
1091 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1092 }
1093 atomic_inc(&pd->usecnt);
1094
1095 rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
1096 rdma_restrack_parent_name(&srq->res, &pd->res);
1097
1098 ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1099 if (ret) {
1100 rdma_restrack_put(&srq->res);
1101 atomic_dec(&pd->usecnt);
1102 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1103 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1104 if (ib_srq_has_cq(srq->srq_type))
1105 atomic_dec(&srq->ext.cq->usecnt);
1106 kfree(srq);
1107 return ERR_PTR(ret);
1108 }
1109
1110 rdma_restrack_add(&srq->res);
1111
1112 return srq;
1113 }
1114 EXPORT_SYMBOL(ib_create_srq_user);
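/*
 * Kernel usage sketch (illustrative, attribute values are placeholders):
 * in-kernel users call the ib_create_srq() wrapper, which forwards here
 * with a NULL uobject and udata.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 128, .max_sge = 2 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	(post receives; tear down later with ib_destroy_srq(srq))
 */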
1115
1116 int ib_modify_srq(struct ib_srq *srq,
1117 struct ib_srq_attr *srq_attr,
1118 enum ib_srq_attr_mask srq_attr_mask)
1119 {
1120 return srq->device->ops.modify_srq ?
1121 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1122 NULL) : -EOPNOTSUPP;
1123 }
1124 EXPORT_SYMBOL(ib_modify_srq);
1125
1126 int ib_query_srq(struct ib_srq *srq,
1127 struct ib_srq_attr *srq_attr)
1128 {
1129 return srq->device->ops.query_srq ?
1130 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1131 }
1132 EXPORT_SYMBOL(ib_query_srq);
1133
1134 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1135 {
1136 int ret;
1137
1138 if (atomic_read(&srq->usecnt))
1139 return -EBUSY;
1140
1141 ret = srq->device->ops.destroy_srq(srq, udata);
1142 if (ret)
1143 return ret;
1144
1145 atomic_dec(&srq->pd->usecnt);
1146 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1147 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1148 if (ib_srq_has_cq(srq->srq_type))
1149 atomic_dec(&srq->ext.cq->usecnt);
1150 rdma_restrack_del(&srq->res);
1151 kfree(srq);
1152
1153 return ret;
1154 }
1155 EXPORT_SYMBOL(ib_destroy_srq_user);
1156
1157 /* Queue pairs */
1158
1159 static void __ib_qp_event_handler(struct ib_event *event, void *context)
1160 {
1161 struct ib_qp *qp = event->element.qp;
1162
1163 if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
1164 complete(&qp->srq_completion);
1165 if (qp->registered_event_handler)
1166 qp->registered_event_handler(event, qp->qp_context);
1167 }
1168
1169 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1170 {
1171 struct ib_qp *qp = context;
1172 unsigned long flags;
1173
1174 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1175 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1176 if (event->element.qp->event_handler)
1177 event->element.qp->event_handler(event, event->element.qp->qp_context);
1178 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1179 }
1180
1181 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1182 void (*event_handler)(struct ib_event *, void *),
1183 void *qp_context)
1184 {
1185 struct ib_qp *qp;
1186 unsigned long flags;
1187 int err;
1188
1189 qp = kzalloc_obj(*qp);
1190 if (!qp)
1191 return ERR_PTR(-ENOMEM);
1192
1193 qp->real_qp = real_qp;
1194 err = ib_open_shared_qp_security(qp, real_qp->device);
1195 if (err) {
1196 kfree(qp);
1197 return ERR_PTR(err);
1198 }
1199
1200 qp->real_qp = real_qp;
1201 atomic_inc(&real_qp->usecnt);
1202 qp->device = real_qp->device;
1203 qp->event_handler = event_handler;
1204 qp->qp_context = qp_context;
1205 qp->qp_num = real_qp->qp_num;
1206 qp->qp_type = real_qp->qp_type;
1207
1208 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1209 list_add(&qp->open_list, &real_qp->open_list);
1210 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1211
1212 return qp;
1213 }
1214
1215 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1216 struct ib_qp_open_attr *qp_open_attr)
1217 {
1218 struct ib_qp *qp, *real_qp;
1219
1220 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1221 return ERR_PTR(-EINVAL);
1222
1223 down_read(&xrcd->tgt_qps_rwsem);
1224 real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
1225 if (!real_qp) {
1226 up_read(&xrcd->tgt_qps_rwsem);
1227 return ERR_PTR(-EINVAL);
1228 }
1229 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1230 qp_open_attr->qp_context);
1231 up_read(&xrcd->tgt_qps_rwsem);
1232 return qp;
1233 }
1234 EXPORT_SYMBOL(ib_open_qp);
1235
1236 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1237 struct ib_qp_init_attr *qp_init_attr)
1238 {
1239 struct ib_qp *real_qp = qp;
1240 int err;
1241
1242 qp->event_handler = __ib_shared_qp_event_handler;
1243 qp->qp_context = qp;
1244 qp->pd = NULL;
1245 qp->send_cq = qp->recv_cq = NULL;
1246 qp->srq = NULL;
1247 qp->xrcd = qp_init_attr->xrcd;
1248 atomic_inc(&qp_init_attr->xrcd->usecnt);
1249 INIT_LIST_HEAD(&qp->open_list);
1250
1251 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1252 qp_init_attr->qp_context);
1253 if (IS_ERR(qp))
1254 return qp;
1255
1256 err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
1257 real_qp, GFP_KERNEL));
1258 if (err) {
1259 ib_close_qp(qp);
1260 return ERR_PTR(err);
1261 }
1262 return qp;
1263 }
1264
1265 static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
1266 struct ib_qp_init_attr *attr,
1267 struct ib_udata *udata,
1268 struct ib_uqp_object *uobj, const char *caller)
1269 {
1270 struct ib_udata dummy = {};
1271 struct ib_qp *qp;
1272 int ret;
1273
1274 if (!dev->ops.create_qp)
1275 return ERR_PTR(-EOPNOTSUPP);
1276
1277 qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
1278 if (!qp)
1279 return ERR_PTR(-ENOMEM);
1280
1281 qp->device = dev;
1282 qp->pd = pd;
1283 qp->uobject = uobj;
1284 qp->real_qp = qp;
1285
1286 qp->qp_type = attr->qp_type;
1287 qp->rwq_ind_tbl = attr->rwq_ind_tbl;
1288 qp->srq = attr->srq;
1289 qp->event_handler = __ib_qp_event_handler;
1290 qp->registered_event_handler = attr->event_handler;
1291 qp->port = attr->port_num;
1292 qp->qp_context = attr->qp_context;
1293
1294 spin_lock_init(&qp->mr_lock);
1295 INIT_LIST_HEAD(&qp->rdma_mrs);
1296 INIT_LIST_HEAD(&qp->sig_mrs);
1297 init_completion(&qp->srq_completion);
1298
1299 qp->send_cq = attr->send_cq;
1300 qp->recv_cq = attr->recv_cq;
1301
1302 rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
1303 WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
1304 rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1305 ret = dev->ops.create_qp(qp, attr, udata);
1306 if (ret)
1307 goto err_create;
1308
1309 /*
1310 * TODO: The mlx4 driver internally overwrites send_cq and recv_cq.
1311 * Unfortunately, it is not an easy task to fix that driver.
1312 */
1313 qp->send_cq = attr->send_cq;
1314 qp->recv_cq = attr->recv_cq;
1315
1316 ret = ib_create_qp_security(qp, dev);
1317 if (ret)
1318 goto err_security;
1319
1320 rdma_restrack_add(&qp->res);
1321 return qp;
1322
1323 err_security:
1324 qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
1325 err_create:
1326 rdma_restrack_put(&qp->res);
1327 kfree(qp);
1328 return ERR_PTR(ret);
1329
1330 }
1331
1332 /**
1333 * ib_create_qp_user - Creates a QP associated with the specified protection
1334 * domain.
1335 * @dev: IB device
1336 * @pd: The protection domain associated with the QP.
1337 * @attr: A list of initial attributes required to create the
1338 * QP. If QP creation succeeds, then the attributes are updated to
1339 * the actual capabilities of the created QP.
1340 * @udata: User data
1341 * @uobj: uverbs object
1342 * @caller: caller's build-time module name
1343 */
1344 struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
1345 struct ib_qp_init_attr *attr,
1346 struct ib_udata *udata,
1347 struct ib_uqp_object *uobj, const char *caller)
1348 {
1349 struct ib_qp *qp, *xrc_qp;
1350
1351 if (attr->qp_type == IB_QPT_XRC_TGT)
1352 qp = create_qp(dev, pd, attr, NULL, NULL, caller);
1353 else
1354 qp = create_qp(dev, pd, attr, udata, uobj, NULL);
1355 if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
1356 return qp;
1357
1358 xrc_qp = create_xrc_qp_user(qp, attr);
1359 if (IS_ERR(xrc_qp)) {
1360 ib_destroy_qp(qp);
1361 return xrc_qp;
1362 }
1363
1364 xrc_qp->uobject = uobj;
1365 return xrc_qp;
1366 }
1367 EXPORT_SYMBOL(ib_create_qp_user);
1368
1369 void ib_qp_usecnt_inc(struct ib_qp *qp)
1370 {
1371 if (qp->pd)
1372 atomic_inc(&qp->pd->usecnt);
1373 if (qp->send_cq)
1374 atomic_inc(&qp->send_cq->usecnt);
1375 if (qp->recv_cq)
1376 atomic_inc(&qp->recv_cq->usecnt);
1377 if (qp->srq)
1378 atomic_inc(&qp->srq->usecnt);
1379 if (qp->rwq_ind_tbl)
1380 atomic_inc(&qp->rwq_ind_tbl->usecnt);
1381 }
1382 EXPORT_SYMBOL(ib_qp_usecnt_inc);
1383
1384 void ib_qp_usecnt_dec(struct ib_qp *qp)
1385 {
1386 if (qp->rwq_ind_tbl)
1387 atomic_dec(&qp->rwq_ind_tbl->usecnt);
1388 if (qp->srq)
1389 atomic_dec(&qp->srq->usecnt);
1390 if (qp->recv_cq)
1391 atomic_dec(&qp->recv_cq->usecnt);
1392 if (qp->send_cq)
1393 atomic_dec(&qp->send_cq->usecnt);
1394 if (qp->pd)
1395 atomic_dec(&qp->pd->usecnt);
1396 }
1397 EXPORT_SYMBOL(ib_qp_usecnt_dec);
1398
1399 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
1400 struct ib_qp_init_attr *qp_init_attr,
1401 const char *caller)
1402 {
1403 struct ib_device *device = pd->device;
1404 struct ib_qp *qp;
1405 int ret;
1406
1407 /*
1408 * If the caller is using the RDMA API, calculate the resources
1409 * needed for the RDMA READ/WRITE operations.
1410 *
1411 * Note that these callers need to pass in a port number.
1412 */
1413 if (qp_init_attr->cap.max_rdma_ctxs)
1414 rdma_rw_init_qp(device, qp_init_attr);
1415
1416 qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
1417 if (IS_ERR(qp))
1418 return qp;
1419
1420 ib_qp_usecnt_inc(qp);
1421
1422 if (qp_init_attr->cap.max_rdma_ctxs) {
1423 ret = rdma_rw_init_mrs(qp, qp_init_attr);
1424 if (ret)
1425 goto err;
1426 }
1427
1428 /*
1429 * Note: all hw drivers guarantee that max_send_sge is lower than
1430 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1431 * max_send_sge <= max_sge_rd.
1432 */
1433 qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1434 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1435 device->attrs.max_sge_rd);
1436 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1437 qp->integrity_en = true;
1438
1439 return qp;
1440
1441 err:
1442 ib_destroy_qp(qp);
1443 return ERR_PTR(ret);
1444
1445 }
1446 EXPORT_SYMBOL(ib_create_qp_kernel);
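/*
 * Kernel usage sketch (illustrative, capability values are placeholders):
 * most in-kernel users reach this function through the ib_create_qp()
 * wrapper, which supplies KBUILD_MODNAME as @caller.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */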
1447
1448 static const struct {
1449 int valid;
1450 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
1451 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
1452 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1453 [IB_QPS_RESET] = {
1454 [IB_QPS_RESET] = { .valid = 1 },
1455 [IB_QPS_INIT] = {
1456 .valid = 1,
1457 .req_param = {
1458 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1459 IB_QP_PORT |
1460 IB_QP_QKEY),
1461 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
1462 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1463 IB_QP_PORT |
1464 IB_QP_ACCESS_FLAGS),
1465 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1466 IB_QP_PORT |
1467 IB_QP_ACCESS_FLAGS),
1468 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1469 IB_QP_PORT |
1470 IB_QP_ACCESS_FLAGS),
1471 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1472 IB_QP_PORT |
1473 IB_QP_ACCESS_FLAGS),
1474 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1475 IB_QP_QKEY),
1476 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1477 IB_QP_QKEY),
1478 }
1479 },
1480 },
1481 [IB_QPS_INIT] = {
1482 [IB_QPS_RESET] = { .valid = 1 },
1483 [IB_QPS_ERR] = { .valid = 1 },
1484 [IB_QPS_INIT] = {
1485 .valid = 1,
1486 .opt_param = {
1487 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1488 IB_QP_PORT |
1489 IB_QP_QKEY),
1490 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1491 IB_QP_PORT |
1492 IB_QP_ACCESS_FLAGS),
1493 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1494 IB_QP_PORT |
1495 IB_QP_ACCESS_FLAGS),
1496 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1497 IB_QP_PORT |
1498 IB_QP_ACCESS_FLAGS),
1499 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1500 IB_QP_PORT |
1501 IB_QP_ACCESS_FLAGS),
1502 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1503 IB_QP_QKEY),
1504 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1505 IB_QP_QKEY),
1506 }
1507 },
1508 [IB_QPS_RTR] = {
1509 .valid = 1,
1510 .req_param = {
1511 [IB_QPT_UC] = (IB_QP_AV |
1512 IB_QP_PATH_MTU |
1513 IB_QP_DEST_QPN |
1514 IB_QP_RQ_PSN),
1515 [IB_QPT_RC] = (IB_QP_AV |
1516 IB_QP_PATH_MTU |
1517 IB_QP_DEST_QPN |
1518 IB_QP_RQ_PSN |
1519 IB_QP_MAX_DEST_RD_ATOMIC |
1520 IB_QP_MIN_RNR_TIMER),
1521 [IB_QPT_XRC_INI] = (IB_QP_AV |
1522 IB_QP_PATH_MTU |
1523 IB_QP_DEST_QPN |
1524 IB_QP_RQ_PSN),
1525 [IB_QPT_XRC_TGT] = (IB_QP_AV |
1526 IB_QP_PATH_MTU |
1527 IB_QP_DEST_QPN |
1528 IB_QP_RQ_PSN |
1529 IB_QP_MAX_DEST_RD_ATOMIC |
1530 IB_QP_MIN_RNR_TIMER),
1531 },
1532 .opt_param = {
1533 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1534 IB_QP_QKEY),
1535 [IB_QPT_UC] = (IB_QP_ALT_PATH |
1536 IB_QP_ACCESS_FLAGS |
1537 IB_QP_PKEY_INDEX),
1538 [IB_QPT_RC] = (IB_QP_ALT_PATH |
1539 IB_QP_ACCESS_FLAGS |
1540 IB_QP_PKEY_INDEX |
1541 IB_QP_RATE_LIMIT),
1542 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
1543 IB_QP_ACCESS_FLAGS |
1544 IB_QP_PKEY_INDEX),
1545 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
1546 IB_QP_ACCESS_FLAGS |
1547 IB_QP_PKEY_INDEX),
1548 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1549 IB_QP_QKEY),
1550 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1551 IB_QP_QKEY),
1552 },
1553 },
1554 },
1555 [IB_QPS_RTR] = {
1556 [IB_QPS_RESET] = { .valid = 1 },
1557 [IB_QPS_ERR] = { .valid = 1 },
1558 [IB_QPS_RTS] = {
1559 .valid = 1,
1560 .req_param = {
1561 [IB_QPT_UD] = IB_QP_SQ_PSN,
1562 [IB_QPT_UC] = IB_QP_SQ_PSN,
1563 [IB_QPT_RC] = (IB_QP_TIMEOUT |
1564 IB_QP_RETRY_CNT |
1565 IB_QP_RNR_RETRY |
1566 IB_QP_SQ_PSN |
1567 IB_QP_MAX_QP_RD_ATOMIC),
1568 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
1569 IB_QP_RETRY_CNT |
1570 IB_QP_RNR_RETRY |
1571 IB_QP_SQ_PSN |
1572 IB_QP_MAX_QP_RD_ATOMIC),
1573 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
1574 IB_QP_SQ_PSN),
1575 [IB_QPT_SMI] = IB_QP_SQ_PSN,
1576 [IB_QPT_GSI] = IB_QP_SQ_PSN,
1577 },
1578 .opt_param = {
1579 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1580 IB_QP_QKEY),
1581 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1582 IB_QP_ALT_PATH |
1583 IB_QP_ACCESS_FLAGS |
1584 IB_QP_PATH_MIG_STATE),
1585 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1586 IB_QP_ALT_PATH |
1587 IB_QP_ACCESS_FLAGS |
1588 IB_QP_MIN_RNR_TIMER |
1589 IB_QP_PATH_MIG_STATE |
1590 IB_QP_RATE_LIMIT),
1591 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1592 IB_QP_ALT_PATH |
1593 IB_QP_ACCESS_FLAGS |
1594 IB_QP_PATH_MIG_STATE),
1595 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1596 IB_QP_ALT_PATH |
1597 IB_QP_ACCESS_FLAGS |
1598 IB_QP_MIN_RNR_TIMER |
1599 IB_QP_PATH_MIG_STATE),
1600 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1601 IB_QP_QKEY),
1602 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1603 IB_QP_QKEY),
1604 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1605 }
1606 }
1607 },
1608 [IB_QPS_RTS] = {
1609 [IB_QPS_RESET] = { .valid = 1 },
1610 [IB_QPS_ERR] = { .valid = 1 },
1611 [IB_QPS_RTS] = {
1612 .valid = 1,
1613 .opt_param = {
1614 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1615 IB_QP_QKEY),
1616 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1617 IB_QP_ACCESS_FLAGS |
1618 IB_QP_ALT_PATH |
1619 IB_QP_PATH_MIG_STATE),
1620 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1621 IB_QP_ACCESS_FLAGS |
1622 IB_QP_ALT_PATH |
1623 IB_QP_PATH_MIG_STATE |
1624 IB_QP_MIN_RNR_TIMER |
1625 IB_QP_RATE_LIMIT),
1626 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1627 IB_QP_ACCESS_FLAGS |
1628 IB_QP_ALT_PATH |
1629 IB_QP_PATH_MIG_STATE),
1630 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1631 IB_QP_ACCESS_FLAGS |
1632 IB_QP_ALT_PATH |
1633 IB_QP_PATH_MIG_STATE |
1634 IB_QP_MIN_RNR_TIMER),
1635 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1636 IB_QP_QKEY),
1637 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1638 IB_QP_QKEY),
1639 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1640 }
1641 },
1642 [IB_QPS_SQD] = {
1643 .valid = 1,
1644 .opt_param = {
1645 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1646 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1647 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1648 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1649 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1650 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1651 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1652 }
1653 },
1654 },
1655 [IB_QPS_SQD] = {
1656 [IB_QPS_RESET] = { .valid = 1 },
1657 [IB_QPS_ERR] = { .valid = 1 },
1658 [IB_QPS_RTS] = {
1659 .valid = 1,
1660 .opt_param = {
1661 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1662 IB_QP_QKEY),
1663 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1664 IB_QP_ALT_PATH |
1665 IB_QP_ACCESS_FLAGS |
1666 IB_QP_PATH_MIG_STATE),
1667 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1668 IB_QP_ALT_PATH |
1669 IB_QP_ACCESS_FLAGS |
1670 IB_QP_MIN_RNR_TIMER |
1671 IB_QP_PATH_MIG_STATE),
1672 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1673 IB_QP_ALT_PATH |
1674 IB_QP_ACCESS_FLAGS |
1675 IB_QP_PATH_MIG_STATE),
1676 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1677 IB_QP_ALT_PATH |
1678 IB_QP_ACCESS_FLAGS |
1679 IB_QP_MIN_RNR_TIMER |
1680 IB_QP_PATH_MIG_STATE),
1681 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1682 IB_QP_QKEY),
1683 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1684 IB_QP_QKEY),
1685 }
1686 },
1687 [IB_QPS_SQD] = {
1688 .valid = 1,
1689 .opt_param = {
1690 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1691 IB_QP_QKEY),
1692 [IB_QPT_UC] = (IB_QP_AV |
1693 IB_QP_ALT_PATH |
1694 IB_QP_ACCESS_FLAGS |
1695 IB_QP_PKEY_INDEX |
1696 IB_QP_PATH_MIG_STATE),
1697 [IB_QPT_RC] = (IB_QP_PORT |
1698 IB_QP_AV |
1699 IB_QP_TIMEOUT |
1700 IB_QP_RETRY_CNT |
1701 IB_QP_RNR_RETRY |
1702 IB_QP_MAX_QP_RD_ATOMIC |
1703 IB_QP_MAX_DEST_RD_ATOMIC |
1704 IB_QP_ALT_PATH |
1705 IB_QP_ACCESS_FLAGS |
1706 IB_QP_PKEY_INDEX |
1707 IB_QP_MIN_RNR_TIMER |
1708 IB_QP_PATH_MIG_STATE),
1709 [IB_QPT_XRC_INI] = (IB_QP_PORT |
1710 IB_QP_AV |
1711 IB_QP_TIMEOUT |
1712 IB_QP_RETRY_CNT |
1713 IB_QP_RNR_RETRY |
1714 IB_QP_MAX_QP_RD_ATOMIC |
1715 IB_QP_ALT_PATH |
1716 IB_QP_ACCESS_FLAGS |
1717 IB_QP_PKEY_INDEX |
1718 IB_QP_PATH_MIG_STATE),
1719 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
1720 IB_QP_AV |
1721 IB_QP_TIMEOUT |
1722 IB_QP_MAX_DEST_RD_ATOMIC |
1723 IB_QP_ALT_PATH |
1724 IB_QP_ACCESS_FLAGS |
1725 IB_QP_PKEY_INDEX |
1726 IB_QP_MIN_RNR_TIMER |
1727 IB_QP_PATH_MIG_STATE),
1728 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1729 IB_QP_QKEY),
1730 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1731 IB_QP_QKEY),
1732 }
1733 }
1734 },
1735 [IB_QPS_SQE] = {
1736 [IB_QPS_RESET] = { .valid = 1 },
1737 [IB_QPS_ERR] = { .valid = 1 },
1738 [IB_QPS_RTS] = {
1739 .valid = 1,
1740 .opt_param = {
1741 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1742 IB_QP_QKEY),
1743 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1744 IB_QP_ACCESS_FLAGS),
1745 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1746 IB_QP_QKEY),
1747 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1748 IB_QP_QKEY),
1749 }
1750 }
1751 },
1752 [IB_QPS_ERR] = {
1753 [IB_QPS_RESET] = { .valid = 1 },
1754 [IB_QPS_ERR] = { .valid = 1 }
1755 }
1756 };
1757
1758 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1759 enum ib_qp_type type, enum ib_qp_attr_mask mask)
1760 {
1761 enum ib_qp_attr_mask req_param, opt_param;
1762
1763 if (mask & IB_QP_CUR_STATE &&
1764 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1765 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1766 return false;
1767
1768 if (!qp_state_table[cur_state][next_state].valid)
1769 return false;
1770
1771 req_param = qp_state_table[cur_state][next_state].req_param[type];
1772 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1773
1774 if ((mask & req_param) != req_param)
1775 return false;
1776
1777 if (mask & ~(req_param | opt_param | IB_QP_STATE))
1778 return false;
1779
1780 return true;
1781 }
1782 EXPORT_SYMBOL(ib_modify_qp_is_ok);
1783
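/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * driver's modify_qp handler typically validates the requested transition
 * against the table above before touching hardware state. 'my_qp' and its
 * cached state are assumptions for the example.
 *
 *	enum ib_qp_state cur_state, new_state;
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						   my_qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
 *		return -EINVAL;
 */
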
1784 /**
1785 * ib_resolve_eth_dmac - Resolve destination mac address
1786 * @device: Device to consider
1787 * @ah_attr: address handle attribute which describes the
1788 * source and destination parameters
1789 * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop limit.
1790 * It returns 0 on success or an appropriate error code. It initializes the
1791 * necessary ah_attr fields when the call is successful.
1792 */
1793 static int ib_resolve_eth_dmac(struct ib_device *device,
1794 struct rdma_ah_attr *ah_attr)
1795 {
1796 int ret = 0;
1797
1798 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1799 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1800 __be32 addr = 0;
1801
1802 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1803 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1804 } else {
1805 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1806 (char *)ah_attr->roce.dmac);
1807 }
1808 } else {
1809 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1810 }
1811 return ret;
1812 }
1813
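/*
 * Worked example for the mapping above (informational, not code from this
 * file): an IPv4-mapped multicast GID such as ::ffff:239.1.1.1 is mapped by
 * ip_eth_mc_map() to 01:00:5e:01:01:01 (01:00:5e plus the low 23 bits of the
 * IPv4 address), while an IPv6 multicast GID such as ff0e::1 is mapped by
 * ipv6_eth_mc_map() to 33:33:00:00:00:01 (33:33 plus the low 32 bits).
 */
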
1814 static bool is_qp_type_connected(const struct ib_qp *qp)
1815 {
1816 return (qp->qp_type == IB_QPT_UC ||
1817 qp->qp_type == IB_QPT_RC ||
1818 qp->qp_type == IB_QPT_XRC_INI ||
1819 qp->qp_type == IB_QPT_XRC_TGT);
1820 }
1821
1822 /*
1823 * IB core internal function to perform QP attributes modification.
1824 */
1825 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1826 int attr_mask, struct ib_udata *udata)
1827 {
1828 u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1829 const struct ib_gid_attr *old_sgid_attr_av;
1830 const struct ib_gid_attr *old_sgid_attr_alt_av;
1831 int ret;
1832
1833 attr->xmit_slave = NULL;
1834 if (attr_mask & IB_QP_AV) {
1835 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1836 &old_sgid_attr_av);
1837 if (ret)
1838 return ret;
1839
1840 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1841 is_qp_type_connected(qp)) {
1842 struct net_device *slave;
1843
1844 /*
1845 * If the user provided the qp_attr then we have to
1846 * resolve it. Kernel users have to provide already
1847 * resolved rdma_ah_attr's.
1848 */
1849 if (udata) {
1850 ret = ib_resolve_eth_dmac(qp->device,
1851 &attr->ah_attr);
1852 if (ret)
1853 goto out_av;
1854 }
1855 slave = rdma_lag_get_ah_roce_slave(qp->device,
1856 &attr->ah_attr,
1857 GFP_KERNEL);
1858 if (IS_ERR(slave)) {
1859 ret = PTR_ERR(slave);
1860 goto out_av;
1861 }
1862 attr->xmit_slave = slave;
1863 }
1864 }
1865 if (attr_mask & IB_QP_ALT_PATH) {
1866 /*
1867 * FIXME: This does not track the migration state, so if the
1868 * user loads a new alternate path after the HW has migrated
1869 * from primary->alternate we will keep the wrong
1870 * references. This is OK for IB because the reference
1871 * counting does not serve any functional purpose.
1872 */
1873 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1874 &old_sgid_attr_alt_av);
1875 if (ret)
1876 goto out_av;
1877
1878 /*
1879 * Today the core code can only handle alternate paths and APM
1880 * for IB. Ban them in roce mode.
1881 */
1882 if (!(rdma_protocol_ib(qp->device,
1883 attr->alt_ah_attr.port_num) &&
1884 rdma_protocol_ib(qp->device, port))) {
1885 ret = -EINVAL;
1886 goto out;
1887 }
1888 }
1889
1890 if (rdma_ib_or_roce(qp->device, port)) {
1891 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1892 dev_warn(&qp->device->dev,
1893 "%s rq_psn overflow, masking to 24 bits\n",
1894 __func__);
1895 attr->rq_psn &= 0xffffff;
1896 }
1897
1898 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1899 dev_warn(&qp->device->dev,
1900 " %s sq_psn overflow, masking to 24 bits\n",
1901 __func__);
1902 attr->sq_psn &= 0xffffff;
1903 }
1904 }
1905
1906 /*
1907 * Bind this qp to a counter automatically based on the rdma counter
1908 * rules. This is only set in RST2INIT when a port is specified.
1909 */
1910 if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1911 ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1912 rdma_counter_bind_qp_auto(qp, attr->port_num);
1913
1914 ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1915 if (ret)
1916 goto out;
1917
1918 if (attr_mask & IB_QP_PORT)
1919 qp->port = attr->port_num;
1920 if (attr_mask & IB_QP_AV)
1921 qp->av_sgid_attr =
1922 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1923 if (attr_mask & IB_QP_ALT_PATH)
1924 qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1925 &attr->alt_ah_attr, qp->alt_path_sgid_attr);
1926
1927 out:
1928 if (attr_mask & IB_QP_ALT_PATH)
1929 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1930 out_av:
1931 if (attr_mask & IB_QP_AV) {
1932 rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1933 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1934 }
1935 return ret;
1936 }
1937
1938 /**
1939 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1940 * @ib_qp: The QP to modify.
1941 * @attr: On input, specifies the QP attributes to modify. On output,
1942 * the current values of selected QP attributes are returned.
1943 * @attr_mask: A bit-mask used to specify which attributes of the QP
1944 * are being modified.
1945 * @udata: pointer to user's input/output buffer information
1946 *
1947 * It returns 0 on success or an appropriate error code on error.
1948 */
1949 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1950 int attr_mask, struct ib_udata *udata)
1951 {
1952 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1953 }
1954 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1955
1956 static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
1957 u16 *speed, u8 *width)
1958 {
1959 if (!lanes) {
1960 if (netdev_speed <= SPEED_1000) {
1961 *width = IB_WIDTH_1X;
1962 *speed = IB_SPEED_SDR;
1963 } else if (netdev_speed <= SPEED_10000) {
1964 *width = IB_WIDTH_1X;
1965 *speed = IB_SPEED_FDR10;
1966 } else if (netdev_speed <= SPEED_20000) {
1967 *width = IB_WIDTH_4X;
1968 *speed = IB_SPEED_DDR;
1969 } else if (netdev_speed <= SPEED_25000) {
1970 *width = IB_WIDTH_1X;
1971 *speed = IB_SPEED_EDR;
1972 } else if (netdev_speed <= SPEED_40000) {
1973 *width = IB_WIDTH_4X;
1974 *speed = IB_SPEED_FDR10;
1975 } else if (netdev_speed <= SPEED_50000) {
1976 *width = IB_WIDTH_2X;
1977 *speed = IB_SPEED_EDR;
1978 } else if (netdev_speed <= SPEED_100000) {
1979 *width = IB_WIDTH_4X;
1980 *speed = IB_SPEED_EDR;
1981 } else if (netdev_speed <= SPEED_200000) {
1982 *width = IB_WIDTH_4X;
1983 *speed = IB_SPEED_HDR;
1984 } else {
1985 *width = IB_WIDTH_4X;
1986 *speed = IB_SPEED_NDR;
1987 }
1988
1989 return;
1990 }
1991
1992 switch (lanes) {
1993 case 1:
1994 *width = IB_WIDTH_1X;
1995 break;
1996 case 2:
1997 *width = IB_WIDTH_2X;
1998 break;
1999 case 4:
2000 *width = IB_WIDTH_4X;
2001 break;
2002 case 8:
2003 *width = IB_WIDTH_8X;
2004 break;
2005 case 12:
2006 *width = IB_WIDTH_12X;
2007 break;
2008 default:
2009 *width = IB_WIDTH_1X;
2010 }
2011
2012 switch (netdev_speed / lanes) {
2013 case SPEED_2500:
2014 *speed = IB_SPEED_SDR;
2015 break;
2016 case SPEED_5000:
2017 *speed = IB_SPEED_DDR;
2018 break;
2019 case SPEED_10000:
2020 *speed = IB_SPEED_FDR10;
2021 break;
2022 case SPEED_14000:
2023 *speed = IB_SPEED_FDR;
2024 break;
2025 case SPEED_25000:
2026 *speed = IB_SPEED_EDR;
2027 break;
2028 case SPEED_50000:
2029 *speed = IB_SPEED_HDR;
2030 break;
2031 case SPEED_100000:
2032 *speed = IB_SPEED_NDR;
2033 break;
2034 default:
2035 *speed = IB_SPEED_SDR;
2036 }
2037 }
2038
2039 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
2040 {
2041 int rc;
2042 u32 netdev_speed;
2043 struct net_device *netdev;
2044 struct ethtool_link_ksettings lksettings = {};
2045
2046 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
2047 return -EINVAL;
2048
2049 netdev = ib_device_get_netdev(dev, port_num);
2050 if (!netdev)
2051 return -ENODEV;
2052
2053 rtnl_lock();
2054 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
2055 rtnl_unlock();
2056
2057 dev_put(netdev);
2058
2059 if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
2060 netdev_speed = lksettings.base.speed;
2061 } else {
2062 netdev_speed = SPEED_1000;
2063 if (rc)
2064 pr_warn("%s speed is unknown, defaulting to %u\n",
2065 netdev->name, netdev_speed);
2066 }
2067
2068 ib_get_width_and_speed(netdev_speed, lksettings.lanes,
2069 speed, width);
2070
2071 return 0;
2072 }
2073 EXPORT_SYMBOL(ib_get_eth_speed);
2074
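/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * RoCE driver's query_port handler can derive the IB speed/width pair from
 * the netdev link settings, assuming struct ib_port_attr's active_speed and
 * active_width fields as the destinations.
 *
 *	static int my_query_port(struct ib_device *ibdev, u32 port_num,
 *				 struct ib_port_attr *props)
 *	{
 *		return ib_get_eth_speed(ibdev, port_num, &props->active_speed,
 *					&props->active_width);
 *	}
 */
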
2075 int ib_modify_qp(struct ib_qp *qp,
2076 struct ib_qp_attr *qp_attr,
2077 int qp_attr_mask)
2078 {
2079 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
2080 }
2081 EXPORT_SYMBOL(ib_modify_qp);
2082
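/*
 * Illustrative sketch (hypothetical ULP code, not part of this file): a
 * kernel consumer drives an RC QP from RTR to RTS by passing exactly the
 * attributes the state table above requires for that transition; the
 * numeric values are examples only.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	= IB_QPS_RTS,
 *		.timeout	= 14,
 *		.retry_cnt	= 7,
 *		.rnr_retry	= 7,
 *		.sq_psn		= 0,
 *		.max_rd_atomic	= 1,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
 *			   IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
 *			   IB_QP_MAX_QP_RD_ATOMIC);
 */
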
2083 int ib_query_qp(struct ib_qp *qp,
2084 struct ib_qp_attr *qp_attr,
2085 int qp_attr_mask,
2086 struct ib_qp_init_attr *qp_init_attr)
2087 {
2088 qp_attr->ah_attr.grh.sgid_attr = NULL;
2089 qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
2090
2091 return qp->device->ops.query_qp ?
2092 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
2093 qp_init_attr) : -EOPNOTSUPP;
2094 }
2095 EXPORT_SYMBOL(ib_query_qp);
2096
2097 int ib_close_qp(struct ib_qp *qp)
2098 {
2099 struct ib_qp *real_qp;
2100 unsigned long flags;
2101
2102 real_qp = qp->real_qp;
2103 if (real_qp == qp)
2104 return -EINVAL;
2105
2106 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
2107 list_del(&qp->open_list);
2108 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
2109
2110 atomic_dec(&real_qp->usecnt);
2111 if (qp->qp_sec)
2112 ib_close_shared_qp_security(qp->qp_sec);
2113 kfree(qp);
2114
2115 return 0;
2116 }
2117 EXPORT_SYMBOL(ib_close_qp);
2118
2119 static int __ib_destroy_shared_qp(struct ib_qp *qp)
2120 {
2121 struct ib_xrcd *xrcd;
2122 struct ib_qp *real_qp;
2123 int ret;
2124
2125 real_qp = qp->real_qp;
2126 xrcd = real_qp->xrcd;
2127 down_write(&xrcd->tgt_qps_rwsem);
2128 ib_close_qp(qp);
2129 if (atomic_read(&real_qp->usecnt) == 0)
2130 xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
2131 else
2132 real_qp = NULL;
2133 up_write(&xrcd->tgt_qps_rwsem);
2134
2135 if (real_qp) {
2136 ret = ib_destroy_qp(real_qp);
2137 if (!ret)
2138 atomic_dec(&xrcd->usecnt);
2139 }
2140
2141 return 0;
2142 }
2143
2144 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
2145 {
2146 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
2147 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
2148 struct ib_qp_security *sec;
2149 int ret;
2150
2151 WARN_ON_ONCE(qp->mrs_used > 0);
2152
2153 if (atomic_read(&qp->usecnt))
2154 return -EBUSY;
2155
2156 if (qp->real_qp != qp)
2157 return __ib_destroy_shared_qp(qp);
2158
2159 sec = qp->qp_sec;
2160 if (sec)
2161 ib_destroy_qp_security_begin(sec);
2162
2163 if (!qp->uobject)
2164 rdma_rw_cleanup_mrs(qp);
2165
2166 rdma_counter_unbind_qp(qp, qp->port, true);
2167 ret = qp->device->ops.destroy_qp(qp, udata);
2168 if (ret) {
2169 if (sec)
2170 ib_destroy_qp_security_abort(sec);
2171 return ret;
2172 }
2173
2174 if (alt_path_sgid_attr)
2175 rdma_put_gid_attr(alt_path_sgid_attr);
2176 if (av_sgid_attr)
2177 rdma_put_gid_attr(av_sgid_attr);
2178
2179 ib_qp_usecnt_dec(qp);
2180 if (sec)
2181 ib_destroy_qp_security_end(sec);
2182
2183 rdma_restrack_del(&qp->res);
2184 kfree(qp);
2185 return ret;
2186 }
2187 EXPORT_SYMBOL(ib_destroy_qp_user);
2188
2189 /* Completion queues */
2190
2191 struct ib_cq *__ib_create_cq(struct ib_device *device,
2192 ib_comp_handler comp_handler,
2193 void (*event_handler)(struct ib_event *, void *),
2194 void *cq_context,
2195 const struct ib_cq_init_attr *cq_attr,
2196 const char *caller)
2197 {
2198 struct ib_cq *cq;
2199 int ret;
2200
2201 cq = rdma_zalloc_drv_obj(device, ib_cq);
2202 if (!cq)
2203 return ERR_PTR(-ENOMEM);
2204
2205 cq->device = device;
2206 cq->uobject = NULL;
2207 cq->comp_handler = comp_handler;
2208 cq->event_handler = event_handler;
2209 cq->cq_context = cq_context;
2210 atomic_set(&cq->usecnt, 0);
2211
2212 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
2213 rdma_restrack_set_name(&cq->res, caller);
2214
2215 ret = device->ops.create_cq(cq, cq_attr, NULL);
2216 if (ret) {
2217 rdma_restrack_put(&cq->res);
2218 kfree(cq);
2219 return ERR_PTR(ret);
2220 }
2221
2222 rdma_restrack_add(&cq->res);
2223 return cq;
2224 }
2225 EXPORT_SYMBOL(__ib_create_cq);
2226
2227 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2228 {
2229 if (cq->shared)
2230 return -EOPNOTSUPP;
2231
2232 return cq->device->ops.modify_cq ?
2233 cq->device->ops.modify_cq(cq, cq_count,
2234 cq_period) : -EOPNOTSUPP;
2235 }
2236 EXPORT_SYMBOL(rdma_set_cq_moderation);
2237
2238 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2239 {
2240 int ret;
2241
2242 if (WARN_ON_ONCE(cq->shared))
2243 return -EOPNOTSUPP;
2244
2245 if (atomic_read(&cq->usecnt))
2246 return -EBUSY;
2247
2248 ret = cq->device->ops.destroy_cq(cq, udata);
2249 if (ret)
2250 return ret;
2251
2252 rdma_restrack_del(&cq->res);
2253 kfree(cq);
2254 return ret;
2255 }
2256 EXPORT_SYMBOL(ib_destroy_cq_user);
2257
2258 int ib_resize_cq(struct ib_cq *cq, int cqe)
2259 {
2260 if (cq->shared)
2261 return -EOPNOTSUPP;
2262
2263 return cq->device->ops.resize_cq ?
2264 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2265 }
2266 EXPORT_SYMBOL(ib_resize_cq);
2267
2268 /* Memory regions */
2269
2270 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2271 u64 virt_addr, int access_flags)
2272 {
2273 struct ib_mr *mr;
2274
2275 if (access_flags & IB_ACCESS_ON_DEMAND) {
2276 if (!(pd->device->attrs.kernel_cap_flags &
2277 IBK_ON_DEMAND_PAGING)) {
2278 pr_debug("ODP support not available\n");
2279 return ERR_PTR(-EINVAL);
2280 }
2281 }
2282
2283 mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2284 access_flags, NULL, NULL);
2285
2286 if (IS_ERR(mr))
2287 return mr;
2288
2289 mr->device = pd->device;
2290 mr->type = IB_MR_TYPE_USER;
2291 mr->pd = pd;
2292 mr->dm = NULL;
2293 atomic_inc(&pd->usecnt);
2294 mr->iova = virt_addr;
2295 mr->length = length;
2296
2297 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2298 rdma_restrack_parent_name(&mr->res, &pd->res);
2299 rdma_restrack_add(&mr->res);
2300
2301 return mr;
2302 }
2303 EXPORT_SYMBOL(ib_reg_user_mr);
2304
2305 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2306 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2307 {
2308 if (!pd->device->ops.advise_mr)
2309 return -EOPNOTSUPP;
2310
2311 if (!num_sge)
2312 return 0;
2313
2314 return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2315 NULL);
2316 }
2317 EXPORT_SYMBOL(ib_advise_mr);
2318
2319 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2320 {
2321 struct ib_pd *pd = mr->pd;
2322 struct ib_dm *dm = mr->dm;
2323 struct ib_dmah *dmah = mr->dmah;
2324 struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2325 int ret;
2326
2327 trace_mr_dereg(mr);
2328 rdma_restrack_del(&mr->res);
2329 ret = mr->device->ops.dereg_mr(mr, udata);
2330 if (!ret) {
2331 atomic_dec(&pd->usecnt);
2332 if (dm)
2333 atomic_dec(&dm->usecnt);
2334 if (dmah)
2335 atomic_dec(&dmah->usecnt);
2336 kfree(sig_attrs);
2337 }
2338
2339 return ret;
2340 }
2341 EXPORT_SYMBOL(ib_dereg_mr_user);
2342
2343 /**
2344 * ib_alloc_mr() - Allocates a memory region
2345 * @pd: protection domain associated with the region
2346 * @mr_type: memory region type
2347 * @max_num_sg: maximum sg entries available for registration.
2348 *
2349 * Notes:
2350 * Memory registration page/sg lists must not exceed max_num_sg.
2351 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2352 * max_num_sg * used_page_size.
2353 *
2354 */
2355 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2356 u32 max_num_sg)
2357 {
2358 struct ib_mr *mr;
2359
2360 if (!pd->device->ops.alloc_mr) {
2361 mr = ERR_PTR(-EOPNOTSUPP);
2362 goto out;
2363 }
2364
2365 if (mr_type == IB_MR_TYPE_INTEGRITY) {
2366 WARN_ON_ONCE(1);
2367 mr = ERR_PTR(-EINVAL);
2368 goto out;
2369 }
2370
2371 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2372 if (IS_ERR(mr))
2373 goto out;
2374
2375 mr->device = pd->device;
2376 mr->pd = pd;
2377 mr->dm = NULL;
2378 mr->uobject = NULL;
2379 atomic_inc(&pd->usecnt);
2380 mr->need_inval = false;
2381 mr->type = mr_type;
2382 mr->sig_attrs = NULL;
2383
2384 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2385 rdma_restrack_parent_name(&mr->res, &pd->res);
2386 rdma_restrack_add(&mr->res);
2387 out:
2388 trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2389 return mr;
2390 }
2391 EXPORT_SYMBOL(ib_alloc_mr);
2392
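/*
 * Illustrative sketch (hypothetical ULP code, not part of this file): a
 * fast-registration user sizes the MR for its largest expected SG list,
 * later maps pages into it with ib_map_mr_sg() and posts an IB_WR_REG_MR
 * work request, and finally releases it with ib_dereg_mr().
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_pages);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */
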
2393 /**
2394 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2395 * @pd: protection domain associated with the region
2396 * @max_num_data_sg: maximum data sg entries available for registration
2397 * @max_num_meta_sg: maximum metadata sg entries available for
2398 * registration
2399 *
2400 * Notes:
2401 * Memory registration page/sg lists must not exceed max_num_data_sg,
2402 * and the integrity page/sg lists must not exceed max_num_meta_sg.
2403 *
2404 */
2405 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2406 u32 max_num_data_sg,
2407 u32 max_num_meta_sg)
2408 {
2409 struct ib_mr *mr;
2410 struct ib_sig_attrs *sig_attrs;
2411
2412 if (!pd->device->ops.alloc_mr_integrity ||
2413 !pd->device->ops.map_mr_sg_pi) {
2414 mr = ERR_PTR(-EOPNOTSUPP);
2415 goto out;
2416 }
2417
2418 if (!max_num_meta_sg) {
2419 mr = ERR_PTR(-EINVAL);
2420 goto out;
2421 }
2422
2423 sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2424 if (!sig_attrs) {
2425 mr = ERR_PTR(-ENOMEM);
2426 goto out;
2427 }
2428
2429 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2430 max_num_meta_sg);
2431 if (IS_ERR(mr)) {
2432 kfree(sig_attrs);
2433 goto out;
2434 }
2435
2436 mr->device = pd->device;
2437 mr->pd = pd;
2438 mr->dm = NULL;
2439 mr->uobject = NULL;
2440 atomic_inc(&pd->usecnt);
2441 mr->need_inval = false;
2442 mr->type = IB_MR_TYPE_INTEGRITY;
2443 mr->sig_attrs = sig_attrs;
2444
2445 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2446 rdma_restrack_parent_name(&mr->res, &pd->res);
2447 rdma_restrack_add(&mr->res);
2448 out:
2449 trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2450 return mr;
2451 }
2452 EXPORT_SYMBOL(ib_alloc_mr_integrity);
2453
2454 /* Multicast groups */
2455
2456 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2457 {
2458 struct ib_qp_init_attr init_attr = {};
2459 struct ib_qp_attr attr = {};
2460 int num_eth_ports = 0;
2461 unsigned int port;
2462
2463 /* If QP state >= init, it is assigned to a port and we can check this
2464 * port only.
2465 */
2466 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2467 if (attr.qp_state >= IB_QPS_INIT) {
2468 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2469 IB_LINK_LAYER_INFINIBAND)
2470 return true;
2471 goto lid_check;
2472 }
2473 }
2474
2475 /* Can't get a quick answer, iterate over all ports */
2476 rdma_for_each_port(qp->device, port)
2477 if (rdma_port_get_link_layer(qp->device, port) !=
2478 IB_LINK_LAYER_INFINIBAND)
2479 num_eth_ports++;
2480
2481 /* If we have at least one Ethernet port, RoCE annex declares that
2482 * multicast LID should be ignored. We can't tell at this step if the
2483 * QP belongs to an IB or Ethernet port.
2484 */
2485 if (num_eth_ports)
2486 return true;
2487
2488 /* If all the ports are IB, we can check according to IB spec. */
2489 lid_check:
2490 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2491 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2492 }
2493
2494 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2495 {
2496 int ret;
2497
2498 if (!qp->device->ops.attach_mcast)
2499 return -EOPNOTSUPP;
2500
2501 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2502 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2503 return -EINVAL;
2504
2505 ret = qp->device->ops.attach_mcast(qp, gid, lid);
2506 if (!ret)
2507 atomic_inc(&qp->usecnt);
2508 return ret;
2509 }
2510 EXPORT_SYMBOL(ib_attach_mcast);
2511
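/*
 * Illustrative sketch (hypothetical consumer code, not part of this file):
 * only UD QPs may join multicast groups; the MGID/MLID pair typically comes
 * from a struct ib_sa_mcmember_rec delivered by the SA.
 *
 *	ret = ib_attach_mcast(ud_qp, &rec->mgid, be16_to_cpu(rec->mlid));
 *	if (ret)
 *		return ret;
 */
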
2512 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2513 {
2514 int ret;
2515
2516 if (!qp->device->ops.detach_mcast)
2517 return -EOPNOTSUPP;
2518
2519 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2520 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2521 return -EINVAL;
2522
2523 ret = qp->device->ops.detach_mcast(qp, gid, lid);
2524 if (!ret)
2525 atomic_dec(&qp->usecnt);
2526 return ret;
2527 }
2528 EXPORT_SYMBOL(ib_detach_mcast);
2529
2530 /**
2531 * ib_alloc_xrcd_user - Allocates an XRC domain.
2532 * @device: The device on which to allocate the XRC domain.
2533 * @inode: inode to connect XRCD
2534 * @udata: Valid user data or NULL for kernel object
2535 */
2536 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2537 struct inode *inode, struct ib_udata *udata)
2538 {
2539 struct ib_xrcd *xrcd;
2540 int ret;
2541
2542 if (!device->ops.alloc_xrcd)
2543 return ERR_PTR(-EOPNOTSUPP);
2544
2545 xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2546 if (!xrcd)
2547 return ERR_PTR(-ENOMEM);
2548
2549 xrcd->device = device;
2550 xrcd->inode = inode;
2551 atomic_set(&xrcd->usecnt, 0);
2552 init_rwsem(&xrcd->tgt_qps_rwsem);
2553 xa_init(&xrcd->tgt_qps);
2554
2555 ret = device->ops.alloc_xrcd(xrcd, udata);
2556 if (ret)
2557 goto err;
2558 return xrcd;
2559 err:
2560 kfree(xrcd);
2561 return ERR_PTR(ret);
2562 }
2563 EXPORT_SYMBOL(ib_alloc_xrcd_user);
2564
2565 /**
2566 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2567 * @xrcd: The XRC domain to deallocate.
2568 * @udata: Valid user data or NULL for kernel object
2569 */
2570 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2571 {
2572 int ret;
2573
2574 if (atomic_read(&xrcd->usecnt))
2575 return -EBUSY;
2576
2577 WARN_ON(!xa_empty(&xrcd->tgt_qps));
2578 ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2579 if (ret)
2580 return ret;
2581 kfree(xrcd);
2582 return ret;
2583 }
2584 EXPORT_SYMBOL(ib_dealloc_xrcd_user);
2585
2586 /**
2587 * ib_create_wq - Creates a WQ associated with the specified protection
2588 * domain.
2589 * @pd: The protection domain associated with the WQ.
2590 * @wq_attr: A list of initial attributes required to create the
2591 * WQ. If WQ creation succeeds, then the attributes are updated to
2592 * the actual capabilities of the created WQ.
2593 *
2594 * wq_attr->max_wr and wq_attr->max_sge determine
2595 * the requested size of the WQ, and are set to the actual values allocated
2596 * on return.
2597 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2598 * at least as large as the requested values.
2599 */
2600 struct ib_wq *ib_create_wq(struct ib_pd *pd,
2601 struct ib_wq_init_attr *wq_attr)
2602 {
2603 struct ib_wq *wq;
2604
2605 if (!pd->device->ops.create_wq)
2606 return ERR_PTR(-EOPNOTSUPP);
2607
2608 wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2609 if (!IS_ERR(wq)) {
2610 wq->event_handler = wq_attr->event_handler;
2611 wq->wq_context = wq_attr->wq_context;
2612 wq->wq_type = wq_attr->wq_type;
2613 wq->cq = wq_attr->cq;
2614 wq->device = pd->device;
2615 wq->pd = pd;
2616 wq->uobject = NULL;
2617 atomic_inc(&pd->usecnt);
2618 atomic_inc(&wq_attr->cq->usecnt);
2619 atomic_set(&wq->usecnt, 0);
2620 }
2621 return wq;
2622 }
2623 EXPORT_SYMBOL(ib_create_wq);
2624
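/*
 * Illustrative sketch (hypothetical values, not part of this file): a receive
 * work queue is created against an existing CQ, typically to be plugged into
 * an RWQ indirection table for RSS-capable QPs.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 256,
 *		.max_sge = 1,
 *		.cq	 = rx_cq,
 *	};
 *	struct ib_wq *wq = ib_create_wq(pd, &wq_attr);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */
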
2625 /**
2626 * ib_destroy_wq_user - Destroys the specified user WQ.
2627 * @wq: The WQ to destroy.
2628 * @udata: Valid user data
2629 */
2630 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
2631 {
2632 struct ib_cq *cq = wq->cq;
2633 struct ib_pd *pd = wq->pd;
2634 int ret;
2635
2636 if (atomic_read(&wq->usecnt))
2637 return -EBUSY;
2638
2639 ret = wq->device->ops.destroy_wq(wq, udata);
2640 if (ret)
2641 return ret;
2642
2643 atomic_dec(&pd->usecnt);
2644 atomic_dec(&cq->usecnt);
2645 return ret;
2646 }
2647 EXPORT_SYMBOL(ib_destroy_wq_user);
2648
2649 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2650 struct ib_mr_status *mr_status)
2651 {
2652 if (!mr->device->ops.check_mr_status)
2653 return -EOPNOTSUPP;
2654
2655 return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2656 }
2657 EXPORT_SYMBOL(ib_check_mr_status);
2658
2659 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
2660 int state)
2661 {
2662 if (!device->ops.set_vf_link_state)
2663 return -EOPNOTSUPP;
2664
2665 return device->ops.set_vf_link_state(device, vf, port, state);
2666 }
2667 EXPORT_SYMBOL(ib_set_vf_link_state);
2668
2669 int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
2670 struct ifla_vf_info *info)
2671 {
2672 if (!device->ops.get_vf_config)
2673 return -EOPNOTSUPP;
2674
2675 return device->ops.get_vf_config(device, vf, port, info);
2676 }
2677 EXPORT_SYMBOL(ib_get_vf_config);
2678
2679 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
2680 struct ifla_vf_stats *stats)
2681 {
2682 if (!device->ops.get_vf_stats)
2683 return -EOPNOTSUPP;
2684
2685 return device->ops.get_vf_stats(device, vf, port, stats);
2686 }
2687 EXPORT_SYMBOL(ib_get_vf_stats);
2688
2689 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
2690 int type)
2691 {
2692 if (!device->ops.set_vf_guid)
2693 return -EOPNOTSUPP;
2694
2695 return device->ops.set_vf_guid(device, vf, port, guid, type);
2696 }
2697 EXPORT_SYMBOL(ib_set_vf_guid);
2698
2699 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
2700 struct ifla_vf_guid *node_guid,
2701 struct ifla_vf_guid *port_guid)
2702 {
2703 if (!device->ops.get_vf_guid)
2704 return -EOPNOTSUPP;
2705
2706 return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2707 }
2708 EXPORT_SYMBOL(ib_get_vf_guid);
2709 /**
2710 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2711 * information) and set an appropriate memory region for registration.
2712 * @mr: memory region
2713 * @data_sg: dma mapped scatterlist for data
2714 * @data_sg_nents: number of entries in data_sg
2715 * @data_sg_offset: offset in bytes into data_sg
2716 * @meta_sg: dma mapped scatterlist for metadata
2717 * @meta_sg_nents: number of entries in meta_sg
2718 * @meta_sg_offset: offset in bytes into meta_sg
2719 * @page_size: page vector desired page size
2720 *
2721 * Constraints:
2722 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2723 *
2724 * Return: 0 on success.
2725 *
2726 * After this completes successfully, the memory region
2727 * is ready for registration.
2728 */
2729 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2730 int data_sg_nents, unsigned int *data_sg_offset,
2731 struct scatterlist *meta_sg, int meta_sg_nents,
2732 unsigned int *meta_sg_offset, unsigned int page_size)
2733 {
2734 if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2735 WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2736 return -EOPNOTSUPP;
2737
2738 mr->page_size = page_size;
2739
2740 return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2741 data_sg_offset, meta_sg,
2742 meta_sg_nents, meta_sg_offset);
2743 }
2744 EXPORT_SYMBOL(ib_map_mr_sg_pi);
2745
2746 /**
2747 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2748 * and set it on the memory region.
2749 * @mr: memory region
2750 * @sg: dma mapped scatterlist
2751 * @sg_nents: number of entries in sg
2752 * @sg_offset: offset in bytes into sg
2753 * @page_size: page vector desired page size
2754 *
2755 * Constraints:
2756 *
2757 * - The first sg element is allowed to have an offset.
2758 * - Each sg element must either be aligned to page_size or virtually
2759 * contiguous to the previous element. In case an sg element has a
2760 * non-contiguous offset, the mapping prefix will not include it.
2761 * - The last sg element is allowed to have length less than page_size.
2762 * - If the sg_nents total byte length exceeds the MR's max_num_sg * page_size
2763 * then only max_num_sg entries will be mapped.
2764 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2765 * constraints holds and the page_size argument is ignored.
2766 *
2767 * Returns the number of sg elements that were mapped to the memory region.
2768 *
2769 * After this completes successfully, the memory region
2770 * is ready for registration.
2771 */
2772 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2773 unsigned int *sg_offset, unsigned int page_size)
2774 {
2775 if (unlikely(!mr->device->ops.map_mr_sg))
2776 return -EOPNOTSUPP;
2777
2778 mr->page_size = page_size;
2779
2780 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2781 }
2782 EXPORT_SYMBOL(ib_map_mr_sg);
2783
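/*
 * Illustrative sketch (hypothetical ULP code, not part of this file): the SG
 * list must already be DMA mapped, and a return value smaller than the
 * mapped entry count means only a prefix of the list fit the MR.
 *
 *	count = ib_dma_map_sg(ibdev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *
 *	n = ib_map_mr_sg(mr, sgl, count, NULL, PAGE_SIZE);
 *	if (n != count)
 *		return -EINVAL;
 */
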
2784 /**
2785 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2786 * to a page vector
2787 * @mr: memory region
2788 * @sgl: dma mapped scatterlist
2789 * @sg_nents: number of entries in sg
2790 * @sg_offset_p: ==== =======================================================
2791 * IN start offset in bytes into sg
2792 * OUT offset in bytes for element n of the sg of the first
2793 * byte that has not been processed where n is the return
2794 * value of this function.
2795 * ==== =======================================================
2796 * @set_page: driver page assignment function pointer
2797 *
2798 * Core service helper for drivers to convert the largest
2799 * prefix of given sg list to a page vector. The sg list
2800 * prefix converted is the prefix that meet the requirements
2801 * of ib_map_mr_sg.
2802 *
2803 * Returns the number of sg elements that were assigned to
2804 * a page vector.
2805 */
2806 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2807 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2808 {
2809 struct scatterlist *sg;
2810 u64 last_end_dma_addr = 0;
2811 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2812 unsigned int last_page_off = 0;
2813 u64 page_mask = ~((u64)mr->page_size - 1);
2814 int i, ret;
2815
2816 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2817 return -EINVAL;
2818
2819 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2820 mr->length = 0;
2821
2822 for_each_sg(sgl, sg, sg_nents, i) {
2823 u64 dma_addr = sg_dma_address(sg) + sg_offset;
2824 u64 prev_addr = dma_addr;
2825 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2826 u64 end_dma_addr = dma_addr + dma_len;
2827 u64 page_addr = dma_addr & page_mask;
2828
2829 /*
2830 * For the second and later elements, check whether either the
2831 * end of element i-1 or the start of element i is not aligned
2832 * on a page boundary.
2833 */
2834 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2835 /* Stop mapping if there is a gap. */
2836 if (last_end_dma_addr != dma_addr)
2837 break;
2838
2839 /*
2840 * Coalesce this element with the last. If it is small
2841 * enough just update mr->length. Otherwise start
2842 * mapping from the next page.
2843 */
2844 goto next_page;
2845 }
2846
2847 do {
2848 ret = set_page(mr, page_addr);
2849 if (unlikely(ret < 0)) {
2850 sg_offset = prev_addr - sg_dma_address(sg);
2851 mr->length += prev_addr - dma_addr;
2852 if (sg_offset_p)
2853 *sg_offset_p = sg_offset;
2854 return i || sg_offset ? i : ret;
2855 }
2856 prev_addr = page_addr;
2857 next_page:
2858 page_addr += mr->page_size;
2859 } while (page_addr < end_dma_addr);
2860
2861 mr->length += dma_len;
2862 last_end_dma_addr = end_dma_addr;
2863 last_page_off = end_dma_addr & ~page_mask;
2864
2865 sg_offset = 0;
2866 }
2867
2868 if (sg_offset_p)
2869 *sg_offset_p = 0;
2870 return i;
2871 }
2872 EXPORT_SYMBOL(ib_sg_to_pages);
2873
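/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * driver's map_mr_sg() implementation usually supplies a set_page callback
 * that appends each block address to its own page list; 'struct my_mr' and
 * to_my_mr() are assumptions for the example.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages == mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents, unsigned int *sg_offset)
 *	{
 *		to_my_mr(ibmr)->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
 *				      my_set_page);
 *	}
 */
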
2874 struct ib_drain_cqe {
2875 struct ib_cqe cqe;
2876 struct completion done;
2877 };
2878
2879 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2880 {
2881 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2882 cqe);
2883
2884 complete(&cqe->done);
2885 }
2886
2887 /*
2888 * Post a WR and block until its completion is reaped for the SQ.
2889 */
2890 static void __ib_drain_sq(struct ib_qp *qp)
2891 {
2892 struct ib_cq *cq = qp->send_cq;
2893 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2894 struct ib_drain_cqe sdrain;
2895 struct ib_rdma_wr swr = {
2896 .wr = {
2897 .next = NULL,
2898 { .wr_cqe = &sdrain.cqe, },
2899 .opcode = IB_WR_RDMA_WRITE,
2900 },
2901 };
2902 int ret;
2903
2904 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2905 if (ret) {
2906 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2907 return;
2908 }
2909
2910 sdrain.cqe.done = ib_drain_qp_done;
2911 init_completion(&sdrain.done);
2912
2913 ret = ib_post_send(qp, &swr.wr, NULL);
2914 if (ret) {
2915 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2916 return;
2917 }
2918
2919 if (cq->poll_ctx == IB_POLL_DIRECT)
2920 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2921 ib_process_cq_direct(cq, -1);
2922 else
2923 wait_for_completion(&sdrain.done);
2924 }
2925
2926 /*
2927 * Post a WR and block until its completion is reaped for the RQ.
2928 */
2929 static void __ib_drain_rq(struct ib_qp *qp)
2930 {
2931 struct ib_cq *cq = qp->recv_cq;
2932 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2933 struct ib_drain_cqe rdrain;
2934 struct ib_recv_wr rwr = {};
2935 int ret;
2936
2937 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2938 if (ret) {
2939 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2940 return;
2941 }
2942
2943 rwr.wr_cqe = &rdrain.cqe;
2944 rdrain.cqe.done = ib_drain_qp_done;
2945 init_completion(&rdrain.done);
2946
2947 ret = ib_post_recv(qp, &rwr, NULL);
2948 if (ret) {
2949 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2950 return;
2951 }
2952
2953 if (cq->poll_ctx == IB_POLL_DIRECT)
2954 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2955 ib_process_cq_direct(cq, -1);
2956 else
2957 wait_for_completion(&rdrain.done);
2958 }
2959
2960 /*
2961 * __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
2962 * expires.
2963 * @qp: queue pair associated with SRQ to drain
2964 *
2965 * Quoting 10.3.1 Queue Pair and EE Context States:
2966 *
2967 * Note, for QPs that are associated with an SRQ, the Consumer should take the
2968 * QP through the Error State before invoking a Destroy QP or a Modify QP to the
2969 * Reset State. The Consumer may invoke the Destroy QP without first performing
2970 * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
2971 * Last WQE Reached Event. However, if the Consumer does not wait for the
2972 * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
2973 * leakage may occur. Therefore, it is good programming practice to tear down a
2974 * QP that is associated with an SRQ by using the following process:
2975 *
2976 * - Put the QP in the Error State
2977 * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
2978 * - either:
2979 * drain the CQ by invoking the Poll CQ verb and either wait for CQ
2980 * to be empty or the number of Poll CQ operations has exceeded
2981 * CQ capacity size;
2982 * - or
2983 * post another WR that completes on the same CQ and wait for this
2984 * WR to return as a WC;
2985 * - and then invoke a Destroy QP or Reset QP.
2986 *
2987 * We use the first option.
2988 */
2989 static void __ib_drain_srq(struct ib_qp *qp)
2990 {
2991 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2992 struct ib_cq *cq;
2993 int n, polled = 0;
2994 int ret;
2995
2996 if (!qp->srq) {
2997 WARN_ONCE(1, "QP 0x%p is not associated with SRQ\n", qp);
2998 return;
2999 }
3000
3001 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
3002 if (ret) {
3003 WARN_ONCE(ret, "failed to drain shared recv queue: %d\n", ret);
3004 return;
3005 }
3006
3007 if (ib_srq_has_cq(qp->srq->srq_type)) {
3008 cq = qp->srq->ext.cq;
3009 } else if (qp->recv_cq) {
3010 cq = qp->recv_cq;
3011 } else {
3012 WARN_ONCE(1, "QP 0x%p has no CQ associated with SRQ\n", qp);
3013 return;
3014 }
3015
3016 if (wait_for_completion_timeout(&qp->srq_completion, 60 * HZ) > 0) {
3017 while (polled != cq->cqe) {
3018 n = ib_process_cq_direct(cq, cq->cqe - polled);
3019 if (!n)
3020 return;
3021 polled += n;
3022 }
3023 }
3024 }
3025
3026 /**
3027 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
3028 * application.
3029 * @qp: queue pair to drain
3030 *
3031 * If the device has a provider-specific drain function, then
3032 * call that. Otherwise call the generic drain function
3033 * __ib_drain_sq().
3034 *
3035 * The caller must:
3036 *
3037 * ensure there is room in the CQ and SQ for the drain work request and
3038 * completion.
3039 *
3040 * allocate the CQ using ib_alloc_cq().
3041 *
3042 * ensure that there are no other contexts that are posting WRs concurrently.
3043 * Otherwise the drain is not guaranteed.
3044 */
3045 void ib_drain_sq(struct ib_qp *qp)
3046 {
3047 if (qp->device->ops.drain_sq)
3048 qp->device->ops.drain_sq(qp);
3049 else
3050 __ib_drain_sq(qp);
3051 trace_cq_drain_complete(qp->send_cq);
3052 }
3053 EXPORT_SYMBOL(ib_drain_sq);
3054
3055 /**
3056 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
3057 * application.
3058 * @qp: queue pair to drain
3059 *
3060 * If the device has a provider-specific drain function, then
3061 * call that. Otherwise call the generic drain function
3062 * __ib_drain_rq().
3063 *
3064 * The caller must:
3065 *
3066 * ensure there is room in the CQ and RQ for the drain work request and
3067 * completion.
3068 *
3069 * allocate the CQ using ib_alloc_cq().
3070 *
3071 * ensure that there are no other contexts that are posting WRs concurrently.
3072 * Otherwise the drain is not guaranteed.
3073 */
3074 void ib_drain_rq(struct ib_qp *qp)
3075 {
3076 if (qp->device->ops.drain_rq)
3077 qp->device->ops.drain_rq(qp);
3078 else
3079 __ib_drain_rq(qp);
3080 trace_cq_drain_complete(qp->recv_cq);
3081 }
3082 EXPORT_SYMBOL(ib_drain_rq);
3083
3084 /**
3085 * ib_drain_qp() - Block until all CQEs have been consumed by the
3086 * application on both the RQ and SQ.
3087 * @qp: queue pair to drain
3088 *
3089 * The caller must:
3090 *
3091 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
3092 * and completions.
3093 *
3094 * allocate the CQs using ib_alloc_cq().
3095 *
3096 * ensure that there are no other contexts that are posting WRs concurrently.
3097 * Otherwise the drain is not guaranteed.
3098 */
3099 void ib_drain_qp(struct ib_qp *qp)
3100 {
3101 ib_drain_sq(qp);
3102 if (!qp->srq)
3103 ib_drain_rq(qp);
3104 else
3105 __ib_drain_srq(qp);
3106 }
3107 EXPORT_SYMBOL(ib_drain_qp);
3108
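/*
 * Illustrative sketch (hypothetical teardown path, not part of this file):
 * draining before destruction guarantees no completion handler still
 * references ULP state.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */
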
3109 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
3110 enum rdma_netdev_t type, const char *name,
3111 unsigned char name_assign_type,
3112 void (*setup)(struct net_device *))
3113 {
3114 struct rdma_netdev_alloc_params params;
3115 struct net_device *netdev;
3116 int rc;
3117
3118 if (!device->ops.rdma_netdev_get_params)
3119 return ERR_PTR(-EOPNOTSUPP);
3120
3121 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
3122 &params);
3123 if (rc)
3124 return ERR_PTR(rc);
3125
3126 netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
3127 setup, params.txqs, params.rxqs);
3128 if (!netdev)
3129 return ERR_PTR(-ENOMEM);
3130
3131 return netdev;
3132 }
3133 EXPORT_SYMBOL(rdma_alloc_netdev);
3134
3135 int rdma_init_netdev(struct ib_device *device, u32 port_num,
3136 enum rdma_netdev_t type, const char *name,
3137 unsigned char name_assign_type,
3138 void (*setup)(struct net_device *),
3139 struct net_device *netdev)
3140 {
3141 struct rdma_netdev_alloc_params params;
3142 int rc;
3143
3144 if (!device->ops.rdma_netdev_get_params)
3145 return -EOPNOTSUPP;
3146
3147 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
3148 &params);
3149 if (rc)
3150 return rc;
3151
3152 return params.initialize_rdma_netdev(device, port_num,
3153 netdev, params.param);
3154 }
3155 EXPORT_SYMBOL(rdma_init_netdev);
3156
3157 void __rdma_block_iter_start(struct ib_block_iter *biter,
3158 struct scatterlist *sglist, unsigned int nents,
3159 unsigned long pgsz)
3160 {
3161 memset(biter, 0, sizeof(struct ib_block_iter));
3162 biter->__sg = sglist;
3163 biter->__sg_nents = nents;
3164
3165 /* Driver provides best block size to use */
3166 biter->__pg_bit = __fls(pgsz);
3167 }
3168 EXPORT_SYMBOL(__rdma_block_iter_start);
3169
3170 bool __rdma_block_iter_next(struct ib_block_iter *biter)
3171 {
3172 unsigned int block_offset;
3173 unsigned int delta;
3174
3175 if (!biter->__sg_nents || !biter->__sg)
3176 return false;
3177
3178 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
3179 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
3180 delta = BIT_ULL(biter->__pg_bit) - block_offset;
3181
3182 while (biter->__sg_nents && biter->__sg &&
3183 sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
3184 delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
3185 biter->__sg_advance = 0;
3186 biter->__sg = sg_next(biter->__sg);
3187 biter->__sg_nents--;
3188 }
3189 biter->__sg_advance += delta;
3190
3191 return true;
3192 }
3193 EXPORT_SYMBOL(__rdma_block_iter_next);
3194
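/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * these two helpers back the rdma_for_each_block() iterator, which drivers
 * use to walk a DMA-mapped SG list in aligned, fixed-size blocks;
 * my_write_pbl_entry() is an assumption for the example.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, SZ_4K)
 *		my_write_pbl_entry(dev, rdma_block_iter_dma_address(&biter));
 */
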
3195 /**
3196 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
3197 * for the drivers.
3198 * @descs: array of static descriptors
3199 * @num_counters: number of elements in array
3200 * @lifespan: milliseconds between updates
3201 */
3202 struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
3203 const struct rdma_stat_desc *descs, int num_counters,
3204 unsigned long lifespan)
3205 {
3206 struct rdma_hw_stats *stats;
3207
3208 stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
3209 if (!stats)
3210 return NULL;
3211
3212 stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
3213 sizeof(*stats->is_disabled), GFP_KERNEL);
3214 if (!stats->is_disabled)
3215 goto err;
3216
3217 stats->descs = descs;
3218 stats->num_counters = num_counters;
3219 stats->lifespan = msecs_to_jiffies(lifespan);
3220 mutex_init(&stats->lock);
3221
3222 return stats;
3223
3224 err:
3225 kfree(stats);
3226 return NULL;
3227 }
3228 EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
3229
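/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * driver describes its counters once and allocates the stats container from
 * its alloc_hw_port_stats() callback.
 *
 *	static const struct rdma_stat_desc my_descs[] = {
 *		[0].name = "rx_pkts",
 *		[1].name = "tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *my_alloc_hw_port_stats(
 *			struct ib_device *ibdev, u32 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(my_descs, ARRAY_SIZE(my_descs),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */
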
3230 /**
3231 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
3232 * @stats: statistics to release
3233 */
3234 void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
3235 {
3236 if (!stats)
3237 return;
3238
3239 kfree(stats->is_disabled);
3240 kfree(stats);
3241 }
3242 EXPORT_SYMBOL(rdma_free_hw_stats_struct);
3243