/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
37 */ 38 39 #include <linux/errno.h> 40 #include <linux/err.h> 41 #include <linux/export.h> 42 #include <linux/string.h> 43 44 #include <rdma/ib_verbs.h> 45 #include <rdma/ib_cache.h> 46 47 int ib_rate_to_mult(enum ib_rate rate) 48 { 49 switch (rate) { 50 case IB_RATE_2_5_GBPS: return 1; 51 case IB_RATE_5_GBPS: return 2; 52 case IB_RATE_10_GBPS: return 4; 53 case IB_RATE_20_GBPS: return 8; 54 case IB_RATE_30_GBPS: return 12; 55 case IB_RATE_40_GBPS: return 16; 56 case IB_RATE_60_GBPS: return 24; 57 case IB_RATE_80_GBPS: return 32; 58 case IB_RATE_120_GBPS: return 48; 59 default: return -1; 60 } 61 } 62 EXPORT_SYMBOL(ib_rate_to_mult); 63 64 enum ib_rate mult_to_ib_rate(int mult) 65 { 66 switch (mult) { 67 case 1: return IB_RATE_2_5_GBPS; 68 case 2: return IB_RATE_5_GBPS; 69 case 4: return IB_RATE_10_GBPS; 70 case 8: return IB_RATE_20_GBPS; 71 case 12: return IB_RATE_30_GBPS; 72 case 16: return IB_RATE_40_GBPS; 73 case 24: return IB_RATE_60_GBPS; 74 case 32: return IB_RATE_80_GBPS; 75 case 48: return IB_RATE_120_GBPS; 76 default: return IB_RATE_PORT_CURRENT; 77 } 78 } 79 EXPORT_SYMBOL(mult_to_ib_rate); 80 81 enum rdma_transport_type 82 rdma_node_get_transport(enum rdma_node_type node_type) 83 { 84 switch (node_type) { 85 case RDMA_NODE_IB_CA: 86 case RDMA_NODE_IB_SWITCH: 87 case RDMA_NODE_IB_ROUTER: 88 return RDMA_TRANSPORT_IB; 89 case RDMA_NODE_RNIC: 90 return RDMA_TRANSPORT_IWARP; 91 default: 92 BUG(); 93 return 0; 94 } 95 } 96 EXPORT_SYMBOL(rdma_node_get_transport); 97 98 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 99 { 100 if (device->get_link_layer) 101 return device->get_link_layer(device, port_num); 102 103 switch (rdma_node_get_transport(device->node_type)) { 104 case RDMA_TRANSPORT_IB: 105 return IB_LINK_LAYER_INFINIBAND; 106 case RDMA_TRANSPORT_IWARP: 107 return IB_LINK_LAYER_ETHERNET; 108 default: 109 return IB_LINK_LAYER_UNSPECIFIED; 110 } 111 } 112 EXPORT_SYMBOL(rdma_port_get_link_layer); 113 114 /* Protection 
domains */ 115 116 struct ib_pd *ib_alloc_pd(struct ib_device *device) 117 { 118 struct ib_pd *pd; 119 120 pd = device->alloc_pd(device, NULL, NULL); 121 122 if (!IS_ERR(pd)) { 123 pd->device = device; 124 pd->uobject = NULL; 125 atomic_set(&pd->usecnt, 0); 126 } 127 128 return pd; 129 } 130 EXPORT_SYMBOL(ib_alloc_pd); 131 132 int ib_dealloc_pd(struct ib_pd *pd) 133 { 134 if (atomic_read(&pd->usecnt)) 135 return -EBUSY; 136 137 return pd->device->dealloc_pd(pd); 138 } 139 EXPORT_SYMBOL(ib_dealloc_pd); 140 141 /* Address handles */ 142 143 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) 144 { 145 struct ib_ah *ah; 146 147 ah = pd->device->create_ah(pd, ah_attr); 148 149 if (!IS_ERR(ah)) { 150 ah->device = pd->device; 151 ah->pd = pd; 152 ah->uobject = NULL; 153 atomic_inc(&pd->usecnt); 154 } 155 156 return ah; 157 } 158 EXPORT_SYMBOL(ib_create_ah); 159 160 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, 161 struct ib_grh *grh, struct ib_ah_attr *ah_attr) 162 { 163 u32 flow_class; 164 u16 gid_index; 165 int ret; 166 167 memset(ah_attr, 0, sizeof *ah_attr); 168 ah_attr->dlid = wc->slid; 169 ah_attr->sl = wc->sl; 170 ah_attr->src_path_bits = wc->dlid_path_bits; 171 ah_attr->port_num = port_num; 172 173 if (wc->wc_flags & IB_WC_GRH) { 174 ah_attr->ah_flags = IB_AH_GRH; 175 ah_attr->grh.dgid = grh->sgid; 176 177 ret = ib_find_cached_gid(device, &grh->dgid, &port_num, 178 &gid_index); 179 if (ret) 180 return ret; 181 182 ah_attr->grh.sgid_index = (u8) gid_index; 183 flow_class = be32_to_cpu(grh->version_tclass_flow); 184 ah_attr->grh.flow_label = flow_class & 0xFFFFF; 185 ah_attr->grh.hop_limit = 0xFF; 186 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF; 187 } 188 return 0; 189 } 190 EXPORT_SYMBOL(ib_init_ah_from_wc); 191 192 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, 193 struct ib_grh *grh, u8 port_num) 194 { 195 struct ib_ah_attr ah_attr; 196 int ret; 197 198 ret = 
ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr); 199 if (ret) 200 return ERR_PTR(ret); 201 202 return ib_create_ah(pd, &ah_attr); 203 } 204 EXPORT_SYMBOL(ib_create_ah_from_wc); 205 206 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 207 { 208 return ah->device->modify_ah ? 209 ah->device->modify_ah(ah, ah_attr) : 210 -ENOSYS; 211 } 212 EXPORT_SYMBOL(ib_modify_ah); 213 214 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 215 { 216 return ah->device->query_ah ? 217 ah->device->query_ah(ah, ah_attr) : 218 -ENOSYS; 219 } 220 EXPORT_SYMBOL(ib_query_ah); 221 222 int ib_destroy_ah(struct ib_ah *ah) 223 { 224 struct ib_pd *pd; 225 int ret; 226 227 pd = ah->pd; 228 ret = ah->device->destroy_ah(ah); 229 if (!ret) 230 atomic_dec(&pd->usecnt); 231 232 return ret; 233 } 234 EXPORT_SYMBOL(ib_destroy_ah); 235 236 /* Shared receive queues */ 237 238 struct ib_srq *ib_create_srq(struct ib_pd *pd, 239 struct ib_srq_init_attr *srq_init_attr) 240 { 241 struct ib_srq *srq; 242 243 if (!pd->device->create_srq) 244 return ERR_PTR(-ENOSYS); 245 246 srq = pd->device->create_srq(pd, srq_init_attr, NULL); 247 248 if (!IS_ERR(srq)) { 249 srq->device = pd->device; 250 srq->pd = pd; 251 srq->uobject = NULL; 252 srq->event_handler = srq_init_attr->event_handler; 253 srq->srq_context = srq_init_attr->srq_context; 254 atomic_inc(&pd->usecnt); 255 atomic_set(&srq->usecnt, 0); 256 } 257 258 return srq; 259 } 260 EXPORT_SYMBOL(ib_create_srq); 261 262 int ib_modify_srq(struct ib_srq *srq, 263 struct ib_srq_attr *srq_attr, 264 enum ib_srq_attr_mask srq_attr_mask) 265 { 266 return srq->device->modify_srq ? 267 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : 268 -ENOSYS; 269 } 270 EXPORT_SYMBOL(ib_modify_srq); 271 272 int ib_query_srq(struct ib_srq *srq, 273 struct ib_srq_attr *srq_attr) 274 { 275 return srq->device->query_srq ? 
276 srq->device->query_srq(srq, srq_attr) : -ENOSYS; 277 } 278 EXPORT_SYMBOL(ib_query_srq); 279 280 int ib_destroy_srq(struct ib_srq *srq) 281 { 282 struct ib_pd *pd; 283 int ret; 284 285 if (atomic_read(&srq->usecnt)) 286 return -EBUSY; 287 288 pd = srq->pd; 289 290 ret = srq->device->destroy_srq(srq); 291 if (!ret) 292 atomic_dec(&pd->usecnt); 293 294 return ret; 295 } 296 EXPORT_SYMBOL(ib_destroy_srq); 297 298 /* Queue pairs */ 299 300 struct ib_qp *ib_create_qp(struct ib_pd *pd, 301 struct ib_qp_init_attr *qp_init_attr) 302 { 303 struct ib_qp *qp; 304 305 qp = pd->device->create_qp(pd, qp_init_attr, NULL); 306 307 if (!IS_ERR(qp)) { 308 qp->device = pd->device; 309 qp->pd = pd; 310 qp->send_cq = qp_init_attr->send_cq; 311 qp->recv_cq = qp_init_attr->recv_cq; 312 qp->srq = qp_init_attr->srq; 313 qp->uobject = NULL; 314 qp->event_handler = qp_init_attr->event_handler; 315 qp->qp_context = qp_init_attr->qp_context; 316 qp->qp_type = qp_init_attr->qp_type; 317 atomic_inc(&pd->usecnt); 318 atomic_inc(&qp_init_attr->send_cq->usecnt); 319 atomic_inc(&qp_init_attr->recv_cq->usecnt); 320 if (qp_init_attr->srq) 321 atomic_inc(&qp_init_attr->srq->usecnt); 322 } 323 324 return qp; 325 } 326 EXPORT_SYMBOL(ib_create_qp); 327 328 static const struct { 329 int valid; 330 enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1]; 331 enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1]; 332 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 333 [IB_QPS_RESET] = { 334 [IB_QPS_RESET] = { .valid = 1 }, 335 [IB_QPS_INIT] = { 336 .valid = 1, 337 .req_param = { 338 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 339 IB_QP_PORT | 340 IB_QP_QKEY), 341 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 342 IB_QP_PORT | 343 IB_QP_ACCESS_FLAGS), 344 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 345 IB_QP_PORT | 346 IB_QP_ACCESS_FLAGS), 347 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 348 IB_QP_QKEY), 349 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 350 IB_QP_QKEY), 351 } 352 }, 353 }, 354 [IB_QPS_INIT] = { 355 [IB_QPS_RESET] = { .valid = 1 }, 
356 [IB_QPS_ERR] = { .valid = 1 }, 357 [IB_QPS_INIT] = { 358 .valid = 1, 359 .opt_param = { 360 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 361 IB_QP_PORT | 362 IB_QP_QKEY), 363 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 364 IB_QP_PORT | 365 IB_QP_ACCESS_FLAGS), 366 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 367 IB_QP_PORT | 368 IB_QP_ACCESS_FLAGS), 369 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 370 IB_QP_QKEY), 371 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 372 IB_QP_QKEY), 373 } 374 }, 375 [IB_QPS_RTR] = { 376 .valid = 1, 377 .req_param = { 378 [IB_QPT_UC] = (IB_QP_AV | 379 IB_QP_PATH_MTU | 380 IB_QP_DEST_QPN | 381 IB_QP_RQ_PSN), 382 [IB_QPT_RC] = (IB_QP_AV | 383 IB_QP_PATH_MTU | 384 IB_QP_DEST_QPN | 385 IB_QP_RQ_PSN | 386 IB_QP_MAX_DEST_RD_ATOMIC | 387 IB_QP_MIN_RNR_TIMER), 388 }, 389 .opt_param = { 390 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 391 IB_QP_QKEY), 392 [IB_QPT_UC] = (IB_QP_ALT_PATH | 393 IB_QP_ACCESS_FLAGS | 394 IB_QP_PKEY_INDEX), 395 [IB_QPT_RC] = (IB_QP_ALT_PATH | 396 IB_QP_ACCESS_FLAGS | 397 IB_QP_PKEY_INDEX), 398 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 399 IB_QP_QKEY), 400 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 401 IB_QP_QKEY), 402 } 403 } 404 }, 405 [IB_QPS_RTR] = { 406 [IB_QPS_RESET] = { .valid = 1 }, 407 [IB_QPS_ERR] = { .valid = 1 }, 408 [IB_QPS_RTS] = { 409 .valid = 1, 410 .req_param = { 411 [IB_QPT_UD] = IB_QP_SQ_PSN, 412 [IB_QPT_UC] = IB_QP_SQ_PSN, 413 [IB_QPT_RC] = (IB_QP_TIMEOUT | 414 IB_QP_RETRY_CNT | 415 IB_QP_RNR_RETRY | 416 IB_QP_SQ_PSN | 417 IB_QP_MAX_QP_RD_ATOMIC), 418 [IB_QPT_SMI] = IB_QP_SQ_PSN, 419 [IB_QPT_GSI] = IB_QP_SQ_PSN, 420 }, 421 .opt_param = { 422 [IB_QPT_UD] = (IB_QP_CUR_STATE | 423 IB_QP_QKEY), 424 [IB_QPT_UC] = (IB_QP_CUR_STATE | 425 IB_QP_ALT_PATH | 426 IB_QP_ACCESS_FLAGS | 427 IB_QP_PATH_MIG_STATE), 428 [IB_QPT_RC] = (IB_QP_CUR_STATE | 429 IB_QP_ALT_PATH | 430 IB_QP_ACCESS_FLAGS | 431 IB_QP_MIN_RNR_TIMER | 432 IB_QP_PATH_MIG_STATE), 433 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 434 IB_QP_QKEY), 435 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 436 IB_QP_QKEY), 437 } 438 } 439 }, 440 
[IB_QPS_RTS] = { 441 [IB_QPS_RESET] = { .valid = 1 }, 442 [IB_QPS_ERR] = { .valid = 1 }, 443 [IB_QPS_RTS] = { 444 .valid = 1, 445 .opt_param = { 446 [IB_QPT_UD] = (IB_QP_CUR_STATE | 447 IB_QP_QKEY), 448 [IB_QPT_UC] = (IB_QP_CUR_STATE | 449 IB_QP_ACCESS_FLAGS | 450 IB_QP_ALT_PATH | 451 IB_QP_PATH_MIG_STATE), 452 [IB_QPT_RC] = (IB_QP_CUR_STATE | 453 IB_QP_ACCESS_FLAGS | 454 IB_QP_ALT_PATH | 455 IB_QP_PATH_MIG_STATE | 456 IB_QP_MIN_RNR_TIMER), 457 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 458 IB_QP_QKEY), 459 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 460 IB_QP_QKEY), 461 } 462 }, 463 [IB_QPS_SQD] = { 464 .valid = 1, 465 .opt_param = { 466 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, 467 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 468 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 469 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 470 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY 471 } 472 }, 473 }, 474 [IB_QPS_SQD] = { 475 [IB_QPS_RESET] = { .valid = 1 }, 476 [IB_QPS_ERR] = { .valid = 1 }, 477 [IB_QPS_RTS] = { 478 .valid = 1, 479 .opt_param = { 480 [IB_QPT_UD] = (IB_QP_CUR_STATE | 481 IB_QP_QKEY), 482 [IB_QPT_UC] = (IB_QP_CUR_STATE | 483 IB_QP_ALT_PATH | 484 IB_QP_ACCESS_FLAGS | 485 IB_QP_PATH_MIG_STATE), 486 [IB_QPT_RC] = (IB_QP_CUR_STATE | 487 IB_QP_ALT_PATH | 488 IB_QP_ACCESS_FLAGS | 489 IB_QP_MIN_RNR_TIMER | 490 IB_QP_PATH_MIG_STATE), 491 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 492 IB_QP_QKEY), 493 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 494 IB_QP_QKEY), 495 } 496 }, 497 [IB_QPS_SQD] = { 498 .valid = 1, 499 .opt_param = { 500 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 501 IB_QP_QKEY), 502 [IB_QPT_UC] = (IB_QP_AV | 503 IB_QP_ALT_PATH | 504 IB_QP_ACCESS_FLAGS | 505 IB_QP_PKEY_INDEX | 506 IB_QP_PATH_MIG_STATE), 507 [IB_QPT_RC] = (IB_QP_PORT | 508 IB_QP_AV | 509 IB_QP_TIMEOUT | 510 IB_QP_RETRY_CNT | 511 IB_QP_RNR_RETRY | 512 IB_QP_MAX_QP_RD_ATOMIC | 513 IB_QP_MAX_DEST_RD_ATOMIC | 514 IB_QP_ALT_PATH | 515 IB_QP_ACCESS_FLAGS | 516 IB_QP_PKEY_INDEX | 517 IB_QP_MIN_RNR_TIMER | 518 IB_QP_PATH_MIG_STATE), 519 [IB_QPT_SMI] = 
(IB_QP_PKEY_INDEX | 520 IB_QP_QKEY), 521 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 522 IB_QP_QKEY), 523 } 524 } 525 }, 526 [IB_QPS_SQE] = { 527 [IB_QPS_RESET] = { .valid = 1 }, 528 [IB_QPS_ERR] = { .valid = 1 }, 529 [IB_QPS_RTS] = { 530 .valid = 1, 531 .opt_param = { 532 [IB_QPT_UD] = (IB_QP_CUR_STATE | 533 IB_QP_QKEY), 534 [IB_QPT_UC] = (IB_QP_CUR_STATE | 535 IB_QP_ACCESS_FLAGS), 536 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 537 IB_QP_QKEY), 538 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 539 IB_QP_QKEY), 540 } 541 } 542 }, 543 [IB_QPS_ERR] = { 544 [IB_QPS_RESET] = { .valid = 1 }, 545 [IB_QPS_ERR] = { .valid = 1 } 546 } 547 }; 548 549 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 550 enum ib_qp_type type, enum ib_qp_attr_mask mask) 551 { 552 enum ib_qp_attr_mask req_param, opt_param; 553 554 if (cur_state < 0 || cur_state > IB_QPS_ERR || 555 next_state < 0 || next_state > IB_QPS_ERR) 556 return 0; 557 558 if (mask & IB_QP_CUR_STATE && 559 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && 560 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) 561 return 0; 562 563 if (!qp_state_table[cur_state][next_state].valid) 564 return 0; 565 566 req_param = qp_state_table[cur_state][next_state].req_param[type]; 567 opt_param = qp_state_table[cur_state][next_state].opt_param[type]; 568 569 if ((mask & req_param) != req_param) 570 return 0; 571 572 if (mask & ~(req_param | opt_param | IB_QP_STATE)) 573 return 0; 574 575 return 1; 576 } 577 EXPORT_SYMBOL(ib_modify_qp_is_ok); 578 579 int ib_modify_qp(struct ib_qp *qp, 580 struct ib_qp_attr *qp_attr, 581 int qp_attr_mask) 582 { 583 return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); 584 } 585 EXPORT_SYMBOL(ib_modify_qp); 586 587 int ib_query_qp(struct ib_qp *qp, 588 struct ib_qp_attr *qp_attr, 589 int qp_attr_mask, 590 struct ib_qp_init_attr *qp_init_attr) 591 { 592 return qp->device->query_qp ? 
593 qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : 594 -ENOSYS; 595 } 596 EXPORT_SYMBOL(ib_query_qp); 597 598 int ib_destroy_qp(struct ib_qp *qp) 599 { 600 struct ib_pd *pd; 601 struct ib_cq *scq, *rcq; 602 struct ib_srq *srq; 603 int ret; 604 605 pd = qp->pd; 606 scq = qp->send_cq; 607 rcq = qp->recv_cq; 608 srq = qp->srq; 609 610 ret = qp->device->destroy_qp(qp); 611 if (!ret) { 612 atomic_dec(&pd->usecnt); 613 atomic_dec(&scq->usecnt); 614 atomic_dec(&rcq->usecnt); 615 if (srq) 616 atomic_dec(&srq->usecnt); 617 } 618 619 return ret; 620 } 621 EXPORT_SYMBOL(ib_destroy_qp); 622 623 /* Completion queues */ 624 625 struct ib_cq *ib_create_cq(struct ib_device *device, 626 ib_comp_handler comp_handler, 627 void (*event_handler)(struct ib_event *, void *), 628 void *cq_context, int cqe, int comp_vector) 629 { 630 struct ib_cq *cq; 631 632 cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); 633 634 if (!IS_ERR(cq)) { 635 cq->device = device; 636 cq->uobject = NULL; 637 cq->comp_handler = comp_handler; 638 cq->event_handler = event_handler; 639 cq->cq_context = cq_context; 640 atomic_set(&cq->usecnt, 0); 641 } 642 643 return cq; 644 } 645 EXPORT_SYMBOL(ib_create_cq); 646 647 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) 648 { 649 return cq->device->modify_cq ? 650 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS; 651 } 652 EXPORT_SYMBOL(ib_modify_cq); 653 654 int ib_destroy_cq(struct ib_cq *cq) 655 { 656 if (atomic_read(&cq->usecnt)) 657 return -EBUSY; 658 659 return cq->device->destroy_cq(cq); 660 } 661 EXPORT_SYMBOL(ib_destroy_cq); 662 663 int ib_resize_cq(struct ib_cq *cq, int cqe) 664 { 665 return cq->device->resize_cq ? 
666 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; 667 } 668 EXPORT_SYMBOL(ib_resize_cq); 669 670 /* Memory regions */ 671 672 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) 673 { 674 struct ib_mr *mr; 675 676 mr = pd->device->get_dma_mr(pd, mr_access_flags); 677 678 if (!IS_ERR(mr)) { 679 mr->device = pd->device; 680 mr->pd = pd; 681 mr->uobject = NULL; 682 atomic_inc(&pd->usecnt); 683 atomic_set(&mr->usecnt, 0); 684 } 685 686 return mr; 687 } 688 EXPORT_SYMBOL(ib_get_dma_mr); 689 690 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, 691 struct ib_phys_buf *phys_buf_array, 692 int num_phys_buf, 693 int mr_access_flags, 694 u64 *iova_start) 695 { 696 struct ib_mr *mr; 697 698 if (!pd->device->reg_phys_mr) 699 return ERR_PTR(-ENOSYS); 700 701 mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, 702 mr_access_flags, iova_start); 703 704 if (!IS_ERR(mr)) { 705 mr->device = pd->device; 706 mr->pd = pd; 707 mr->uobject = NULL; 708 atomic_inc(&pd->usecnt); 709 atomic_set(&mr->usecnt, 0); 710 } 711 712 return mr; 713 } 714 EXPORT_SYMBOL(ib_reg_phys_mr); 715 716 int ib_rereg_phys_mr(struct ib_mr *mr, 717 int mr_rereg_mask, 718 struct ib_pd *pd, 719 struct ib_phys_buf *phys_buf_array, 720 int num_phys_buf, 721 int mr_access_flags, 722 u64 *iova_start) 723 { 724 struct ib_pd *old_pd; 725 int ret; 726 727 if (!mr->device->rereg_phys_mr) 728 return -ENOSYS; 729 730 if (atomic_read(&mr->usecnt)) 731 return -EBUSY; 732 733 old_pd = mr->pd; 734 735 ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, 736 phys_buf_array, num_phys_buf, 737 mr_access_flags, iova_start); 738 739 if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { 740 atomic_dec(&old_pd->usecnt); 741 atomic_inc(&pd->usecnt); 742 } 743 744 return ret; 745 } 746 EXPORT_SYMBOL(ib_rereg_phys_mr); 747 748 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) 749 { 750 return mr->device->query_mr ? 
751 mr->device->query_mr(mr, mr_attr) : -ENOSYS; 752 } 753 EXPORT_SYMBOL(ib_query_mr); 754 755 int ib_dereg_mr(struct ib_mr *mr) 756 { 757 struct ib_pd *pd; 758 int ret; 759 760 if (atomic_read(&mr->usecnt)) 761 return -EBUSY; 762 763 pd = mr->pd; 764 ret = mr->device->dereg_mr(mr); 765 if (!ret) 766 atomic_dec(&pd->usecnt); 767 768 return ret; 769 } 770 EXPORT_SYMBOL(ib_dereg_mr); 771 772 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) 773 { 774 struct ib_mr *mr; 775 776 if (!pd->device->alloc_fast_reg_mr) 777 return ERR_PTR(-ENOSYS); 778 779 mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len); 780 781 if (!IS_ERR(mr)) { 782 mr->device = pd->device; 783 mr->pd = pd; 784 mr->uobject = NULL; 785 atomic_inc(&pd->usecnt); 786 atomic_set(&mr->usecnt, 0); 787 } 788 789 return mr; 790 } 791 EXPORT_SYMBOL(ib_alloc_fast_reg_mr); 792 793 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device, 794 int max_page_list_len) 795 { 796 struct ib_fast_reg_page_list *page_list; 797 798 if (!device->alloc_fast_reg_page_list) 799 return ERR_PTR(-ENOSYS); 800 801 page_list = device->alloc_fast_reg_page_list(device, max_page_list_len); 802 803 if (!IS_ERR(page_list)) { 804 page_list->device = device; 805 page_list->max_page_list_len = max_page_list_len; 806 } 807 808 return page_list; 809 } 810 EXPORT_SYMBOL(ib_alloc_fast_reg_page_list); 811 812 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) 813 { 814 page_list->device->free_fast_reg_page_list(page_list); 815 } 816 EXPORT_SYMBOL(ib_free_fast_reg_page_list); 817 818 /* Memory windows */ 819 820 struct ib_mw *ib_alloc_mw(struct ib_pd *pd) 821 { 822 struct ib_mw *mw; 823 824 if (!pd->device->alloc_mw) 825 return ERR_PTR(-ENOSYS); 826 827 mw = pd->device->alloc_mw(pd); 828 if (!IS_ERR(mw)) { 829 mw->device = pd->device; 830 mw->pd = pd; 831 mw->uobject = NULL; 832 atomic_inc(&pd->usecnt); 833 } 834 835 return mw; 836 } 837 EXPORT_SYMBOL(ib_alloc_mw); 838 
839 int ib_dealloc_mw(struct ib_mw *mw) 840 { 841 struct ib_pd *pd; 842 int ret; 843 844 pd = mw->pd; 845 ret = mw->device->dealloc_mw(mw); 846 if (!ret) 847 atomic_dec(&pd->usecnt); 848 849 return ret; 850 } 851 EXPORT_SYMBOL(ib_dealloc_mw); 852 853 /* "Fast" memory regions */ 854 855 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 856 int mr_access_flags, 857 struct ib_fmr_attr *fmr_attr) 858 { 859 struct ib_fmr *fmr; 860 861 if (!pd->device->alloc_fmr) 862 return ERR_PTR(-ENOSYS); 863 864 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); 865 if (!IS_ERR(fmr)) { 866 fmr->device = pd->device; 867 fmr->pd = pd; 868 atomic_inc(&pd->usecnt); 869 } 870 871 return fmr; 872 } 873 EXPORT_SYMBOL(ib_alloc_fmr); 874 875 int ib_unmap_fmr(struct list_head *fmr_list) 876 { 877 struct ib_fmr *fmr; 878 879 if (list_empty(fmr_list)) 880 return 0; 881 882 fmr = list_entry(fmr_list->next, struct ib_fmr, list); 883 return fmr->device->unmap_fmr(fmr_list); 884 } 885 EXPORT_SYMBOL(ib_unmap_fmr); 886 887 int ib_dealloc_fmr(struct ib_fmr *fmr) 888 { 889 struct ib_pd *pd; 890 int ret; 891 892 pd = fmr->pd; 893 ret = fmr->device->dealloc_fmr(fmr); 894 if (!ret) 895 atomic_dec(&pd->usecnt); 896 897 return ret; 898 } 899 EXPORT_SYMBOL(ib_dealloc_fmr); 900 901 /* Multicast groups */ 902 903 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 904 { 905 if (!qp->device->attach_mcast) 906 return -ENOSYS; 907 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) 908 return -EINVAL; 909 910 return qp->device->attach_mcast(qp, gid, lid); 911 } 912 EXPORT_SYMBOL(ib_attach_mcast); 913 914 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 915 { 916 if (!qp->device->detach_mcast) 917 return -ENOSYS; 918 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) 919 return -EINVAL; 920 921 return qp->device->detach_mcast(qp, gid, lid); 922 } 923 EXPORT_SYMBOL(ib_detach_mcast); 924