1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/string.h> 37 #include <linux/slab.h> 38 #include <linux/sched.h> 39 40 #include <asm/io.h> 41 42 #include <rdma/ib_verbs.h> 43 #include <rdma/ib_cache.h> 44 #include <rdma/ib_pack.h> 45 #include <rdma/uverbs_ioctl.h> 46 47 #include "mthca_dev.h" 48 #include "mthca_cmd.h" 49 #include "mthca_memfree.h" 50 #include "mthca_wqe.h" 51 52 enum { 53 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, 54 MTHCA_ACK_REQ_FREQ = 10, 55 MTHCA_FLIGHT_LIMIT = 9, 56 MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ 57 MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ 58 MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ 59 }; 60 61 enum { 62 MTHCA_QP_STATE_RST = 0, 63 MTHCA_QP_STATE_INIT = 1, 64 MTHCA_QP_STATE_RTR = 2, 65 MTHCA_QP_STATE_RTS = 3, 66 MTHCA_QP_STATE_SQE = 4, 67 MTHCA_QP_STATE_SQD = 5, 68 MTHCA_QP_STATE_ERR = 6, 69 MTHCA_QP_STATE_DRAINING = 7 70 }; 71 72 enum { 73 MTHCA_QP_ST_RC = 0x0, 74 MTHCA_QP_ST_UC = 0x1, 75 MTHCA_QP_ST_RD = 0x2, 76 MTHCA_QP_ST_UD = 0x3, 77 MTHCA_QP_ST_MLX = 0x7 78 }; 79 80 enum { 81 MTHCA_QP_PM_MIGRATED = 0x3, 82 MTHCA_QP_PM_ARMED = 0x0, 83 MTHCA_QP_PM_REARM = 0x1 84 }; 85 86 enum { 87 /* qp_context flags */ 88 MTHCA_QP_BIT_DE = 1 << 8, 89 /* params1 */ 90 MTHCA_QP_BIT_SRE = 1 << 15, 91 MTHCA_QP_BIT_SWE = 1 << 14, 92 MTHCA_QP_BIT_SAE = 1 << 13, 93 MTHCA_QP_BIT_SIC = 1 << 4, 94 MTHCA_QP_BIT_SSC = 1 << 3, 95 /* params2 */ 96 MTHCA_QP_BIT_RRE = 1 << 15, 97 MTHCA_QP_BIT_RWE = 1 << 14, 98 MTHCA_QP_BIT_RAE = 1 << 13, 99 MTHCA_QP_BIT_RIC = 1 << 4, 100 MTHCA_QP_BIT_RSC = 1 << 3 101 }; 102 103 enum { 104 MTHCA_SEND_DOORBELL_FENCE = 1 << 5 105 }; 106 107 struct mthca_qp_path { 108 __be32 port_pkey; 109 u8 rnr_retry; 110 u8 g_mylmc; 111 __be16 rlid; 112 u8 ackto; 113 u8 mgid_index; 114 u8 static_rate; 115 u8 hop_limit; 116 __be32 sl_tclass_flowlabel; 117 u8 rgid[16]; 118 } __packed; 119 120 struct mthca_qp_context { 121 __be32 flags; 122 __be32 tavor_sched_queue; /* Reserved on Arbel */ 123 u8 mtu_msgmax; 124 u8 rq_size_stride; /* Reserved on Tavor */ 125 u8 sq_size_stride; /* Reserved on Tavor */ 126 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 127 __be32 usr_page; 128 __be32 local_qpn; 129 __be32 remote_qpn; 130 u32 reserved1[2]; 131 struct mthca_qp_path pri_path; 132 struct mthca_qp_path alt_path; 133 __be32 rdd; 134 __be32 pd; 135 __be32 wqe_base; 136 __be32 wqe_lkey; 137 __be32 params1; 138 __be32 reserved2; 139 __be32 next_send_psn; 140 __be32 cqn_snd; 141 __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ 142 __be32 snd_db_index; /* (debugging only entries) */ 143 __be32 last_acked_psn; 144 __be32 ssn; 145 __be32 params2; 146 __be32 rnr_nextrecvpsn; 147 __be32 ra_buff_indx; 148 __be32 cqn_rcv; 149 __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 150 __be32 rcv_db_index; /* (debugging only entries) */ 151 __be32 qkey; 152 __be32 srqn; 153 __be32 rmsn; 154 __be16 rq_wqe_counter; /* reserved on Tavor */ 155 __be16 sq_wqe_counter; /* reserved on Tavor */ 156 u32 reserved3[18]; 157 } __packed; 158 159 struct mthca_qp_param { 160 __be32 opt_param_mask; 161 u32 reserved1; 162 struct mthca_qp_context context; 163 u32 reserved2[62]; 164 } __packed; 165 166 enum { 167 MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, 168 MTHCA_QP_OPTPAR_RRE = 1 << 1, 169 MTHCA_QP_OPTPAR_RAE = 1 << 2, 170 MTHCA_QP_OPTPAR_RWE = 1 << 3, 171 MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, 172 MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, 173 MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, 174 MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, 175 MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, 176 MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, 177 MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, 178 MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, 179 MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, 180 MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, 181 MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, 182 MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, 183 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 184 }; 185 186 static const u8 mthca_opcode[] = { 187 [IB_WR_SEND] = MTHCA_OPCODE_SEND, 188 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, 189 [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, 190 [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, 191 [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, 192 [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, 193 [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, 194 }; 195 196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) 197 { 198 return qp->qpn >= dev->qp_table.sqp_start && 199 qp->qpn <= dev->qp_table.sqp_start + 3; 200 } 201 202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) 203 { 204 return qp->qpn >= dev->qp_table.sqp_start && 205 qp->qpn <= dev->qp_table.sqp_start + 1; 206 } 207 208 static void *get_recv_wqe(struct mthca_qp *qp, int n) 209 { 210 if (qp->is_direct) 211 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); 212 else 213 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + 214 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); 215 } 216 217 static void *get_send_wqe(struct mthca_qp *qp, int n) 218 { 219 if (qp->is_direct) 220 return qp->queue.direct.buf + qp->send_wqe_offset + 221 (n << qp->sq.wqe_shift); 222 else 223 return qp->queue.page_list[(qp->send_wqe_offset + 224 (n << qp->sq.wqe_shift)) >> 225 PAGE_SHIFT].buf + 226 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & 227 (PAGE_SIZE - 1)); 228 } 229 230 static void mthca_wq_reset(struct mthca_wq *wq) 231 { 232 wq->next_ind = 0; 233 wq->last_comp = wq->max - 1; 234 wq->head = 0; 235 wq->tail = 0; 236 } 237 238 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 239 enum ib_event_type event_type) 240 { 241 struct mthca_qp *qp; 242 struct ib_event event; 243 244 spin_lock(&dev->qp_table.lock); 245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); 246 if (qp) 247 ++qp->refcount; 248 spin_unlock(&dev->qp_table.lock); 249 250 if (!qp) { 251 mthca_warn(dev, "Async event %d for bogus QP %08x\n", 252 event_type, qpn); 253 return; 254 } 255 256 if (event_type == IB_EVENT_PATH_MIG) 257 qp->port = qp->alt_port; 258 259 event.device = &dev->ib_dev; 260 event.event = event_type; 261 event.element.qp = &qp->ibqp; 262 if (qp->ibqp.event_handler) 263 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); 264 265 spin_lock(&dev->qp_table.lock); 266 if (!--qp->refcount) 267 wake_up(&qp->wait); 268 spin_unlock(&dev->qp_table.lock); 269 } 270 271 static int to_mthca_state(enum ib_qp_state ib_state) 272 { 273 switch (ib_state) { 274 case IB_QPS_RESET: return MTHCA_QP_STATE_RST; 275 case IB_QPS_INIT: return MTHCA_QP_STATE_INIT; 276 case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; 277 case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; 278 case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; 279 case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; 280 case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; 281 default: return -1; 282 } 283 } 284 285 enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; 286 287 static int to_mthca_st(int transport) 288 { 289 switch (transport) { 290 case RC: return MTHCA_QP_ST_RC; 291 case UC: return MTHCA_QP_ST_UC; 292 case UD: return MTHCA_QP_ST_UD; 293 case RD: return MTHCA_QP_ST_RD; 294 case MLX: return MTHCA_QP_ST_MLX; 295 default: return -1; 296 } 297 } 298 299 static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, 300 int attr_mask) 301 { 302 if (attr_mask & IB_QP_PKEY_INDEX) 303 sqp->pkey_index = attr->pkey_index; 304 if (attr_mask & IB_QP_QKEY) 305 sqp->qkey = attr->qkey; 306 if (attr_mask & IB_QP_SQ_PSN) 307 sqp->send_psn = attr->sq_psn; 308 } 309 310 static void init_port(struct mthca_dev *dev, int port) 311 { 312 int err; 313 struct mthca_init_ib_param param; 314 315 memset(¶m, 0, sizeof param); 316 317 param.port_width = dev->limits.port_width_cap; 318 param.vl_cap = dev->limits.vl_cap; 319 param.mtu_cap = dev->limits.mtu_cap; 320 param.gid_cap = dev->limits.gid_table_len; 321 param.pkey_cap = dev->limits.pkey_table_len; 322 323 err = mthca_INIT_IB(dev, ¶m, port); 324 if (err) 325 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); 326 } 327 328 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, 329 int attr_mask) 330 { 331 u8 dest_rd_atomic; 332 u32 access_flags; 333 u32 hw_access_flags = 0; 334 335 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 336 dest_rd_atomic = attr->max_dest_rd_atomic; 337 else 338 dest_rd_atomic = qp->resp_depth; 339 340 if (attr_mask & IB_QP_ACCESS_FLAGS) 341 access_flags = attr->qp_access_flags; 342 else 343 access_flags = qp->atomic_rd_en; 344 345 if (!dest_rd_atomic) 346 access_flags &= IB_ACCESS_REMOTE_WRITE; 347 348 if (access_flags & IB_ACCESS_REMOTE_READ) 349 hw_access_flags |= MTHCA_QP_BIT_RRE; 350 if (access_flags & IB_ACCESS_REMOTE_ATOMIC) 351 hw_access_flags |= MTHCA_QP_BIT_RAE; 352 if (access_flags & IB_ACCESS_REMOTE_WRITE) 353 hw_access_flags |= MTHCA_QP_BIT_RWE; 354 355 return cpu_to_be32(hw_access_flags); 356 } 357 358 static inline enum ib_qp_state to_ib_qp_state(int mthca_state) 359 { 360 switch (mthca_state) { 361 case MTHCA_QP_STATE_RST: return IB_QPS_RESET; 362 case MTHCA_QP_STATE_INIT: return IB_QPS_INIT; 363 case MTHCA_QP_STATE_RTR: return IB_QPS_RTR; 364 case MTHCA_QP_STATE_RTS: return IB_QPS_RTS; 365 case MTHCA_QP_STATE_DRAINING: 366 case MTHCA_QP_STATE_SQD: return IB_QPS_SQD; 367 case MTHCA_QP_STATE_SQE: return IB_QPS_SQE; 368 case MTHCA_QP_STATE_ERR: return IB_QPS_ERR; 369 default: return -1; 370 } 371 } 372 373 static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state) 374 { 375 switch (mthca_mig_state) { 376 case 0: return IB_MIG_ARMED; 377 case 1: return IB_MIG_REARM; 378 case 3: return IB_MIG_MIGRATED; 379 default: return -1; 380 } 381 } 382 383 static int to_ib_qp_access_flags(int mthca_flags) 384 { 385 int ib_flags = 0; 386 387 if (mthca_flags & MTHCA_QP_BIT_RRE) 388 ib_flags |= IB_ACCESS_REMOTE_READ; 389 if (mthca_flags & MTHCA_QP_BIT_RWE) 390 ib_flags |= IB_ACCESS_REMOTE_WRITE; 391 if (mthca_flags & MTHCA_QP_BIT_RAE) 392 ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 393 394 return ib_flags; 395 } 396 397 static void to_rdma_ah_attr(struct mthca_dev *dev, 398 struct rdma_ah_attr *ah_attr, 399 struct mthca_qp_path *path) 400 { 401 u8 port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; 402 403 memset(ah_attr, 0, sizeof(*ah_attr)); 404 405 if (port_num == 0 || port_num > dev->limits.num_ports) 406 return; 407 ah_attr->type = rdma_ah_find_type(&dev->ib_dev, port_num); 408 rdma_ah_set_port_num(ah_attr, port_num); 409 410 rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid)); 411 rdma_ah_set_sl(ah_attr, be32_to_cpu(path->sl_tclass_flowlabel) >> 28); 412 rdma_ah_set_path_bits(ah_attr, path->g_mylmc & 0x7f); 413 rdma_ah_set_static_rate(ah_attr, 414 mthca_rate_to_ib(dev, 415 path->static_rate & 0xf, 416 port_num)); 417 if (path->g_mylmc & (1 << 7)) { 418 u32 tc_fl = be32_to_cpu(path->sl_tclass_flowlabel); 419 420 rdma_ah_set_grh(ah_attr, NULL, 421 tc_fl & 0xfffff, 422 path->mgid_index & 423 (dev->limits.gid_table_len - 1), 424 path->hop_limit, 425 (tc_fl >> 20) & 0xff); 426 rdma_ah_set_dgid_raw(ah_attr, path->rgid); 427 } 428 } 429 430 int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 431 struct ib_qp_init_attr *qp_init_attr) 432 { 433 struct mthca_dev *dev = to_mdev(ibqp->device); 434 struct mthca_qp *qp = to_mqp(ibqp); 435 int err = 0; 436 struct mthca_mailbox *mailbox = NULL; 437 struct mthca_qp_param *qp_param; 438 struct mthca_qp_context *context; 439 int mthca_state; 440 441 mutex_lock(&qp->mutex); 442 443 if (qp->state == IB_QPS_RESET) { 444 qp_attr->qp_state = IB_QPS_RESET; 445 goto done; 446 } 447 448 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 449 if (IS_ERR(mailbox)) { 450 err = PTR_ERR(mailbox); 451 goto out; 452 } 453 454 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); 455 if (err) { 456 mthca_warn(dev, "QUERY_QP failed (%d)\n", err); 457 goto out_mailbox; 458 } 459 460 qp_param = mailbox->buf; 461 context = &qp_param->context; 462 mthca_state = be32_to_cpu(context->flags) >> 28; 463 464 qp->state = to_ib_qp_state(mthca_state); 465 qp_attr->qp_state = qp->state; 466 qp_attr->path_mtu = context->mtu_msgmax >> 5; 467 qp_attr->path_mig_state = 468 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 469 qp_attr->qkey = be32_to_cpu(context->qkey); 470 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 471 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 472 qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; 473 qp_attr->qp_access_flags = 474 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 475 476 if (qp->transport == RC || qp->transport == UC) { 477 to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 478 to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 479 qp_attr->alt_pkey_index = 480 be32_to_cpu(context->alt_path.port_pkey) & 0x7f; 481 qp_attr->alt_port_num = 482 rdma_ah_get_port_num(&qp_attr->alt_ah_attr); 483 } 484 485 qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; 486 qp_attr->port_num = 487 (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; 488 489 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 490 qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; 491 492 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 493 494 qp_attr->max_dest_rd_atomic = 495 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 496 qp_attr->min_rnr_timer = 497 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 498 qp_attr->timeout = context->pri_path.ackto >> 3; 499 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 500 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; 501 qp_attr->alt_timeout = context->alt_path.ackto >> 3; 502 503 done: 504 qp_attr->cur_qp_state = qp_attr->qp_state; 505 qp_attr->cap.max_send_wr = qp->sq.max; 506 qp_attr->cap.max_recv_wr = qp->rq.max; 507 qp_attr->cap.max_send_sge = qp->sq.max_gs; 508 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 509 qp_attr->cap.max_inline_data = qp->max_inline_data; 510 511 qp_init_attr->cap = qp_attr->cap; 512 qp_init_attr->sq_sig_type = qp->sq_policy; 513 514 out_mailbox: 515 mthca_free_mailbox(dev, mailbox); 516 517 out: 518 mutex_unlock(&qp->mutex); 519 return err; 520 } 521 522 static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah, 523 struct mthca_qp_path *path, u8 port) 524 { 525 path->g_mylmc = rdma_ah_get_path_bits(ah) & 0x7f; 526 path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah)); 527 path->static_rate = mthca_get_rate(dev, rdma_ah_get_static_rate(ah), 528 port); 529 530 if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) { 531 const struct ib_global_route *grh = rdma_ah_read_grh(ah); 532 533 if (grh->sgid_index >= dev->limits.gid_table_len) { 534 mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n", 535 grh->sgid_index, 536 dev->limits.gid_table_len - 1); 537 return -1; 538 } 539 540 path->g_mylmc |= 1 << 7; 541 path->mgid_index = grh->sgid_index; 542 path->hop_limit = grh->hop_limit; 543 path->sl_tclass_flowlabel = 544 cpu_to_be32((rdma_ah_get_sl(ah) << 28) | 545 (grh->traffic_class << 20) | 546 (grh->flow_label)); 547 memcpy(path->rgid, grh->dgid.raw, 16); 548 } else { 549 path->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah) << 550 28); 551 } 552 553 return 0; 554 } 555 556 static int __mthca_modify_qp(struct ib_qp *ibqp, 557 const struct ib_qp_attr *attr, int attr_mask, 558 enum ib_qp_state cur_state, 559 enum ib_qp_state new_state, 560 struct ib_udata *udata) 561 { 562 struct mthca_dev *dev = to_mdev(ibqp->device); 563 struct mthca_qp *qp = to_mqp(ibqp); 564 struct mthca_ucontext *context = rdma_udata_to_drv_context( 565 udata, struct mthca_ucontext, ibucontext); 566 struct mthca_mailbox *mailbox; 567 struct mthca_qp_param *qp_param; 568 struct mthca_qp_context *qp_context; 569 u32 sqd_event = 0; 570 int err = -EINVAL; 571 572 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 573 if (IS_ERR(mailbox)) { 574 err = PTR_ERR(mailbox); 575 goto out; 576 } 577 qp_param = mailbox->buf; 578 qp_context = &qp_param->context; 579 memset(qp_param, 0, sizeof *qp_param); 580 581 qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | 582 (to_mthca_st(qp->transport) << 16)); 583 qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); 584 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) 585 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); 586 else { 587 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); 588 switch (attr->path_mig_state) { 589 case IB_MIG_MIGRATED: 590 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); 591 break; 592 case IB_MIG_REARM: 593 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); 594 break; 595 case IB_MIG_ARMED: 596 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); 597 break; 598 } 599 } 600 601 /* leave tavor_sched_queue as 0 */ 602 603 if (qp->transport == MLX || qp->transport == UD) 604 qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; 605 else if (attr_mask & IB_QP_PATH_MTU) { 606 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { 607 mthca_dbg(dev, "path MTU (%u) is invalid\n", 608 attr->path_mtu); 609 goto out_mailbox; 610 } 611 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 612 } 613 614 if (mthca_is_memfree(dev)) { 615 if (qp->rq.max) 616 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; 617 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; 618 619 if (qp->sq.max) 620 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; 621 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; 622 } 623 624 /* leave arbel_sched_queue as 0 */ 625 626 if (qp->ibqp.uobject) 627 qp_context->usr_page = cpu_to_be32(context->uar.index); 628 else 629 qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); 630 qp_context->local_qpn = cpu_to_be32(qp->qpn); 631 if (attr_mask & IB_QP_DEST_QPN) { 632 qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 633 } 634 635 if (qp->transport == MLX) 636 qp_context->pri_path.port_pkey |= 637 cpu_to_be32(qp->port << 24); 638 else { 639 if (attr_mask & IB_QP_PORT) { 640 qp_context->pri_path.port_pkey |= 641 cpu_to_be32(attr->port_num << 24); 642 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); 643 } 644 } 645 646 if (attr_mask & IB_QP_PKEY_INDEX) { 647 qp_context->pri_path.port_pkey |= 648 cpu_to_be32(attr->pkey_index); 649 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); 650 } 651 652 if (attr_mask & IB_QP_RNR_RETRY) { 653 qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = 654 attr->rnr_retry << 5; 655 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | 656 MTHCA_QP_OPTPAR_ALT_RNR_RETRY); 657 } 658 659 if (attr_mask & IB_QP_AV) { 660 if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, 661 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) 662 goto out_mailbox; 663 664 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); 665 } 666 667 if (ibqp->qp_type == IB_QPT_RC && 668 cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 669 u8 sched_queue = ibqp->uobject ? 0x2 : 0x1; 670 671 if (mthca_is_memfree(dev)) 672 qp_context->rlkey_arbel_sched_queue |= sched_queue; 673 else 674 qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); 675 676 qp_param->opt_param_mask |= 677 cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); 678 } 679 680 if (attr_mask & IB_QP_TIMEOUT) { 681 qp_context->pri_path.ackto = attr->timeout << 3; 682 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); 683 } 684 685 if (attr_mask & IB_QP_ALT_PATH) { 686 if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { 687 mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", 688 attr->alt_pkey_index, dev->limits.pkey_table_len-1); 689 goto out_mailbox; 690 } 691 692 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { 693 mthca_dbg(dev, "Alternate port number (%u) is invalid\n", 694 attr->alt_port_num); 695 goto out_mailbox; 696 } 697 698 if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, 699 rdma_ah_get_port_num(&attr->alt_ah_attr))) 700 goto out_mailbox; 701 702 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | 703 attr->alt_port_num << 24); 704 qp_context->alt_path.ackto = attr->alt_timeout << 3; 705 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); 706 } 707 708 /* leave rdd as 0 */ 709 qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); 710 /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ 711 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); 712 qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | 713 (MTHCA_FLIGHT_LIMIT << 24) | 714 MTHCA_QP_BIT_SWE); 715 if (qp->sq_policy == IB_SIGNAL_ALL_WR) 716 qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); 717 if (attr_mask & IB_QP_RETRY_CNT) { 718 qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); 719 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); 720 } 721 722 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 723 if (attr->max_rd_atomic) { 724 qp_context->params1 |= 725 cpu_to_be32(MTHCA_QP_BIT_SRE | 726 MTHCA_QP_BIT_SAE); 727 qp_context->params1 |= 728 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); 729 } 730 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); 731 } 732 733 if (attr_mask & IB_QP_SQ_PSN) 734 qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); 735 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); 736 737 if (mthca_is_memfree(dev)) { 738 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); 739 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); 740 } 741 742 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 743 if (attr->max_dest_rd_atomic) 744 qp_context->params2 |= 745 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); 746 747 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); 748 } 749 750 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { 751 qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); 752 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | 753 MTHCA_QP_OPTPAR_RRE | 754 MTHCA_QP_OPTPAR_RAE); 755 } 756 757 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 758 759 if (ibqp->srq) 760 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); 761 762 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 763 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 764 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); 765 } 766 if (attr_mask & IB_QP_RQ_PSN) 767 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); 768 769 qp_context->ra_buff_indx = 770 cpu_to_be32(dev->qp_table.rdb_base + 771 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << 772 dev->qp_table.rdb_shift)); 773 774 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); 775 776 if (mthca_is_memfree(dev)) 777 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); 778 779 if (attr_mask & IB_QP_QKEY) { 780 qp_context->qkey = cpu_to_be32(attr->qkey); 781 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 782 } 783 784 if (ibqp->srq) 785 qp_context->srqn = cpu_to_be32(1 << 24 | 786 to_msrq(ibqp->srq)->srqn); 787 788 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 789 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && 790 attr->en_sqd_async_notify) 791 sqd_event = 1 << 31; 792 793 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, 794 mailbox, sqd_event); 795 if (err) { 796 mthca_warn(dev, "modify QP %d->%d returned %d.\n", 797 cur_state, new_state, err); 798 goto out_mailbox; 799 } 800 801 qp->state = new_state; 802 if (attr_mask & IB_QP_ACCESS_FLAGS) 803 qp->atomic_rd_en = attr->qp_access_flags; 804 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 805 qp->resp_depth = attr->max_dest_rd_atomic; 806 if (attr_mask & IB_QP_PORT) 807 qp->port = attr->port_num; 808 if (attr_mask & IB_QP_ALT_PATH) 809 qp->alt_port = attr->alt_port_num; 810 811 if (is_sqp(dev, qp)) 812 store_attrs(qp->sqp, attr, attr_mask); 813 814 /* 815 * If we moved QP0 to RTR, bring the IB link up; if we moved 816 * QP0 to RESET or ERROR, bring the link back down. 817 */ 818 if (is_qp0(dev, qp)) { 819 if (cur_state != IB_QPS_RTR && 820 new_state == IB_QPS_RTR) 821 init_port(dev, qp->port); 822 823 if (cur_state != IB_QPS_RESET && 824 cur_state != IB_QPS_ERR && 825 (new_state == IB_QPS_RESET || 826 new_state == IB_QPS_ERR)) 827 mthca_CLOSE_IB(dev, qp->port); 828 } 829 830 /* 831 * If we moved a kernel QP to RESET, clean up all old CQ 832 * entries and reinitialize the QP. 833 */ 834 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 835 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 836 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 837 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); 839 840 mthca_wq_reset(&qp->sq); 841 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 842 843 mthca_wq_reset(&qp->rq); 844 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 845 846 if (mthca_is_memfree(dev)) { 847 *qp->sq.db = 0; 848 *qp->rq.db = 0; 849 } 850 } 851 852 out_mailbox: 853 mthca_free_mailbox(dev, mailbox); 854 out: 855 return err; 856 } 857 858 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 859 struct ib_udata *udata) 860 { 861 struct mthca_dev *dev = to_mdev(ibqp->device); 862 struct mthca_qp *qp = to_mqp(ibqp); 863 enum ib_qp_state cur_state, new_state; 864 int err = -EINVAL; 865 866 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 867 return -EOPNOTSUPP; 868 869 mutex_lock(&qp->mutex); 870 if (attr_mask & IB_QP_CUR_STATE) { 871 cur_state = attr->cur_qp_state; 872 } else { 873 spin_lock_irq(&qp->sq.lock); 874 spin_lock(&qp->rq.lock); 875 cur_state = qp->state; 876 spin_unlock(&qp->rq.lock); 877 spin_unlock_irq(&qp->sq.lock); 878 } 879 880 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 881 882 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, 883 attr_mask)) { 884 mthca_dbg(dev, "Bad QP transition (transport %d) " 885 "%d->%d with attr 0x%08x\n", 886 qp->transport, cur_state, new_state, 887 attr_mask); 888 goto out; 889 } 890 891 if ((attr_mask & IB_QP_PKEY_INDEX) && 892 attr->pkey_index >= dev->limits.pkey_table_len) { 893 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n", 894 attr->pkey_index, dev->limits.pkey_table_len-1); 895 goto out; 896 } 897 898 if ((attr_mask & IB_QP_PORT) && 899 (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { 900 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); 901 goto out; 902 } 903 904 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 905 attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { 906 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", 907 attr->max_rd_atomic, dev->limits.max_qp_init_rdma); 908 goto out; 909 } 910 911 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 912 attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { 913 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", 914 attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); 915 goto out; 916 } 917 918 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 919 err = 0; 920 goto out; 921 } 922 923 err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state, 924 udata); 925 926 out: 927 mutex_unlock(&qp->mutex); 928 return err; 929 } 930 931 static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) 932 { 933 /* 934 * Calculate the maximum size of WQE s/g segments, excluding 935 * the next segment and other non-data segments. 936 */ 937 int max_data_size = desc_sz - sizeof (struct mthca_next_seg); 938 939 switch (qp->transport) { 940 case MLX: 941 max_data_size -= 2 * sizeof (struct mthca_data_seg); 942 break; 943 944 case UD: 945 if (mthca_is_memfree(dev)) 946 max_data_size -= sizeof (struct mthca_arbel_ud_seg); 947 else 948 max_data_size -= sizeof (struct mthca_tavor_ud_seg); 949 break; 950 951 default: 952 max_data_size -= sizeof (struct mthca_raddr_seg); 953 break; 954 } 955 956 return max_data_size; 957 } 958 959 static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size) 960 { 961 /* We don't support inline data for kernel QPs (yet). */ 962 return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0; 963 } 964 965 static void mthca_adjust_qp_caps(struct mthca_dev *dev, 966 struct mthca_pd *pd, 967 struct mthca_qp *qp) 968 { 969 int max_data_size = mthca_max_data_size(dev, qp, 970 min(dev->limits.max_desc_sz, 971 1 << qp->sq.wqe_shift)); 972 973 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); 974 975 qp->sq.max_gs = min_t(int, dev->limits.max_sg, 976 max_data_size / sizeof (struct mthca_data_seg)); 977 qp->rq.max_gs = min_t(int, dev->limits.max_sg, 978 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - 979 sizeof (struct mthca_next_seg)) / 980 sizeof (struct mthca_data_seg)); 981 } 982 983 /* 984 * Allocate and register buffer for WQEs. qp->rq.max, sq.max, 985 * rq.max_gs and sq.max_gs must all be assigned. 986 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and 987 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and 988 * queue) 989 */ 990 static int mthca_alloc_wqe_buf(struct mthca_dev *dev, 991 struct mthca_pd *pd, 992 struct mthca_qp *qp, 993 struct ib_udata *udata) 994 { 995 int size; 996 int err = -ENOMEM; 997 998 size = sizeof (struct mthca_next_seg) + 999 qp->rq.max_gs * sizeof (struct mthca_data_seg); 1000 1001 if (size > dev->limits.max_desc_sz) 1002 return -EINVAL; 1003 1004 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; 1005 qp->rq.wqe_shift++) 1006 ; /* nothing */ 1007 1008 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); 1009 switch (qp->transport) { 1010 case MLX: 1011 size += 2 * sizeof (struct mthca_data_seg); 1012 break; 1013 1014 case UD: 1015 size += mthca_is_memfree(dev) ? 1016 sizeof (struct mthca_arbel_ud_seg) : 1017 sizeof (struct mthca_tavor_ud_seg); 1018 break; 1019 1020 case UC: 1021 size += sizeof (struct mthca_raddr_seg); 1022 break; 1023 1024 case RC: 1025 size += sizeof (struct mthca_raddr_seg); 1026 /* 1027 * An atomic op will require an atomic segment, a 1028 * remote address segment and one scatter entry. 1029 */ 1030 size = max_t(int, size, 1031 sizeof (struct mthca_atomic_seg) + 1032 sizeof (struct mthca_raddr_seg) + 1033 sizeof (struct mthca_data_seg)); 1034 break; 1035 1036 default: 1037 break; 1038 } 1039 1040 /* Make sure that we have enough space for a bind request */ 1041 size = max_t(int, size, sizeof (struct mthca_bind_seg)); 1042 1043 size += sizeof (struct mthca_next_seg); 1044 1045 if (size > dev->limits.max_desc_sz) 1046 return -EINVAL; 1047 1048 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; 1049 qp->sq.wqe_shift++) 1050 ; /* nothing */ 1051 1052 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, 1053 1 << qp->sq.wqe_shift); 1054 1055 /* 1056 * If this is a userspace QP, we don't actually have to 1057 * allocate anything. All we need is to calculate the WQE 1058 * sizes and the send_wqe_offset, so we're done now. 1059 */ 1060 if (udata) 1061 return 0; 1062 1063 size = PAGE_ALIGN(qp->send_wqe_offset + 1064 (qp->sq.max << qp->sq.wqe_shift)); 1065 1066 qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64), 1067 GFP_KERNEL); 1068 if (!qp->wrid) 1069 goto err_out; 1070 1071 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, 1072 &qp->queue, &qp->is_direct, pd, 0, &qp->mr); 1073 if (err) 1074 goto err_out; 1075 1076 return 0; 1077 1078 err_out: 1079 kfree(qp->wrid); 1080 return err; 1081 } 1082 1083 static void mthca_free_wqe_buf(struct mthca_dev *dev, 1084 struct mthca_qp *qp) 1085 { 1086 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + 1087 (qp->sq.max << qp->sq.wqe_shift)), 1088 &qp->queue, qp->is_direct, &qp->mr); 1089 kfree(qp->wrid); 1090 } 1091 1092 static int mthca_map_memfree(struct mthca_dev *dev, 1093 struct mthca_qp *qp) 1094 { 1095 int ret; 1096 1097 if (mthca_is_memfree(dev)) { 1098 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); 1099 if (ret) 1100 return ret; 1101 1102 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); 1103 if (ret) 1104 goto err_qpc; 1105 1106 ret = mthca_table_get(dev, dev->qp_table.rdb_table, 1107 qp->qpn << dev->qp_table.rdb_shift); 1108 if (ret) 1109 goto err_eqpc; 1110 1111 } 1112 1113 return 0; 1114 1115 err_eqpc: 1116 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); 1117 1118 err_qpc: 1119 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); 1120 1121 return ret; 1122 } 1123 1124 static void mthca_unmap_memfree(struct mthca_dev *dev, 1125 struct mthca_qp *qp) 1126 { 1127 mthca_table_put(dev, dev->qp_table.rdb_table, 1128 qp->qpn << dev->qp_table.rdb_shift); 1129 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); 1130 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); 1131 } 1132 1133 static int mthca_alloc_memfree(struct mthca_dev *dev, 1134 struct mthca_qp *qp) 1135 { 1136 if (mthca_is_memfree(dev)) { 1137 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, 1138 qp->qpn, &qp->rq.db); 1139 if (qp->rq.db_index < 0) 1140 return -ENOMEM; 1141 1142 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, 1143 qp->qpn, &qp->sq.db); 1144 if (qp->sq.db_index < 0) { 1145 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1146 return -ENOMEM; 1147 } 1148 } 1149 1150 return 0; 1151 } 1152 1153 static void mthca_free_memfree(struct mthca_dev *dev, 1154 struct mthca_qp *qp) 1155 { 1156 if (mthca_is_memfree(dev)) { 1157 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); 1158 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1159 } 1160 } 1161 1162 static int mthca_alloc_qp_common(struct mthca_dev *dev, 1163 struct mthca_pd *pd, 1164 struct mthca_cq *send_cq, 1165 struct mthca_cq *recv_cq, 1166 enum ib_sig_type send_policy, 1167 struct mthca_qp *qp, 1168 struct ib_udata *udata) 1169 { 1170 int ret; 1171 int i; 1172 struct mthca_next_seg *next; 1173 1174 qp->refcount = 1; 1175 init_waitqueue_head(&qp->wait); 1176 mutex_init(&qp->mutex); 1177 qp->state = IB_QPS_RESET; 1178 qp->atomic_rd_en = 0; 1179 qp->resp_depth = 0; 1180 qp->sq_policy = send_policy; 1181 mthca_wq_reset(&qp->sq); 1182 mthca_wq_reset(&qp->rq); 1183 1184 spin_lock_init(&qp->sq.lock); 1185 spin_lock_init(&qp->rq.lock); 1186 1187 ret = mthca_map_memfree(dev, qp); 1188 if (ret) 1189 return ret; 1190 1191 ret = mthca_alloc_wqe_buf(dev, pd, qp, udata); 1192 if (ret) { 1193 mthca_unmap_memfree(dev, qp); 1194 return ret; 1195 } 1196 1197 mthca_adjust_qp_caps(dev, pd, qp); 1198 1199 /* 1200 * If this is a userspace QP, we're done now. The doorbells 1201 * will be allocated and buffers will be initialized in 1202 * userspace. 1203 */ 1204 if (udata) 1205 return 0; 1206 1207 ret = mthca_alloc_memfree(dev, qp); 1208 if (ret) { 1209 mthca_free_wqe_buf(dev, qp); 1210 mthca_unmap_memfree(dev, qp); 1211 return ret; 1212 } 1213 1214 if (mthca_is_memfree(dev)) { 1215 struct mthca_data_seg *scatter; 1216 int size = (sizeof (struct mthca_next_seg) + 1217 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; 1218 1219 for (i = 0; i < qp->rq.max; ++i) { 1220 next = get_recv_wqe(qp, i); 1221 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << 1222 qp->rq.wqe_shift); 1223 next->ee_nds = cpu_to_be32(size); 1224 1225 for (scatter = (void *) (next + 1); 1226 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); 1227 ++scatter) 1228 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 1229 } 1230 1231 for (i = 0; i < qp->sq.max; ++i) { 1232 next = get_send_wqe(qp, i); 1233 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << 1234 qp->sq.wqe_shift) + 1235 qp->send_wqe_offset); 1236 } 1237 } else { 1238 for (i = 0; i < qp->rq.max; ++i) { 1239 next = get_recv_wqe(qp, i); 1240 next->nda_op = htonl((((i + 1) % qp->rq.max) << 1241 qp->rq.wqe_shift) | 1); 1242 } 1243 1244 } 1245 1246 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 1247 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 1248 1249 return 0; 1250 } 1251 1252 static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, 1253 struct mthca_pd *pd, struct mthca_qp *qp) 1254 { 1255 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); 1256 1257 /* Sanity check QP size before proceeding */ 1258 if (cap->max_send_wr > dev->limits.max_wqes || 1259 cap->max_recv_wr > dev->limits.max_wqes || 1260 cap->max_send_sge > dev->limits.max_sg || 1261 cap->max_recv_sge > dev->limits.max_sg || 1262 cap->max_inline_data > mthca_max_inline_data(pd, max_data_size)) 1263 return -EINVAL; 1264 1265 /* 1266 * For MLX transport we need 2 extra send gather entries: 1267 * one for the header and one for the checksum at the end 1268 */ 1269 if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) 1270 return -EINVAL; 1271 1272 if (mthca_is_memfree(dev)) { 1273 qp->rq.max = cap->max_recv_wr ? 1274 roundup_pow_of_two(cap->max_recv_wr) : 0; 1275 qp->sq.max = cap->max_send_wr ? 1276 roundup_pow_of_two(cap->max_send_wr) : 0; 1277 } else { 1278 qp->rq.max = cap->max_recv_wr; 1279 qp->sq.max = cap->max_send_wr; 1280 } 1281 1282 qp->rq.max_gs = cap->max_recv_sge; 1283 qp->sq.max_gs = max_t(int, cap->max_send_sge, 1284 ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, 1285 MTHCA_INLINE_CHUNK_SIZE) / 1286 sizeof (struct mthca_data_seg)); 1287 1288 return 0; 1289 } 1290 1291 int mthca_alloc_qp(struct mthca_dev *dev, 1292 struct mthca_pd *pd, 1293 struct mthca_cq *send_cq, 1294 struct mthca_cq *recv_cq, 1295 enum ib_qp_type type, 1296 enum ib_sig_type send_policy, 1297 struct ib_qp_cap *cap, 1298 struct mthca_qp *qp, 1299 struct ib_udata *udata) 1300 { 1301 int err; 1302 1303 switch (type) { 1304 case IB_QPT_RC: qp->transport = RC; break; 1305 case IB_QPT_UC: qp->transport = UC; break; 1306 case IB_QPT_UD: qp->transport = UD; break; 1307 default: return -EINVAL; 1308 } 1309 1310 err = mthca_set_qp_size(dev, cap, pd, qp); 1311 if (err) 1312 return err; 1313 1314 qp->qpn = mthca_alloc(&dev->qp_table.alloc); 1315 if (qp->qpn == -1) 1316 return -ENOMEM; 1317 1318 /* initialize port to zero for error-catching. */ 1319 qp->port = 0; 1320 1321 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1322 send_policy, qp, udata); 1323 if (err) { 1324 mthca_free(&dev->qp_table.alloc, qp->qpn); 1325 return err; 1326 } 1327 1328 spin_lock_irq(&dev->qp_table.lock); 1329 mthca_array_set(&dev->qp_table.qp, 1330 qp->qpn & (dev->limits.num_qps - 1), qp); 1331 spin_unlock_irq(&dev->qp_table.lock); 1332 1333 return 0; 1334 } 1335 1336 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1337 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 1338 { 1339 if (send_cq == recv_cq) { 1340 spin_lock_irq(&send_cq->lock); 1341 __acquire(&recv_cq->lock); 1342 } else if (send_cq->cqn < recv_cq->cqn) { 1343 spin_lock_irq(&send_cq->lock); 1344 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1345 } else { 1346 spin_lock_irq(&recv_cq->lock); 1347 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); 1348 } 1349 } 1350 1351 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1352 __releases(&send_cq->lock) __releases(&recv_cq->lock) 1353 { 1354 if (send_cq == recv_cq) { 1355 __release(&recv_cq->lock); 1356 spin_unlock_irq(&send_cq->lock); 1357 } else if (send_cq->cqn < recv_cq->cqn) { 1358 spin_unlock(&recv_cq->lock); 1359 spin_unlock_irq(&send_cq->lock); 1360 } else { 1361 spin_unlock(&send_cq->lock); 1362 spin_unlock_irq(&recv_cq->lock); 1363 } 1364 } 1365 1366 int mthca_alloc_sqp(struct mthca_dev *dev, 1367 struct mthca_pd *pd, 1368 struct mthca_cq *send_cq, 1369 struct mthca_cq *recv_cq, 1370 enum ib_sig_type send_policy, 1371 struct ib_qp_cap *cap, 1372 int qpn, 1373 u32 port, 1374 struct mthca_qp *qp, 1375 struct ib_udata *udata) 1376 { 1377 u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; 1378 int err; 1379 1380 qp->transport = MLX; 1381 err = mthca_set_qp_size(dev, cap, pd, qp); 1382 if (err) 1383 return err; 1384 1385 qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE; 1386 qp->sqp->header_buf = 1387 dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, 1388 &qp->sqp->header_dma, GFP_KERNEL); 1389 if (!qp->sqp->header_buf) 1390 return -ENOMEM; 1391 1392 spin_lock_irq(&dev->qp_table.lock); 1393 if (mthca_array_get(&dev->qp_table.qp, mqpn)) 1394 err = -EBUSY; 1395 else 1396 mthca_array_set(&dev->qp_table.qp, mqpn, qp); 1397 spin_unlock_irq(&dev->qp_table.lock); 1398 1399 if (err) 1400 goto err_out; 1401 1402 qp->port = port; 1403 qp->qpn = mqpn; 1404 qp->transport = MLX; 1405 1406 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1407 send_policy, qp, udata); 1408 if (err) 1409 goto err_out_free; 1410 1411 atomic_inc(&pd->sqp_count); 1412 1413 return 0; 1414 1415 err_out_free: 1416 /* 1417 * Lock CQs here, so that CQ polling code can do QP lookup 1418 * without taking a lock. 1419 */ 1420 mthca_lock_cqs(send_cq, recv_cq); 1421 1422 spin_lock(&dev->qp_table.lock); 1423 mthca_array_clear(&dev->qp_table.qp, mqpn); 1424 spin_unlock(&dev->qp_table.lock); 1425 1426 mthca_unlock_cqs(send_cq, recv_cq); 1427 1428 err_out: 1429 dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, 1430 qp->sqp->header_buf, qp->sqp->header_dma); 1431 return err; 1432 } 1433 1434 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) 1435 { 1436 int c; 1437 1438 spin_lock_irq(&dev->qp_table.lock); 1439 c = qp->refcount; 1440 spin_unlock_irq(&dev->qp_table.lock); 1441 1442 return c; 1443 } 1444 1445 void mthca_free_qp(struct mthca_dev *dev, 1446 struct mthca_qp *qp) 1447 { 1448 struct mthca_cq *send_cq; 1449 struct mthca_cq *recv_cq; 1450 1451 send_cq = to_mcq(qp->ibqp.send_cq); 1452 recv_cq = to_mcq(qp->ibqp.recv_cq); 1453 1454 /* 1455 * Lock CQs here, so that CQ polling code can do QP lookup 1456 * without taking a lock. 1457 */ 1458 mthca_lock_cqs(send_cq, recv_cq); 1459 1460 spin_lock(&dev->qp_table.lock); 1461 mthca_array_clear(&dev->qp_table.qp, 1462 qp->qpn & (dev->limits.num_qps - 1)); 1463 --qp->refcount; 1464 spin_unlock(&dev->qp_table.lock); 1465 1466 mthca_unlock_cqs(send_cq, recv_cq); 1467 1468 wait_event(qp->wait, !get_qp_refcount(dev, qp)); 1469 1470 if (qp->state != IB_QPS_RESET) 1471 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1472 NULL, 0); 1473 1474 /* 1475 * If this is a userspace QP, the buffers, MR, CQs and so on 1476 * will be cleaned up in userspace, so all we have to do is 1477 * unref the mem-free tables and free the QPN in our table. 1478 */ 1479 if (!qp->ibqp.uobject) { 1480 mthca_cq_clean(dev, recv_cq, qp->qpn, 1481 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1482 if (send_cq != recv_cq) 1483 mthca_cq_clean(dev, send_cq, qp->qpn, NULL); 1484 1485 mthca_free_memfree(dev, qp); 1486 mthca_free_wqe_buf(dev, qp); 1487 } 1488 1489 mthca_unmap_memfree(dev, qp); 1490 1491 if (is_sqp(dev, qp)) { 1492 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); 1493 dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, 1494 qp->sqp->header_buf, qp->sqp->header_dma); 1495 } else 1496 mthca_free(&dev->qp_table.alloc, qp->qpn); 1497 } 1498 1499 /* Create UD header for an MLX send and build a data segment for it */ 1500 static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind, 1501 const struct ib_ud_wr *wr, 1502 struct mthca_mlx_seg *mlx, 1503 struct mthca_data_seg *data) 1504 { 1505 struct mthca_sqp *sqp = qp->sqp; 1506 int header_size; 1507 int err; 1508 u16 pkey; 1509 1510 ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, 1511 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, 1512 &sqp->ud_header); 1513 1514 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); 1515 if (err) 1516 return err; 1517 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1518 mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | 1519 (sqp->ud_header.lrh.destination_lid == 1520 IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | 1521 (sqp->ud_header.lrh.service_level << 8)); 1522 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1523 mlx->vcrc = 0; 1524 1525 switch (wr->wr.opcode) { 1526 case IB_WR_SEND: 1527 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 1528 sqp->ud_header.immediate_present = 0; 1529 break; 1530 case IB_WR_SEND_WITH_IMM: 1531 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1532 sqp->ud_header.immediate_present = 1; 1533 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; 1534 break; 1535 default: 1536 return -EINVAL; 1537 } 1538 1539 sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0; 1540 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) 1541 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 1542 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); 1543 if (!qp->ibqp.qp_num) 1544 ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index, 1545 &pkey); 1546 else 1547 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, 1548 &pkey); 1549 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 1550 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); 1551 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1552 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? 1553 sqp->qkey : wr->remote_qkey); 1554 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); 1555 1556 header_size = ib_ud_header_pack(&sqp->ud_header, 1557 sqp->header_buf + 1558 ind * MTHCA_UD_HEADER_SIZE); 1559 1560 data->byte_count = cpu_to_be32(header_size); 1561 data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey); 1562 data->addr = cpu_to_be64(sqp->header_dma + 1563 ind * MTHCA_UD_HEADER_SIZE); 1564 1565 return 0; 1566 } 1567 1568 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, 1569 struct ib_cq *ib_cq) 1570 { 1571 unsigned cur; 1572 struct mthca_cq *cq; 1573 1574 cur = wq->head - wq->tail; 1575 if (likely(cur + nreq < wq->max)) 1576 return 0; 1577 1578 cq = to_mcq(ib_cq); 1579 spin_lock(&cq->lock); 1580 cur = wq->head - wq->tail; 1581 spin_unlock(&cq->lock); 1582 1583 return cur + nreq >= wq->max; 1584 } 1585 1586 static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, 1587 u64 remote_addr, u32 rkey) 1588 { 1589 rseg->raddr = cpu_to_be64(remote_addr); 1590 rseg->rkey = cpu_to_be32(rkey); 1591 rseg->reserved = 0; 1592 } 1593 1594 static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, 1595 const struct ib_atomic_wr *wr) 1596 { 1597 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1598 aseg->swap_add = cpu_to_be64(wr->swap); 1599 aseg->compare = cpu_to_be64(wr->compare_add); 1600 } else { 1601 aseg->swap_add = cpu_to_be64(wr->compare_add); 1602 aseg->compare = 0; 1603 } 1604 1605 } 1606 1607 static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, 1608 const struct ib_ud_wr *wr) 1609 { 1610 useg->lkey = cpu_to_be32(to_mah(wr->ah)->key); 1611 useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma); 1612 useg->dqpn = cpu_to_be32(wr->remote_qpn); 1613 useg->qkey = cpu_to_be32(wr->remote_qkey); 1614 1615 } 1616 1617 static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, 1618 const struct ib_ud_wr *wr) 1619 { 1620 memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE); 1621 useg->dqpn = cpu_to_be32(wr->remote_qpn); 1622 useg->qkey = cpu_to_be32(wr->remote_qkey); 1623 } 1624 1625 int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 1626 const struct ib_send_wr **bad_wr) 1627 { 1628 struct mthca_dev *dev = to_mdev(ibqp->device); 1629 struct mthca_qp *qp = to_mqp(ibqp); 1630 void *wqe; 1631 void *prev_wqe; 1632 unsigned long flags; 1633 int err = 0; 1634 int nreq; 1635 int i; 1636 int size; 1637 /* 1638 * f0 and size0 are only used if nreq != 0, and they will 1639 * always be initialized the first time through the main loop 1640 * before nreq is incremented. So nreq cannot become non-zero 1641 * without initializing f0 and size0, and they are in fact 1642 * never used uninitialized. 1643 */ 1644 int size0; 1645 u32 f0; 1646 int ind; 1647 u8 op0 = 0; 1648 1649 spin_lock_irqsave(&qp->sq.lock, flags); 1650 1651 /* XXX check that state is OK to post send */ 1652 1653 ind = qp->sq.next_ind; 1654 1655 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1656 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1657 mthca_err(dev, "SQ %06x full (%u head, %u tail," 1658 " %d max, %d nreq)\n", qp->qpn, 1659 qp->sq.head, qp->sq.tail, 1660 qp->sq.max, nreq); 1661 err = -ENOMEM; 1662 *bad_wr = wr; 1663 goto out; 1664 } 1665 1666 wqe = get_send_wqe(qp, ind); 1667 prev_wqe = qp->sq.last; 1668 qp->sq.last = wqe; 1669 1670 ((struct mthca_next_seg *) wqe)->nda_op = 0; 1671 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 1672 ((struct mthca_next_seg *) wqe)->flags = 1673 ((wr->send_flags & IB_SEND_SIGNALED) ? 1674 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 1675 ((wr->send_flags & IB_SEND_SOLICITED) ? 1676 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 1677 cpu_to_be32(1); 1678 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1679 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1680 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; 1681 1682 wqe += sizeof (struct mthca_next_seg); 1683 size = sizeof (struct mthca_next_seg) / 16; 1684 1685 switch (qp->transport) { 1686 case RC: 1687 switch (wr->opcode) { 1688 case IB_WR_ATOMIC_CMP_AND_SWP: 1689 case IB_WR_ATOMIC_FETCH_AND_ADD: 1690 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, 1691 atomic_wr(wr)->rkey); 1692 wqe += sizeof (struct mthca_raddr_seg); 1693 1694 set_atomic_seg(wqe, atomic_wr(wr)); 1695 wqe += sizeof (struct mthca_atomic_seg); 1696 size += (sizeof (struct mthca_raddr_seg) + 1697 sizeof (struct mthca_atomic_seg)) / 16; 1698 break; 1699 1700 case IB_WR_RDMA_WRITE: 1701 case IB_WR_RDMA_WRITE_WITH_IMM: 1702 case IB_WR_RDMA_READ: 1703 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, 1704 rdma_wr(wr)->rkey); 1705 wqe += sizeof (struct mthca_raddr_seg); 1706 size += sizeof (struct mthca_raddr_seg) / 16; 1707 break; 1708 1709 default: 1710 /* No extra segments required for sends */ 1711 break; 1712 } 1713 1714 break; 1715 1716 case UC: 1717 switch (wr->opcode) { 1718 case IB_WR_RDMA_WRITE: 1719 case IB_WR_RDMA_WRITE_WITH_IMM: 1720 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, 1721 rdma_wr(wr)->rkey); 1722 wqe += sizeof (struct mthca_raddr_seg); 1723 size += sizeof (struct mthca_raddr_seg) / 16; 1724 break; 1725 1726 default: 1727 /* No extra segments required for sends */ 1728 break; 1729 } 1730 1731 break; 1732 1733 case UD: 1734 set_tavor_ud_seg(wqe, ud_wr(wr)); 1735 wqe += sizeof (struct mthca_tavor_ud_seg); 1736 size += sizeof (struct mthca_tavor_ud_seg) / 16; 1737 break; 1738 1739 case MLX: 1740 err = build_mlx_header( 1741 dev, qp, ind, ud_wr(wr), 1742 wqe - sizeof(struct mthca_next_seg), wqe); 1743 if (err) { 1744 *bad_wr = wr; 1745 goto out; 1746 } 1747 wqe += sizeof (struct mthca_data_seg); 1748 size += sizeof (struct mthca_data_seg) / 16; 1749 break; 1750 } 1751 1752 if (wr->num_sge > qp->sq.max_gs) { 1753 mthca_err(dev, "too many gathers\n"); 1754 err = -EINVAL; 1755 *bad_wr = wr; 1756 goto out; 1757 } 1758 1759 for (i = 0; i < wr->num_sge; ++i) { 1760 mthca_set_data_seg(wqe, wr->sg_list + i); 1761 wqe += sizeof (struct mthca_data_seg); 1762 size += sizeof (struct mthca_data_seg) / 16; 1763 } 1764 1765 /* Add one more inline data segment for ICRC */ 1766 if (qp->transport == MLX) { 1767 ((struct mthca_data_seg *) wqe)->byte_count = 1768 cpu_to_be32((1 << 31) | 4); 1769 ((u32 *) wqe)[1] = 0; 1770 wqe += sizeof (struct mthca_data_seg); 1771 size += sizeof (struct mthca_data_seg) / 16; 1772 } 1773 1774 qp->wrid[ind + qp->rq.max] = wr->wr_id; 1775 1776 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { 1777 mthca_err(dev, "opcode invalid\n"); 1778 err = -EINVAL; 1779 *bad_wr = wr; 1780 goto out; 1781 } 1782 1783 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1784 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1785 qp->send_wqe_offset) | 1786 mthca_opcode[wr->opcode]); 1787 wmb(); 1788 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1789 cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | 1790 ((wr->send_flags & IB_SEND_FENCE) ? 1791 MTHCA_NEXT_FENCE : 0)); 1792 1793 if (!nreq) { 1794 size0 = size; 1795 op0 = mthca_opcode[wr->opcode]; 1796 f0 = wr->send_flags & IB_SEND_FENCE ? 1797 MTHCA_SEND_DOORBELL_FENCE : 0; 1798 } 1799 1800 ++ind; 1801 if (unlikely(ind >= qp->sq.max)) 1802 ind -= qp->sq.max; 1803 } 1804 1805 out: 1806 if (likely(nreq)) { 1807 wmb(); 1808 1809 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + 1810 qp->send_wqe_offset) | f0 | op0, 1811 (qp->qpn << 8) | size0, 1812 dev->kar + MTHCA_SEND_DOORBELL, 1813 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1814 } 1815 1816 qp->sq.next_ind = ind; 1817 qp->sq.head += nreq; 1818 1819 spin_unlock_irqrestore(&qp->sq.lock, flags); 1820 return err; 1821 } 1822 1823 int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 1824 const struct ib_recv_wr **bad_wr) 1825 { 1826 struct mthca_dev *dev = to_mdev(ibqp->device); 1827 struct mthca_qp *qp = to_mqp(ibqp); 1828 unsigned long flags; 1829 int err = 0; 1830 int nreq; 1831 int i; 1832 int size; 1833 /* 1834 * size0 is only used if nreq != 0, and it will always be 1835 * initialized the first time through the main loop before 1836 * nreq is incremented. So nreq cannot become non-zero 1837 * without initializing size0, and it is in fact never used 1838 * uninitialized. 1839 */ 1840 int size0; 1841 int ind; 1842 void *wqe; 1843 void *prev_wqe; 1844 1845 spin_lock_irqsave(&qp->rq.lock, flags); 1846 1847 /* XXX check that state is OK to post receive */ 1848 1849 ind = qp->rq.next_ind; 1850 1851 for (nreq = 0; wr; wr = wr->next) { 1852 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 1853 mthca_err(dev, "RQ %06x full (%u head, %u tail," 1854 " %d max, %d nreq)\n", qp->qpn, 1855 qp->rq.head, qp->rq.tail, 1856 qp->rq.max, nreq); 1857 err = -ENOMEM; 1858 *bad_wr = wr; 1859 goto out; 1860 } 1861 1862 wqe = get_recv_wqe(qp, ind); 1863 prev_wqe = qp->rq.last; 1864 qp->rq.last = wqe; 1865 1866 ((struct mthca_next_seg *) wqe)->ee_nds = 1867 cpu_to_be32(MTHCA_NEXT_DBD); 1868 ((struct mthca_next_seg *) wqe)->flags = 0; 1869 1870 wqe += sizeof (struct mthca_next_seg); 1871 size = sizeof (struct mthca_next_seg) / 16; 1872 1873 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 1874 err = -EINVAL; 1875 *bad_wr = wr; 1876 goto out; 1877 } 1878 1879 for (i = 0; i < wr->num_sge; ++i) { 1880 mthca_set_data_seg(wqe, wr->sg_list + i); 1881 wqe += sizeof (struct mthca_data_seg); 1882 size += sizeof (struct mthca_data_seg) / 16; 1883 } 1884 1885 qp->wrid[ind] = wr->wr_id; 1886 1887 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1888 cpu_to_be32(MTHCA_NEXT_DBD | size); 1889 1890 if (!nreq) 1891 size0 = size; 1892 1893 ++ind; 1894 if (unlikely(ind >= qp->rq.max)) 1895 ind -= qp->rq.max; 1896 1897 ++nreq; 1898 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 1899 nreq = 0; 1900 1901 wmb(); 1902 1903 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, 1904 qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, 1905 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1906 1907 qp->rq.next_ind = ind; 1908 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; 1909 } 1910 } 1911 1912 out: 1913 if (likely(nreq)) { 1914 wmb(); 1915 1916 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, 1917 qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, 1918 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1919 } 1920 1921 qp->rq.next_ind = ind; 1922 qp->rq.head += nreq; 1923 1924 spin_unlock_irqrestore(&qp->rq.lock, flags); 1925 return err; 1926 } 1927 1928 int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 1929 const struct ib_send_wr **bad_wr) 1930 { 1931 struct mthca_dev *dev = to_mdev(ibqp->device); 1932 struct mthca_qp *qp = to_mqp(ibqp); 1933 u32 dbhi; 1934 void *wqe; 1935 void *prev_wqe; 1936 unsigned long flags; 1937 int err = 0; 1938 int nreq; 1939 int i; 1940 int size; 1941 /* 1942 * f0 and size0 are only used if nreq != 0, and they will 1943 * always be initialized the first time through the main loop 1944 * before nreq is incremented. So nreq cannot become non-zero 1945 * without initializing f0 and size0, and they are in fact 1946 * never used uninitialized. 1947 */ 1948 int size0; 1949 u32 f0; 1950 int ind; 1951 u8 op0 = 0; 1952 1953 spin_lock_irqsave(&qp->sq.lock, flags); 1954 1955 /* XXX check that state is OK to post send */ 1956 1957 ind = qp->sq.head & (qp->sq.max - 1); 1958 1959 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1960 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { 1961 nreq = 0; 1962 1963 dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | 1964 ((qp->sq.head & 0xffff) << 8) | f0 | op0; 1965 1966 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; 1967 1968 /* 1969 * Make sure that descriptors are written before 1970 * doorbell record. 1971 */ 1972 wmb(); 1973 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); 1974 1975 /* 1976 * Make sure doorbell record is written before we 1977 * write MMIO send doorbell. 1978 */ 1979 wmb(); 1980 1981 mthca_write64(dbhi, (qp->qpn << 8) | size0, 1982 dev->kar + MTHCA_SEND_DOORBELL, 1983 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1984 } 1985 1986 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1987 mthca_err(dev, "SQ %06x full (%u head, %u tail," 1988 " %d max, %d nreq)\n", qp->qpn, 1989 qp->sq.head, qp->sq.tail, 1990 qp->sq.max, nreq); 1991 err = -ENOMEM; 1992 *bad_wr = wr; 1993 goto out; 1994 } 1995 1996 wqe = get_send_wqe(qp, ind); 1997 prev_wqe = qp->sq.last; 1998 qp->sq.last = wqe; 1999 2000 ((struct mthca_next_seg *) wqe)->flags = 2001 ((wr->send_flags & IB_SEND_SIGNALED) ? 2002 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 2003 ((wr->send_flags & IB_SEND_SOLICITED) ? 2004 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 2005 ((wr->send_flags & IB_SEND_IP_CSUM) ? 2006 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) | 2007 cpu_to_be32(1); 2008 if (wr->opcode == IB_WR_SEND_WITH_IMM || 2009 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 2010 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; 2011 2012 wqe += sizeof (struct mthca_next_seg); 2013 size = sizeof (struct mthca_next_seg) / 16; 2014 2015 switch (qp->transport) { 2016 case RC: 2017 switch (wr->opcode) { 2018 case IB_WR_ATOMIC_CMP_AND_SWP: 2019 case IB_WR_ATOMIC_FETCH_AND_ADD: 2020 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, 2021 atomic_wr(wr)->rkey); 2022 wqe += sizeof (struct mthca_raddr_seg); 2023 2024 set_atomic_seg(wqe, atomic_wr(wr)); 2025 wqe += sizeof (struct mthca_atomic_seg); 2026 size += (sizeof (struct mthca_raddr_seg) + 2027 sizeof (struct mthca_atomic_seg)) / 16; 2028 break; 2029 2030 case IB_WR_RDMA_READ: 2031 case IB_WR_RDMA_WRITE: 2032 case IB_WR_RDMA_WRITE_WITH_IMM: 2033 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, 2034 rdma_wr(wr)->rkey); 2035 wqe += sizeof (struct mthca_raddr_seg); 2036 size += sizeof (struct mthca_raddr_seg) / 16; 2037 break; 2038 2039 default: 2040 /* No extra segments required for sends */ 2041 break; 2042 } 2043 2044 break; 2045 2046 case UC: 2047 switch (wr->opcode) { 2048 case IB_WR_RDMA_WRITE: 2049 case IB_WR_RDMA_WRITE_WITH_IMM: 2050 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, 2051 rdma_wr(wr)->rkey); 2052 wqe += sizeof (struct mthca_raddr_seg); 2053 size += sizeof (struct mthca_raddr_seg) / 16; 2054 break; 2055 2056 default: 2057 /* No extra segments required for sends */ 2058 break; 2059 } 2060 2061 break; 2062 2063 case UD: 2064 set_arbel_ud_seg(wqe, ud_wr(wr)); 2065 wqe += sizeof (struct mthca_arbel_ud_seg); 2066 size += sizeof (struct mthca_arbel_ud_seg) / 16; 2067 break; 2068 2069 case MLX: 2070 err = build_mlx_header( 2071 dev, qp, ind, ud_wr(wr), 2072 wqe - sizeof(struct mthca_next_seg), wqe); 2073 if (err) { 2074 *bad_wr = wr; 2075 goto out; 2076 } 2077 wqe += sizeof (struct mthca_data_seg); 2078 size += sizeof (struct mthca_data_seg) / 16; 2079 break; 2080 } 2081 2082 if (wr->num_sge > qp->sq.max_gs) { 2083 mthca_err(dev, "too many gathers\n"); 2084 err = -EINVAL; 2085 *bad_wr = wr; 2086 goto out; 2087 } 2088 2089 for (i = 0; i < wr->num_sge; ++i) { 2090 mthca_set_data_seg(wqe, wr->sg_list + i); 2091 wqe += sizeof (struct mthca_data_seg); 2092 size += sizeof (struct mthca_data_seg) / 16; 2093 } 2094 2095 /* Add one more inline data segment for ICRC */ 2096 if (qp->transport == MLX) { 2097 ((struct mthca_data_seg *) wqe)->byte_count = 2098 cpu_to_be32((1 << 31) | 4); 2099 ((u32 *) wqe)[1] = 0; 2100 wqe += sizeof (struct mthca_data_seg); 2101 size += sizeof (struct mthca_data_seg) / 16; 2102 } 2103 2104 qp->wrid[ind + qp->rq.max] = wr->wr_id; 2105 2106 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { 2107 mthca_err(dev, "opcode invalid\n"); 2108 err = -EINVAL; 2109 *bad_wr = wr; 2110 goto out; 2111 } 2112 2113 ((struct mthca_next_seg *) prev_wqe)->nda_op = 2114 cpu_to_be32(((ind << qp->sq.wqe_shift) + 2115 qp->send_wqe_offset) | 2116 mthca_opcode[wr->opcode]); 2117 wmb(); 2118 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 2119 cpu_to_be32(MTHCA_NEXT_DBD | size | 2120 ((wr->send_flags & IB_SEND_FENCE) ? 2121 MTHCA_NEXT_FENCE : 0)); 2122 2123 if (!nreq) { 2124 size0 = size; 2125 op0 = mthca_opcode[wr->opcode]; 2126 f0 = wr->send_flags & IB_SEND_FENCE ? 2127 MTHCA_SEND_DOORBELL_FENCE : 0; 2128 } 2129 2130 ++ind; 2131 if (unlikely(ind >= qp->sq.max)) 2132 ind -= qp->sq.max; 2133 } 2134 2135 out: 2136 if (likely(nreq)) { 2137 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; 2138 2139 qp->sq.head += nreq; 2140 2141 /* 2142 * Make sure that descriptors are written before 2143 * doorbell record. 2144 */ 2145 wmb(); 2146 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); 2147 2148 /* 2149 * Make sure doorbell record is written before we 2150 * write MMIO send doorbell. 2151 */ 2152 wmb(); 2153 2154 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, 2155 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 2156 } 2157 2158 spin_unlock_irqrestore(&qp->sq.lock, flags); 2159 return err; 2160 } 2161 2162 int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 2163 const struct ib_recv_wr **bad_wr) 2164 { 2165 struct mthca_dev *dev = to_mdev(ibqp->device); 2166 struct mthca_qp *qp = to_mqp(ibqp); 2167 unsigned long flags; 2168 int err = 0; 2169 int nreq; 2170 int ind; 2171 int i; 2172 void *wqe; 2173 2174 spin_lock_irqsave(&qp->rq.lock, flags); 2175 2176 /* XXX check that state is OK to post receive */ 2177 2178 ind = qp->rq.head & (qp->rq.max - 1); 2179 2180 for (nreq = 0; wr; ++nreq, wr = wr->next) { 2181 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 2182 mthca_err(dev, "RQ %06x full (%u head, %u tail," 2183 " %d max, %d nreq)\n", qp->qpn, 2184 qp->rq.head, qp->rq.tail, 2185 qp->rq.max, nreq); 2186 err = -ENOMEM; 2187 *bad_wr = wr; 2188 goto out; 2189 } 2190 2191 wqe = get_recv_wqe(qp, ind); 2192 2193 ((struct mthca_next_seg *) wqe)->flags = 0; 2194 2195 wqe += sizeof (struct mthca_next_seg); 2196 2197 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 2198 err = -EINVAL; 2199 *bad_wr = wr; 2200 goto out; 2201 } 2202 2203 for (i = 0; i < wr->num_sge; ++i) { 2204 mthca_set_data_seg(wqe, wr->sg_list + i); 2205 wqe += sizeof (struct mthca_data_seg); 2206 } 2207 2208 if (i < qp->rq.max_gs) 2209 mthca_set_data_seg_inval(wqe); 2210 2211 qp->wrid[ind] = wr->wr_id; 2212 2213 ++ind; 2214 if (unlikely(ind >= qp->rq.max)) 2215 ind -= qp->rq.max; 2216 } 2217 out: 2218 if (likely(nreq)) { 2219 qp->rq.head += nreq; 2220 2221 /* 2222 * Make sure that descriptors are written before 2223 * doorbell record. 2224 */ 2225 wmb(); 2226 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); 2227 } 2228 2229 spin_unlock_irqrestore(&qp->rq.lock, flags); 2230 return err; 2231 } 2232 2233 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 2234 int index, int *dbd, __be32 *new_wqe) 2235 { 2236 struct mthca_next_seg *next; 2237 2238 /* 2239 * For SRQs, all receive WQEs generate a CQE, so we're always 2240 * at the end of the doorbell chain. 2241 */ 2242 if (qp->ibqp.srq && !is_send) { 2243 *new_wqe = 0; 2244 return; 2245 } 2246 2247 if (is_send) 2248 next = get_send_wqe(qp, index); 2249 else 2250 next = get_recv_wqe(qp, index); 2251 2252 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); 2253 if (next->ee_nds & cpu_to_be32(0x3f)) 2254 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | 2255 (next->ee_nds & cpu_to_be32(0x3f)); 2256 else 2257 *new_wqe = 0; 2258 } 2259 2260 int mthca_init_qp_table(struct mthca_dev *dev) 2261 { 2262 int err; 2263 int i; 2264 2265 spin_lock_init(&dev->qp_table.lock); 2266 2267 /* 2268 * We reserve 2 extra QPs per port for the special QPs. The 2269 * special QP for port 1 has to be even, so round up. 2270 */ 2271 dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; 2272 err = mthca_alloc_init(&dev->qp_table.alloc, 2273 dev->limits.num_qps, 2274 (1 << 24) - 1, 2275 dev->qp_table.sqp_start + 2276 MTHCA_MAX_PORTS * 2); 2277 if (err) 2278 return err; 2279 2280 err = mthca_array_init(&dev->qp_table.qp, 2281 dev->limits.num_qps); 2282 if (err) { 2283 mthca_alloc_cleanup(&dev->qp_table.alloc); 2284 return err; 2285 } 2286 2287 for (i = 0; i < 2; ++i) { 2288 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, 2289 dev->qp_table.sqp_start + i * 2); 2290 if (err) { 2291 mthca_warn(dev, "CONF_SPECIAL_QP returned " 2292 "%d, aborting.\n", err); 2293 goto err_out; 2294 } 2295 } 2296 return 0; 2297 2298 err_out: 2299 for (i = 0; i < 2; ++i) 2300 mthca_CONF_SPECIAL_QP(dev, i, 0); 2301 2302 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2303 mthca_alloc_cleanup(&dev->qp_table.alloc); 2304 2305 return err; 2306 } 2307 2308 void mthca_cleanup_qp_table(struct mthca_dev *dev) 2309 { 2310 int i; 2311 2312 for (i = 0; i < 2; ++i) 2313 mthca_CONF_SPECIAL_QP(dev, i, 0); 2314 2315 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2316 mthca_alloc_cleanup(&dev->qp_table.alloc); 2317 } 2318