/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

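/*
 * Hardware QP context, passed to (and returned by) the firmware in a
 * MODIFY_QP/QUERY_QP mailbox.  Several fields are used by only one
 * HCA generation: Tavor ignores the *_size_stride and *_wqe_counter
 * fields, while the mem-free Arbel ignores tavor_sched_queue (see the
 * per-field comments below).
 */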
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

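/*
 * Called when the HCA reports an asynchronous event for a QP: look
 * the QP up, bump its reference count so it can't be freed underneath
 * us, and dispatch the event to the consumer's event handler.
 */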
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
			   event_type, qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}

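/*
 * Compute the params2 access bits for a modify-QP command.  If the
 * incoming attributes don't update a value, the QP's cached value is
 * used instead.  A QP with no responder resources (dest_rd_atomic ==
 * 0) cannot honor remote reads or atomics, so the flags are masked
 * down to remote write only in that case.
 */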
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

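/*
 * Query a QP: if it isn't in RESET (which we track in software), ask
 * the firmware for the current QP context with QUERY_QP and translate
 * the hardware fields back into ib_qp_attr values.
 */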
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
	if (err) {
		mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state          = to_ib_qp_state(mthca_state);
	qp_attr->qp_state  = qp->state;
	qp_attr->path_mtu  = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey        = be32_to_cpu(context->qkey);
	qp_attr->rq_psn      = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn      = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout     = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt   = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry   = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap         = qp_attr->cap;
	qp_init_attr->sq_sig_type = qp->sq_policy;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

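/*
 * Fill in a hardware address path from an ib_ah_attr.  Returns -1 if
 * the requested sgid_index doesn't fit in the device's GID table,
 * 0 on success.
 */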
static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}

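/*
 * Do the real work of a modify-QP: build the new hardware QP context
 * and the opt_param_mask of fields to change, then issue MODIFY_QP to
 * the firmware.  The state transition itself has already been
 * validated by the caller.
 */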
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len - 1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					  (MTHCA_FLIGHT_LIMIT << 24) |
					  MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

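	/*
	 * The hardware access bits (RWE/RRE/RAE) depend on both the
	 * requested access flags and the responder-resource count, so
	 * recompute and update all three whenever either attribute
	 * changes.
	 */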
	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2 |=
			get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event);
	if (err) {
		mthca_warn(dev, "modify QP %d->%d returned %d.\n",
			   cur_state, new_state, err);
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

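/*
 * Maximum payload that fits in a WQE of desc_sz bytes, after
 * subtracting the next-segment header and any transport-specific
 * non-data segments.
 */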
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ?
		max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

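	/*
	 * The receive queue occupies the start of the WQE buffer; the
	 * send queue begins at send_wqe_offset, aligned to the send
	 * WQE size so that a send WQE index can be turned into an
	 * offset with a plain shift (see get_send_wqe()).
	 */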
	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

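/*
 * Common initialization for kernel and userspace QPs: set up software
 * state, map mem-free context memory, allocate the WQE buffer and
 * doorbells, and link the WQEs of each queue together through their
 * next segments.
 */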
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;
	struct mthca_next_seg *next;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	} else {
		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
					      qp->rq.wqe_shift) | 1);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

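/*
 * Allocate a regular (non-special) QP: pick a QP number, initialize
 * the common state, and enter the QP in the lookup table used by the
 * CQ-polling and event code to map QPNs back to mthca_qp structures.
 */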
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

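/*
 * Allocate a special QP (a QP0/QP1 proxy).  The hardware QP number is
 * derived from the special-QP base: mqpn lays out QP0 for each port
 * followed by QP1 for each port, starting at sqp_start.  Special QPs
 * also get a DMA-coherent buffer used to build MLX (raw UD) headers.
 */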
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}

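/*
 * Tear down a QP: remove it from the lookup table, wait for any
 * outstanding references (e.g. from async events) to drop, move the
 * QP back to RESET in hardware, and release all of its resources.
 */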
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_ud_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->swap);
		aseg->compare  = cpu_to_be64(wr->compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare  = 0;
	}
}

static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_ud_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->remote_qpn);
	useg->qkey    = cpu_to_be32(wr->remote_qkey);
}

static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_ud_wr *wr)
{
	memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->remote_qpn);
	useg->qkey = cpu_to_be32(wr->remote_qkey);
}

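/*
 * Post a list of send work requests on a Tavor HCA.  Each WQE is
 * built in place, linked to the previous WQE through its next
 * segment, and the whole chain is kicked off with a single write to
 * the send doorbell register at the end.
 */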
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

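		/*
		 * "size" counts the WQE length in 16-byte units, which
		 * is how descriptor sizes are expressed to the
		 * hardware; each gather entry below adds one 16-byte
		 * data segment.
		 */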
		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

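/*
 * Post a list of receive work requests on a Tavor HCA.  Tavor limits
 * how many WQEs a single doorbell may cover, so the receive doorbell
 * is rung every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests as well as
 * once at the end.
 */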
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	/*
	 * Make sure doorbells don't leak out of RQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

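/*
 * Post a list of send work requests on a mem-free (Arbel) HCA.
 * Unlike Tavor, Arbel also keeps a doorbell record in memory that
 * must be updated before the MMIO doorbell is written, and at most
 * MTHCA_ARBEL_MAX_WQES_PER_SEND_DB WQEs may be covered per doorbell.
 */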
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
				((qp->sq.head & 0xffff) << 8) | f0 | op0;

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();

			mthca_write64(dbhi, (qp->qpn << 8) | size0,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

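		/*
		 * Link the new WQE into the chain: patch the previous
		 * WQE's next segment to point at it, and only after a
		 * barrier publish its size and doorbell bits, so the
		 * HCA never sees a next-WQE pointer whose control word
		 * is still stale (our reading of the hardware
		 * requirement; the Tavor send path above does the
		 * same).
		 */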
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
			f0 = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

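/*
 * Post a list of receive work requests on an Arbel (mem-free) mode
 * QP.  There is no MMIO receive doorbell on this path: the HCA picks
 * up new WQEs through the doorbell record in host memory, so a write
 * barrier followed by the *qp->rq.db update is the only notification
 * needed.
 */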
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

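/*
 * Used by the CQ error-handling code when flushing a WQE that
 * completed in error: report in *dbd whether the WQE at 'index' has
 * its doorbell (DBD) bit set, and reconstruct in *new_wqe the
 * next-WQE word from that WQE's next-address and size fields, or 0
 * if this WQE ends the doorbell chain.
 */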
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}

int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2);
		if (err) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "%d, aborting.\n", err);
			goto err_out;
		}
	}
	return 0;

err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}