/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/*
 * mlx4_qp_event() - dispatch an asynchronous event to a QP's handler.
 * @dev:	mlx4 device
 * @qpn:	QP number reported by the event queue
 * @event_type:	event code, passed through to the QP's event callback
 *
 * Looks the QP up in the radix tree under qp_table->lock and takes a
 * reference before dropping the lock, so the QP cannot be freed while
 * its ->event() callback runs (mlx4_qp_free() waits on qp->free until
 * the refcount hits zero).  Unknown QPNs are logged and ignored.
 */
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	/* Callback runs outside the lock; the reference taken above keeps
	 * the QP alive for its duration. */
	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/*
 * mlx4_qp_modify() - issue the firmware command for a QP state transition.
 * @dev:	mlx4 device
 * @mtt:	MTT describing the QP's buffer (programmed into the context
 *		only on the RST->INIT transition)
 * @cur_state:	current QP state
 * @new_state:	requested QP state
 * @context:	QP context to pass to firmware; local_qpn is filled in here
 * @optpar:	optional-parameter mask, written to the head of the mailbox
 * @sqd_event:	if non-zero, sets bit 31 of the input modifier (SQ drained
 *		event request -- see firmware command interface)
 * @qp:		QP being transitioned
 *
 * The op[][] table maps (cur_state, new_state) to the firmware opcode;
 * a zero entry means the transition is not supported and -EINVAL is
 * returned.  Transitions to RESET are special-cased: they take no
 * mailbox and use opcode modifier 2 (as does the final mlx4_cmd() call
 * when new_state is RST -- presumably this modifier selects the
 * "don't return context" flavor; confirm against the firmware spec).
 *
 * Mailbox layout: optpar (big-endian u32) at offset 0, the QP context
 * at offset 8.
 *
 * Returns 0 on success or a negative error code.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	/* Valid state-transition opcodes; unlisted pairs stay 0 (invalid). */
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	/* Any-state -> RESET takes no mailbox: just the QPN and modifier 2. */
	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
		return mlx4_cmd(dev, 0, qp->qpn, 2,
				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* On RST->INIT, program the MTT base address and page size into the
	 * context.  mtt_base_addr_h takes the high bits of the 64-bit ICM
	 * address; the low word is stored big-endian. */
	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	/* Bit 31 of the input modifier requests an SQ-drained event. */
	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

/*
 * mlx4_qp_reserve_range() - reserve a contiguous, aligned range of QPNs.
 * @dev:	mlx4 device
 * @cnt:	number of QPNs to reserve
 * @align:	required alignment of the first QPN
 * @base:	out: first QPN of the reserved range
 *
 * Returns 0 on success, -ENOMEM if the bitmap has no suitable range.
 */
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int qpn;

	qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
	if (qpn == -1)
		return -ENOMEM;

	*base = qpn;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

/*
 * mlx4_qp_release_range() - return a QPN range to the allocator.
 *
 * Ranges starting inside the special-QP block (below sqp_start + 8,
 * i.e. the firmware-reserved QPs plus the 8 special QPs) are never
 * freed -- they were not bitmap-allocated in the first place.
 */
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	if (base_qpn < dev->caps.sqp_start + 8)
		return;

	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);

/*
 * mlx4_qp_alloc() - bind software state to an already-reserved QPN.
 * @dev:	mlx4 device
 * @qpn:	QPN to use (must be non-zero; typically from
 *		mlx4_qp_reserve_range())
 * @qp:		caller-allocated QP structure to initialize
 *
 * Maps the ICM table entries the hardware needs for this QPN (QP
 * context, auxiliary context, alternate-path context, RDMA responder
 * resources, and the cMPT entry), then makes the QP visible to the
 * event path by inserting it into the radix tree keyed by
 * qpn & (num_qps - 1).  On any failure all previously-taken ICM
 * references are dropped via the goto cleanup chain.
 *
 * The refcount starts at 1; mlx4_qp_free() drops it and waits for
 * outstanding event handlers before tearing down.
 */
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
	if (err)
		goto err_put_rdmarc;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_put_cmpt;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_put_cmpt:
	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

err_out:
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

/*
 * mlx4_qp_remove() - hide a QP from the async event path.
 *
 * Deletes the radix-tree entry under the table lock; after this,
 * mlx4_qp_event() can no longer find the QP.  Callers typically follow
 * with mlx4_qp_free() to wait out in-flight handlers and drop ICM refs.
 */
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

/*
 * mlx4_qp_free() - release a QP's software state and ICM references.
 *
 * Drops the initial reference taken in mlx4_qp_alloc() and blocks until
 * every concurrent mlx4_qp_event() handler has also dropped its
 * reference, then returns the five ICM table entries.  The QPN itself
 * is not released here -- see mlx4_qp_release_range().
 */
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

/* Tell firmware where the special-QP block starts (0 disables it). */
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B);
}

/*
 * mlx4_init_qp_table() - set up QPN allocation and the QP lookup tree.
 *
 * Lays out the QP number space: the bottom is reserved for firmware QPs
 * plus the 8-aligned special-QP block; each non-FW reserved region is
 * packed at the top of the space (largest region lowest), and the
 * bitmap hands out everything in between.  Finishes by programming the
 * special-QP base into firmware.
 *
 * Returns 0 on success or a negative error code.
 */
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	dev->caps.sqp_start =
		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j, tmp;
		int last_base = dev->caps.num_qps;

		/* Index 0 (the FW region) is handled above; sort the rest. */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		/* Bubble sort regions 1.. by descending reserved count. */
		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
			for (j = 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
					tmp = sort[j];
					sort[j] = sort[j - 1];
					sort[j - 1] = tmp;
				}
			}
		}

		/* Carve each region off the top of the QP number space. */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}

	}

	/* Mask (1 << 23) - 1 keeps allocated QPNs out of the XRC MSB. */
	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
			       (1 << 23) - 1, dev->caps.sqp_start + 8,
			       reserved_from_top);
	if (err)
		return err;

	return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	mlx4_CONF_SPECIAL_QP(dev, 0);
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}

/*
 * mlx4_qp_query() - read a QP's current context from firmware.
 * @dev:	mlx4 device
 * @qp:		QP to query
 * @context:	out: context copied from the firmware mailbox (offset 8)
 *
 * Returns 0 on success or a negative error code.
 */
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof *context);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

/*
 * mlx4_qp_to_ready() - walk a QP from RESET to RTS.
 * @dev:	mlx4 device
 * @mtt:	MTT for the QP buffer (used by the RST->INIT step)
 * @context:	QP context; bits 31:28 of ->flags are rewritten with the
 *		target state before each transition
 * @qp:		QP to transition
 * @qp_state:	in/out: updated after every successful step, so on error
 *		it holds the last state actually reached
 *
 * Performs RST->INIT->RTR->RTS via mlx4_qp_modify() with no optional
 * parameters and no SQD event.  Stops and returns the error from the
 * first failed transition.
 */
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		/* Encode the next state in the top nibble of flags. */
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: "
				 "%d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);