Lines matching full:gsi (Linux kernel, drivers/infiniband/hw/mlx5/gsi.c)

68 /* Call with gsi->lock locked */
69 static void generate_completions(struct mlx5_ib_gsi_qp *gsi) in generate_completions() argument
71 struct ib_cq *gsi_cq = gsi->ibqp.send_cq; in generate_completions()
75 for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; in generate_completions()
77 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr]; in generate_completions()
82 if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR || in generate_completions()
89 gsi->outstanding_ci = index; in generate_completions()
94 struct mlx5_ib_gsi_qp *gsi = cq->cq_context; in handle_single_completion() local
100 spin_lock_irqsave(&gsi->lock, flags); in handle_single_completion()
105 wr->wc.qp = &gsi->ibqp; in handle_single_completion()
107 generate_completions(gsi); in handle_single_completion()
108 spin_unlock_irqrestore(&gsi->lock, flags); in handle_single_completion()
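
The fragments above are the send-completion path: outstanding work requests live in a fixed ring indexed by free-running producer/consumer counters, and completions are flushed in submission order up to the first slot that is still in flight. A minimal user-space model of that mechanism follows; all names here are illustrative stand-ins, not the driver's types:

#include <stdio.h>
#include <stdint.h>

#define MAX_SEND_WR 4                     /* stands in for gsi->cap.max_send_wr */

struct wr_slot {
        int wr_id;
        int completed;
};

static struct wr_slot ring[MAX_SEND_WR];
static uint32_t pi, ci;                   /* free-running producer/consumer indices */

/* Reserve the next ring slot, failing when the ring is full
 * (the counters may wrap; only their difference matters). */
static int add_outstanding(int wr_id)
{
        if (pi == ci + MAX_SEND_WR)
                return -1;
        ring[pi % MAX_SEND_WR] = (struct wr_slot){ .wr_id = wr_id };
        pi++;
        return 0;
}

/* Flush completions in submission order, stopping at the first
 * work request that has not completed yet. */
static void flush_completions(void)
{
        uint32_t index;

        for (index = ci; index != pi; index++) {
                struct wr_slot *slot = &ring[index % MAX_SEND_WR];

                if (!slot->completed)
                        break;
                printf("completion for wr %d\n", slot->wr_id);
                slot->completed = 0;
        }
        ci = index;
}

int main(void)
{
        add_outstanding(1);
        add_outstanding(2);
        ring[0].completed = 1;            /* only the first WR has finished */
        flush_completions();              /* reports wr 1, leaves wr 2 pending */
        return 0;
}
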
115 struct mlx5_ib_gsi_qp *gsi; in mlx5_ib_gsi_create_qp() local
122 mlx5_ib_dbg(dev, "creating GSI QP\n"); in mlx5_ib_gsi_create_qp()
126 "invalid port number %d during GSI QP creation\n", in mlx5_ib_gsi_create_qp()
131 gsi = kzalloc(sizeof(*gsi), GFP_KERNEL); in mlx5_ib_gsi_create_qp()
132 if (!gsi) in mlx5_ib_gsi_create_qp()
135 gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL); in mlx5_ib_gsi_create_qp()
136 if (!gsi->tx_qps) { in mlx5_ib_gsi_create_qp()
141 gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr, in mlx5_ib_gsi_create_qp()
142 sizeof(*gsi->outstanding_wrs), in mlx5_ib_gsi_create_qp()
144 if (!gsi->outstanding_wrs) { in mlx5_ib_gsi_create_qp()
149 mutex_init(&gsi->mutex); in mlx5_ib_gsi_create_qp()
153 if (dev->devr.ports[port_num - 1].gsi) { in mlx5_ib_gsi_create_qp()
154 mlx5_ib_warn(dev, "GSI QP already exists on port %d\n", in mlx5_ib_gsi_create_qp()
159 gsi->num_qps = num_qps; in mlx5_ib_gsi_create_qp()
160 spin_lock_init(&gsi->lock); in mlx5_ib_gsi_create_qp()
162 gsi->cap = init_attr->cap; in mlx5_ib_gsi_create_qp()
163 gsi->sq_sig_type = init_attr->sq_sig_type; in mlx5_ib_gsi_create_qp()
164 gsi->ibqp.qp_num = 1; in mlx5_ib_gsi_create_qp()
165 gsi->port_num = port_num; in mlx5_ib_gsi_create_qp()
167 gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0, in mlx5_ib_gsi_create_qp()
169 if (IS_ERR(gsi->cq)) { in mlx5_ib_gsi_create_qp()
170 mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n", in mlx5_ib_gsi_create_qp()
171 PTR_ERR(gsi->cq)); in mlx5_ib_gsi_create_qp()
172 ret = PTR_ERR(gsi->cq); in mlx5_ib_gsi_create_qp()
177 hw_init_attr.send_cq = gsi->cq; in mlx5_ib_gsi_create_qp()
183 gsi->rx_qp = ib_create_qp(pd, &hw_init_attr); in mlx5_ib_gsi_create_qp()
184 if (IS_ERR(gsi->rx_qp)) { in mlx5_ib_gsi_create_qp()
185 mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n", in mlx5_ib_gsi_create_qp()
186 PTR_ERR(gsi->rx_qp)); in mlx5_ib_gsi_create_qp()
187 ret = PTR_ERR(gsi->rx_qp); in mlx5_ib_gsi_create_qp()
191 dev->devr.ports[init_attr->port_num - 1].gsi = gsi; in mlx5_ib_gsi_create_qp()
195 return &gsi->ibqp; in mlx5_ib_gsi_create_qp()
198 ib_free_cq(gsi->cq); in mlx5_ib_gsi_create_qp()
201 kfree(gsi->outstanding_wrs); in mlx5_ib_gsi_create_qp()
203 kfree(gsi->tx_qps); in mlx5_ib_gsi_create_qp()
205 kfree(gsi); in mlx5_ib_gsi_create_qp()
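
The error labels near the end of mlx5_ib_gsi_create_qp() release the CQ, the outstanding-WR array, the TX QP array and finally the gsi structure, i.e. the allocations are unwound in reverse order of creation. A standalone sketch of that goto-unwind idiom, with placeholder resources rather than the driver's:

#include <stdlib.h>

struct ctx {
        void *qps;
        void *wrs;
        void *cq;
};

/* Allocate resources in order; on failure free only what already exists,
 * in reverse order, mirroring the err_free_* labels in the driver. */
static struct ctx *ctx_create(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;

        c->qps = calloc(4, 16);
        if (!c->qps)
                goto err_free_ctx;

        c->wrs = calloc(8, 32);
        if (!c->wrs)
                goto err_free_qps;

        c->cq = malloc(64);
        if (!c->cq)
                goto err_free_wrs;

        return c;

err_free_wrs:
        free(c->wrs);
err_free_qps:
        free(c->qps);
err_free_ctx:
        free(c);
        return NULL;
}

int main(void)
{
        struct ctx *c = ctx_create();

        if (c) {
                free(c->cq);
                free(c->wrs);
                free(c->qps);
                free(c);
        }
        return 0;
}
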
212 struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); in mlx5_ib_gsi_destroy_qp() local
213 const int port_num = gsi->port_num; in mlx5_ib_gsi_destroy_qp()
217 mlx5_ib_dbg(dev, "destroying GSI QP\n"); in mlx5_ib_gsi_destroy_qp()
220 ret = ib_destroy_qp(gsi->rx_qp); in mlx5_ib_gsi_destroy_qp()
222 mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n", in mlx5_ib_gsi_destroy_qp()
227 dev->devr.ports[port_num - 1].gsi = NULL; in mlx5_ib_gsi_destroy_qp()
229 gsi->rx_qp = NULL; in mlx5_ib_gsi_destroy_qp()
231 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) { in mlx5_ib_gsi_destroy_qp()
232 if (!gsi->tx_qps[qp_index]) in mlx5_ib_gsi_destroy_qp()
234 WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index])); in mlx5_ib_gsi_destroy_qp()
235 gsi->tx_qps[qp_index] = NULL; in mlx5_ib_gsi_destroy_qp()
238 ib_free_cq(gsi->cq); in mlx5_ib_gsi_destroy_qp()
240 kfree(gsi->outstanding_wrs); in mlx5_ib_gsi_destroy_qp()
241 kfree(gsi->tx_qps); in mlx5_ib_gsi_destroy_qp()
242 kfree(gsi); in mlx5_ib_gsi_destroy_qp()
247 static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi) in create_gsi_ud_qp() argument
249 struct ib_pd *pd = gsi->rx_qp->pd; in create_gsi_ud_qp()
251 .event_handler = gsi->rx_qp->event_handler, in create_gsi_ud_qp()
252 .qp_context = gsi->rx_qp->qp_context, in create_gsi_ud_qp()
253 .send_cq = gsi->cq, in create_gsi_ud_qp()
254 .recv_cq = gsi->rx_qp->recv_cq, in create_gsi_ud_qp()
256 .max_send_wr = gsi->cap.max_send_wr, in create_gsi_ud_qp()
257 .max_send_sge = gsi->cap.max_send_sge, in create_gsi_ud_qp()
258 .max_inline_data = gsi->cap.max_inline_data, in create_gsi_ud_qp()
260 .sq_sig_type = gsi->sq_sig_type, in create_gsi_ud_qp()
268 static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp, in modify_to_rts() argument
280 attr.port_num = gsi->port_num; in modify_to_rts()
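
create_gsi_ud_qp() copies the GSI QP's saved capabilities into the init attributes of each UD transmit QP, and modify_to_rts() then walks the new QP through INIT, RTR and RTS with the proper P_Key index, Q_Key and port. A hedged user-space analogue of both steps using libibverbs (the pd and CQ arguments are assumed to exist; the Q_Key value is the well-known GSI Q_Key, used here only for illustration):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Sketch: create a UD transmit QP that mirrors a set of saved send caps. */
static struct ibv_qp *create_ud_tx_qp(struct ibv_pd *pd, struct ibv_cq *send_cq,
                                      struct ibv_cq *recv_cq,
                                      const struct ibv_qp_cap *cap)
{
        struct ibv_qp_init_attr init_attr = {
                .send_cq = send_cq,
                .recv_cq = recv_cq,
                .cap = {
                        .max_send_wr     = cap->max_send_wr,
                        .max_send_sge    = cap->max_send_sge,
                        .max_inline_data = cap->max_inline_data,
                        /* no receive side: the hardware GSI QP does the receiving */
                },
                .qp_type    = IBV_QPT_UD,
                .sq_sig_all = 0,          /* complete only signaled sends */
        };

        return ibv_create_qp(pd, &init_attr);
}

/* Sketch: drive a UD QP to RTS (INIT -> RTR -> RTS). */
static int modify_to_rts(struct ibv_qp *qp, uint16_t pkey_index, uint8_t port_num)
{
        struct ibv_qp_attr attr = { 0 };
        int ret;

        attr.qp_state   = IBV_QPS_INIT;
        attr.pkey_index = pkey_index;
        attr.port_num   = port_num;
        attr.qkey       = 0x80010000;     /* well-known GSI (QP1) Q_Key */
        ret = ibv_modify_qp(qp, &attr,
                            IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                            IBV_QP_PORT | IBV_QP_QKEY);
        if (ret)
                return ret;

        attr.qp_state = IBV_QPS_RTR;
        ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE);
        if (ret)
                return ret;

        attr.qp_state = IBV_QPS_RTS;
        attr.sq_psn   = 0;
        return ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_SQ_PSN);
}
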
308 static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index) in setup_qp() argument
310 struct ib_device *device = gsi->rx_qp->device; in setup_qp()
317 ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey); in setup_qp()
320 gsi->port_num, qp_index); in setup_qp()
326 gsi->port_num, qp_index); in setup_qp()
330 spin_lock_irqsave(&gsi->lock, flags); in setup_qp()
331 qp = gsi->tx_qps[qp_index]; in setup_qp()
332 spin_unlock_irqrestore(&gsi->lock, flags); in setup_qp()
334 mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n", in setup_qp()
335 gsi->port_num, qp_index); in setup_qp()
339 qp = create_gsi_ud_qp(gsi); in setup_qp()
341 mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n", in setup_qp()
346 ret = modify_to_rts(gsi, qp, qp_index); in setup_qp()
350 spin_lock_irqsave(&gsi->lock, flags); in setup_qp()
351 WARN_ON_ONCE(gsi->tx_qps[qp_index]); in setup_qp()
352 gsi->tx_qps[qp_index] = qp; in setup_qp()
353 spin_unlock_irqrestore(&gsi->lock, flags); in setup_qp()
361 static void setup_qps(struct mlx5_ib_gsi_qp *gsi) in setup_qps() argument
365 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) in setup_qps()
366 setup_qp(gsi, qp_index); in setup_qps()
373 struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); in mlx5_ib_gsi_modify_qp() local
376 mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state); in mlx5_ib_gsi_modify_qp()
378 mutex_lock(&gsi->mutex); in mlx5_ib_gsi_modify_qp()
379 ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask); in mlx5_ib_gsi_modify_qp()
381 mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret); in mlx5_ib_gsi_modify_qp()
385 if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS) in mlx5_ib_gsi_modify_qp()
386 setup_qps(gsi); in mlx5_ib_gsi_modify_qp()
389 mutex_unlock(&gsi->mutex); in mlx5_ib_gsi_modify_qp()
398 struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); in mlx5_ib_gsi_query_qp() local
401 mutex_lock(&gsi->mutex); in mlx5_ib_gsi_query_qp()
402 ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr); in mlx5_ib_gsi_query_qp()
403 qp_init_attr->cap = gsi->cap; in mlx5_ib_gsi_query_qp()
404 mutex_unlock(&gsi->mutex); in mlx5_ib_gsi_query_qp()
409 /* Call with gsi->lock locked */
410 static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi, in mlx5_ib_add_outstanding_wr() argument
413 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); in mlx5_ib_add_outstanding_wr()
416 if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) { in mlx5_ib_add_outstanding_wr()
417 mlx5_ib_warn(dev, "no available GSI work request.\n"); in mlx5_ib_add_outstanding_wr()
421 gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi % in mlx5_ib_add_outstanding_wr()
422 gsi->cap.max_send_wr]; in mlx5_ib_add_outstanding_wr()
423 gsi->outstanding_pi++; in mlx5_ib_add_outstanding_wr()
440 /* Call with gsi->lock locked */
441 static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi, in mlx5_ib_gsi_silent_drop() argument
448 .qp = &gsi->ibqp, in mlx5_ib_gsi_silent_drop()
452 ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc); in mlx5_ib_gsi_silent_drop()
456 generate_completions(gsi); in mlx5_ib_gsi_silent_drop()
461 /* Call with gsi->lock locked */
462 static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr) in get_tx_qp() argument
464 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); in get_tx_qp()
468 return gsi->rx_qp; in get_tx_qp()
470 if (qp_index >= gsi->num_qps) in get_tx_qp()
473 return gsi->tx_qps[qp_index]; in get_tx_qp()
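
setup_qps() creates one UD transmit QP per P_Key table index (skipping entries that are empty), and get_tx_qp() later picks the QP by the work request's pkey_index, returning NULL, and hence a silent drop, when no QP exists for that index. A toy standalone model of that mapping (the P_Key values and "QP numbers" below are made up):

#include <stdio.h>

#define NUM_PKEYS 4

/* Toy stand-ins: a P_Key table and one "TX QP" slot per table index.
 * Index 2 is empty, so no QP is set up for it, mirroring setup_qp()
 * skipping invalid P_Keys. */
static const unsigned short pkey_table[NUM_PKEYS] = { 0xffff, 0x8001, 0x0000, 0x8002 };
static int tx_qp_for_index[NUM_PKEYS];    /* 0 = no QP created */

static void setup_qps(void)
{
        for (int i = 0; i < NUM_PKEYS; i++)
                if (pkey_table[i])
                        tx_qp_for_index[i] = 100 + i;   /* pretend QP number */
}

/* Selection at post time: the work request's pkey_index picks the QP. */
static int get_tx_qp(int pkey_index)
{
        if (pkey_index >= NUM_PKEYS)
                return 0;
        return tx_qp_for_index[pkey_index];
}

int main(void)
{
        setup_qps();
        printf("pkey_index 1 -> qp %d\n", get_tx_qp(1));  /* 101 */
        printf("pkey_index 2 -> qp %d\n", get_tx_qp(2));  /* 0: would be dropped */
        return 0;
}
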
479 struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); in mlx5_ib_gsi_post_send() local
489 spin_lock_irqsave(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
490 tx_qp = get_tx_qp(gsi, &cur_wr); in mlx5_ib_gsi_post_send()
492 ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr); in mlx5_ib_gsi_post_send()
495 spin_unlock_irqrestore(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
499 ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL); in mlx5_ib_gsi_post_send()
506 gsi->outstanding_pi = (gsi->outstanding_pi - 1) % in mlx5_ib_gsi_post_send()
507 gsi->cap.max_send_wr; in mlx5_ib_gsi_post_send()
510 spin_unlock_irqrestore(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
516 spin_unlock_irqrestore(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
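
mlx5_ib_gsi_post_send() performs the whole sequence under gsi->lock: pick the TX QP, reserve a slot in the outstanding ring, hand the work request to hardware, and release the reservation again if the post fails. A simplified lock-protected model of that flow (a pthread mutex stands in for the spinlock and hw_post() for the real verbs post call; both are placeholders, not driver APIs):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SEND_WR 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pi, ci;                   /* outstanding-ring indices */

/* Stand-in for the real hardware post; pretend it may fail. */
static int hw_post(int wr_id)
{
        (void)wr_id;
        return 0;
}

/* Reserve a ring slot under the lock, try to post, and release the
 * reservation again on failure, so the producer index stays consistent
 * with what was actually handed to hardware. */
static int post_send(int wr_id)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (pi == ci + MAX_SEND_WR) {     /* no free slot in the ring */
                ret = -1;
                goto out;
        }
        pi++;

        ret = hw_post(wr_id);
        if (ret)
                pi--;                     /* undo the reservation */
out:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("post_send -> %d\n", post_send(1));
        return 0;
}
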
524 struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); in mlx5_ib_gsi_post_recv() local
526 return ib_post_recv(gsi->rx_qp, wr, bad_wr); in mlx5_ib_gsi_post_recv()
529 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi) in mlx5_ib_gsi_pkey_change() argument
531 if (!gsi) in mlx5_ib_gsi_pkey_change()
534 mutex_lock(&gsi->mutex); in mlx5_ib_gsi_pkey_change()
535 setup_qps(gsi); in mlx5_ib_gsi_pkey_change()
536 mutex_unlock(&gsi->mutex); in mlx5_ib_gsi_pkey_change()