// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

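/* Re-walk the first partially acknowledged send or RDMA write wqe during
 * a retry: advance the dma state past the npsn packets that were already
 * acknowledged so that only the remainder is resent.
 */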
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

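/* Rewind the requester to the last PSN acknowledged by the completer
 * (qp->comp.psn) and reset the state of every unacknowledged wqe on the
 * send queue so that it will be resent.
 */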
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index = cons;
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

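/* The RNR NAK timer has expired; request a send queue retry and schedule
 * the send task if the QP is still valid.
 */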
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = timer_container_of(qp, t, rnr_nak_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "nak timer fired\n");

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* request a send queue retry */
		qp->req.need_retry = 1;
		qp->req.wait_for_rnr_timer = 0;
		rxe_sched_task(&qp->send_task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

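/* If the QP is in the SQD state, check whether the send queue has fully
 * drained and, if so, clear sq_draining and report IB_EVENT_SQ_DRAINED
 * to the consumer's event handler.
 */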
static void req_check_sq_drain_done(struct rxe_qp *qp)
{
	struct rxe_queue *q;
	unsigned int index;
	unsigned int cons;
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp_state(qp) == IB_QPS_SQD) {
		q = qp->sq.queue;
		index = qp->req.wqe_index;
		cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
		wqe = queue_addr_from_index(q, cons);

		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		do {
			if (!qp->attr.sq_draining)
				/* comp just finished */
				break;

			if (wqe && ((index != cons) ||
					(wqe->state != wqe_state_posted)))
				/* comp not done yet */
				break;

			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
			return;
		} while (0);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

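/* Return the wqe at the requester index, or NULL if the requester has
 * caught up with the producer and there is no new work on the send queue.
 */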
static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int prod;

	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
	if (index == prod)
		return NULL;
	else
		return queue_addr_from_index(q, index);
}

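/* Fetch the next wqe for the requester to work on. While the QP is in the
 * SQD state only wqes that are already being processed are returned; newly
 * posted wqes are held back until the drain completes.
 */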
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	req_check_sq_drain_done(qp);

	wqe = __req_next_wqe(qp);
	if (wqe == NULL)
		return NULL;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
		     (wqe->state != wqe_state_processing))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF) see IBA 10.6.5.1
	 * Requires ALL previous operations on the send queue
	 * are complete. Make mandatory for the rxe driver.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence see IBA 10.8.3.3
	 * Requires that all previous read and atomic operations
	 * are complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

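/* Select the RC wire opcode for the next packet of a work request.
 * Multi-packet sends and RDMA writes step through the FIRST/MIDDLE/LAST
 * opcodes based on the opcode of the previous packet, while requests that
 * fit in a single packet use the ONLY form.
 */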
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_FLUSH:
		return IB_OPCODE_RC_FLUSH;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_ATOMIC_WRITE:
		return IB_OPCODE_RC_ATOMIC_WRITE;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

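/* Dispatch opcode selection by QP type. UD and GSI QPs only carry single
 * packet sends, so they map directly to the ONLY opcodes.
 */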
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

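/* Reserve a slot for an outstanding read or atomic request. Returns 0 if
 * a slot was reserved (or the wqe already holds one) and -EAGAIN if the
 * initiator depth (rd_atomic) limit has been reached.
 */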
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

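/* Connected QPs (RC/UC) use the negotiated path MTU; datagram QPs are
 * limited only by the port MTU.
 */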
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

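/* Allocate an skb for a request packet and build the IBA headers (BTH plus
 * any optional RETH, FETH, IMMDT, IETH, ATMETH and DETH headers) from the
 * work request and the packet info.
 */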
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u32 qp_num;
	int ack_req = 0;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
		ack_req = ((pkt->mask & RXE_END_MASK) ||
				(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		if (pkt->mask & RXE_FETH_MASK)
			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
		else
			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	/* Fill Flush Extension Transport Header */
	if (pkt->mask & RXE_FETH_MASK)
		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

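/* Prepare the network headers for transmission and copy the payload into
 * the packet, either from inline data in the wqe or from the memory
 * described by the wqe's dma state; pad bytes are zeroed, flush requests
 * carry no payload and atomic writes copy their operand directly.
 */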
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		/* oA19-2: shall have no payload. */
		wqe->dma.resid = 0;
	}

	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;
	}

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
		else
			wqe->state = wqe_state_done;
	} else {
		wqe->state = wqe_state_processing;
	}
}

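/* Record the PSN range used by the wqe and advance the requester PSN.
 * Read requests consume one PSN per expected response packet; all other
 * requests consume one PSN per request packet.
 */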
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

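/* Execute a work request that is handled locally (local invalidate, fast
 * register or memory window bind) without putting a packet on the wire,
 * then mark the wqe done and advance the requester index.
 */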
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	return 0;
}

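/* Main requester work function. Processes at most one send wqe per call:
 * checks the QP state, handles retries and fencing, builds and transmits
 * the next request packet, and updates the requester state. Returns 0 to
 * be called again or -EAGAIN to exit the task loop.
 */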
int rxe_requester(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		wqe = __req_next_wqe(qp);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		if (wqe) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			goto err;
		} else {
			goto exit;
		}
	}

	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	/* we come here if the retransmit timer has fired
	 * or if the rnr timer has fired. If the retransmit
	 * timer fires while we are processing an RNR NAK wait
	 * until the rnr timer has fired before starting the
	 * retry flow
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
			RXE_ATOMIC_WRITE_MASK))) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
			wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			goto done;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		rxe_dbg_qp(qp, "Failed no address vector\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		rxe_dbg_qp(qp, "Failed allocating skb\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		if (ah)
			rxe_put(ah);
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		rxe_dbg_qp(qp, "Error during finish packet\n");
		if (err == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		if (ah)
			rxe_put(ah);
		goto err;
	}

	if (ah)
		rxe_put(ah);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	update_state(qp, &pkt);

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the work item. A zero return
	 * will continue looping and return to rxe_requester
	 */
done:
	ret = 0;
	goto out;
err:
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	rxe_qp_error(qp);
exit:
	ret = -EAGAIN;
out:
	return ret;
}

int rxe_sender(struct rxe_qp *qp)
{
	int req_ret;
	int comp_ret;

	/* process the send queue */
	req_ret = rxe_requester(qp);

	/* process the response queue */
	comp_ret = rxe_completer(qp);

	/* exit the task loop if both requester and completer
	 * are ready
	 */
	return (req_ret && comp_ret) ? -EAGAIN : 0;
}