#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs signal completion only when
 *                       requested (IB_SEND_SIGNALED)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    to complete before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for the RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for the QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for the ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the highest bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit just
 * above RVT_S_MAX_BIT_MASK.
 */
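
/*
 * For example, a driver-private wait flag (hypothetical, not defined by
 * this header) would be allocated from the top of the word, above
 * RVT_S_MAX_BIT_MASK:
 *
 *	#define MY_DRV_S_WAIT_HALT	BIT(31)
 */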

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)
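
/*
 * A minimal sketch (modeled on how rdmavt's post send path uses these
 * flags) of gating a post send against the QP state table:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */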

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * struct rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports a table-driven post send so that
 * each driver can support a potentially different
 * set of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
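
/*
 * A minimal sketch (hypothetical driver table; the names are
 * illustrative) of one entry in a table-driven post send:
 *
 *	static const struct rvt_operation_params
 *			my_drv_post_parms[RVT_OPERATION_MAX] = {
 *		[IB_WR_SEND] = {
 *			.length = sizeof(struct ib_send_wr),
 *			.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_UC) |
 *				       BIT(IB_QPT_RC),
 *		},
 *	};
 */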

/*
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; taking both only happens in modify_qp() or when changing
 * the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* linked list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    IB_QPN_MASK

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}
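
/*
 * A minimal sketch (hypothetical driver code) of walking the send work
 * queue; directly indexing qp->s_wq would be wrong because each entry
 * occupies sizeof(struct rvt_swqe) + s_max_sge * sizeof(struct rvt_sge)
 * bytes:
 *
 *	u32 i;
 *
 *	for (i = 0; i < qp->s_size; i++) {
 *		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
 *
 *		pr_debug("wr_id %llx psn %u\n",
 *			 (unsigned long long)wqe->wr.wr_id, wqe->psn);
 *	}
 */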

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
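
/*
 * Sketch (hypothetical): fetching the RWQE at the consumer index. Real
 * code must hold r_rq.lock and validate head/tail against rq->size,
 * since struct rvt_rwq is shared with user space via mmap():
 *
 *	struct rvt_rwqe *wqe =
 *		rvt_get_rwqe_ptr(&qp->r_rq, qp->r_rq.wq->tail);
 */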

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
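
/*
 * Typical pairing (a sketch, not a required pattern): hold a reference
 * while the QP is queued for deferred work; the final rvt_put_qp()
 * wakes sleepers on qp->wait, such as a destroy path waiting for the
 * refcount to drop:
 *
 *	rvt_get_qp(qp);
 *	...hand qp to asynchronous processing...
 *	rvt_put_qp(qp);
 */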

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * the use of a reserved operation by a wqe.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - the wc opcode
 * @status - completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe,
	enum ib_wc_opcode opcode,
	enum ib_wc_status status)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
		return;
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	     status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = opcode;
		wc.qp = &qp->ibqp;
		wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero:
 * the shift moves bit 23 of the difference into the sign bit, so the
 * sign of the result reflects the signed 24-bit distance from b to a.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
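
/*
 * For example, rvt_cmp_msn(0x000001, 0xffffff) is positive: modulo
 * 2^24, MSN 0x000001 is two ahead of 0xffffff, even though the raw
 * value is numerically smaller.
 */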

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_restart_sge - rewind the sge state for a wqe
 * @ss: the sge state pointer
 * @wqe: the wqe to rewind
 * @len: the data length from the start of the wqe in bytes
 *
 * Returns the remaining data length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - perform a shift based mtu round up divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}
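
/*
 * For example, with a 4096-byte pmtu (log_pmtu == 12), a 5000-byte
 * request spans rvt_div_round_up_mtu(qp, 5000) == 2 packets.
 */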

/**
 * rvt_div_mtu - perform a shift based mtu divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
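
/*
 * Per IBTA, the timeout field encodes 4.096 usec * 2^timeout; e.g.
 * rvt_timeout_to_jiffies(14) corresponds to roughly 67 msec
 * (4.096 usec * 2^14) expressed in jiffies.
 */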

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI/GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
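
/*
 * A minimal sketch (hypothetical driver code) of visiting every QP on a
 * device with rvt_qp_iter(), e.g. for a debugfs dump; the callback is
 * invoked once per QP with the caller-supplied cookie v:
 *
 *	static void my_drv_qp_dump(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_debug("qpn 0x%x state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, my_drv_qp_dump);
 */
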
#endif          /* DEF_RDMAVT_INCQP_H */