/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

/* for definition of shared struct rxe_queue_buf */
#include <uapi/rdma/rdma_user_rxe.h>

/* Implements a simple circular buffer that can optionally be
 * shared between user space and the kernel and can be resized.
 * The requested element size is rounded up to a power of 2,
 * and the number of elements in the buffer is also rounded up
 * to a power of 2. Since the queue is empty when the producer
 * and consumer indices match, the maximum capacity of the
 * queue is one less than the number of element slots.
 */
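
/* Worked example (illustrative only): with index_mask = 7 there are
 * 8 element slots. The queue is empty when producer_index ==
 * consumer_index, so at most 7 slots can hold elements at any time;
 * a request for 5 elements therefore ends up with 8 slots and room
 * for 7.
 */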

struct rxe_queue {
	struct rxe_dev		*rxe;		/* owning rxe device */
	struct rxe_queue_buf	*buf;		/* queue buffer, may be mmapped to user space */
	struct rxe_mmap_info	*ip;		/* mmap info when shared with user space */
	size_t			buf_size;	/* size of *buf in bytes */
	size_t			elem_size;	/* size of a queue element in bytes */
	unsigned int		log2_elem_size;	/* log2 of the rounded-up element size */
	u32			index_mask;	/* number of element slots - 1 */
};

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     /* Protect producers while resizing queue */
		     spinlock_t *producer_lock,
		     /* Protect consumers while resizing queue */
		     spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

static inline int next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->buf->index_mask;
}

static inline int queue_empty(struct rxe_queue *q)
{
	u32 prod;
	u32 cons;

	/* make sure all changes to queue complete before
	 * testing queue empty
	 */
	prod = smp_load_acquire(&q->buf->producer_index);
	/* likewise for the consumer index */
	cons = smp_load_acquire(&q->buf->consumer_index);

	return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q)
{
	u32 prod;
	u32 cons;

	/* make sure all changes to queue complete before
	 * testing queue full
	 */
	prod = smp_load_acquire(&q->buf->producer_index);
	/* likewise for the consumer index */
	cons = smp_load_acquire(&q->buf->consumer_index);

	return ((prod + 1 - cons) & q->index_mask) == 0;
}
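
/* Example of the index arithmetic above (illustrative only): with
 * index_mask = 7, producer_index = 5 and consumer_index = 6, the queue
 * holds (5 - 6) & 7 = 7 elements, and queue_full() sees
 * ((5 + 1 - 6) & 7) == 0, i.e. the queue is full while one of the
 * 8 slots remains unused.
 */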

static inline void advance_producer(struct rxe_queue *q)
{
	u32 prod;

	prod = (q->buf->producer_index + 1) & q->index_mask;

	/* make sure all changes to queue complete before
	 * changing producer index
	 */
	smp_store_release(&q->buf->producer_index, prod);
}

static inline void advance_consumer(struct rxe_queue *q)
{
	u32 cons;

	cons = (q->buf->consumer_index + 1) & q->index_mask;

	/* make sure all changes to queue complete before
	 * changing consumer index
	 */
	smp_store_release(&q->buf->consumer_index, cons);
}
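
/* The smp_store_release() in advance_producer() pairs with the
 * smp_load_acquire() of producer_index in queue_empty(), queue_full()
 * and producer_index(): a reader that observes the new producer_index
 * is guaranteed to also observe the element data written before
 * advance_producer(). The consumer_index store/load pair provides the
 * same guarantee in the opposite direction.
 */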

static inline void *producer_addr(struct rxe_queue *q)
{
	return q->buf->data + ((q->buf->producer_index & q->index_mask)
				<< q->log2_elem_size);
}

static inline void *consumer_addr(struct rxe_queue *q)
{
	return q->buf->data + ((q->buf->consumer_index & q->index_mask)
				<< q->log2_elem_size);
}

static inline unsigned int producer_index(struct rxe_queue *q)
{
	u32 index;

	/* make sure all changes to queue
	 * complete before getting producer index
	 */
	index = smp_load_acquire(&q->buf->producer_index);
	index &= q->index_mask;

	return index;
}

static inline unsigned int consumer_index(struct rxe_queue *q)
{
	u32 index;

	/* make sure all changes to queue
	 * complete before getting consumer index
	 */
	index = smp_load_acquire(&q->buf->consumer_index);
	index &= q->index_mask;

	return index;
}

static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->buf->log2_elem_size);
}

static inline unsigned int index_from_addr(const struct rxe_queue *q,
					   const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
		& q->index_mask;
}

static inline unsigned int queue_count(const struct rxe_queue *q)
{
	return (q->buf->producer_index - q->buf->consumer_index)
		& q->index_mask;
}

static inline void *queue_head(struct rxe_queue *q)
{
	return queue_empty(q) ? NULL : consumer_addr(q);
}
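
/* Illustrative usage sketch (not taken from this driver verbatim);
 * assumes the caller serializes access with the producer/consumer
 * locks passed to rxe_queue_resize(), and process() is a hypothetical
 * consumer of the dequeued element:
 *
 *	producer:
 *		if (queue_full(q))
 *			return -ENOMEM;
 *		memcpy(producer_addr(q), elem, q->elem_size);
 *		advance_producer(q);
 *
 *	consumer:
 *		elem = queue_head(q);
 *		if (elem) {
 *			process(elem);
 *			advance_consumer(q);
 *		}
 */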

#endif /* RXE_QUEUE_H */