// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

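/**
 * do_mmap_info() - prepare a queue buffer for mapping into user space
 * @rxe:	rxe device the queue belongs to
 * @outbuf:	user-space buffer to receive the mmap info, or NULL for
 *		kernel-only queues
 * @udata:	user data from the verbs call
 * @buf:	the vmalloc'ed queue buffer to be mapped
 * @buf_size:	size of the queue buffer in bytes
 * @ip_p:	on success, set to the new mmap info (NULL if @outbuf is NULL)
 *
 * Return: 0 on success, negative errno on failure.
 */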
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p)
{
	int err;
	struct rxe_mmap_info *ip = NULL;

	if (outbuf) {
		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
		if (IS_ERR(ip)) {
			err = PTR_ERR(ip);
			goto err1;
		}

		if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
			err = -EFAULT;
			goto err2;
		}

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return err;
}

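/**
 * rxe_queue_reset() - zero the data area of a queue
 * @q:	the queue to reset
 *
 * Only the element storage is cleared; the struct rxe_queue_buf
 * header at the front of the buffer is left intact.
 */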
inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* The buffer consists of a management header followed by the
	 * memory for the actual queue elements; see "struct rxe_queue_buf"
	 * in rxe_queue.h. Reset only the queue elements, not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

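/**
 * rxe_queue_init() - allocate and initialize a new queue
 * @rxe:	rxe device the queue belongs to
 * @num_elem:	in: minimum number of usable elements requested;
 *		out: number of usable elements actually provided
 * @elem_size:	size of one queue element in bytes
 * @type:	queue type, determining who produces and who consumes
 *
 * The element size is padded to at least a cacheline and rounded up
 * to a power of two, and the slot count is likewise rounded up, so
 * the queue may end up larger than requested.
 *
 * Return: the new queue, or NULL on failure.
 */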
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
			unsigned int elem_size, enum queue_type type)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;
	q->type = type;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	/* one extra slot is needed so that a full queue can be told
	 * apart from an empty one
	 */
	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	/* report back the usable capacity, which may exceed the request */
	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}

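/* Illustrative sketch (not part of the driver): a kernel-side caller
 * might create and destroy a queue roughly as follows. The depth, the
 * element type, and the QUEUE_TYPE_KERNEL value are assumptions for
 * illustration; real callers such as rxe_qp.c derive them from the
 * verbs request and, for user queues, also call do_mmap_info():
 *
 *	int num_elem = 64;
 *	struct rxe_queue *q;
 *
 *	q = rxe_queue_init(rxe, &num_elem, sizeof(struct rxe_send_wqe),
 *			   QUEUE_TYPE_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *
 *	// num_elem now reports the actual usable capacity
 *	...
 *	rxe_queue_cleanup(q);
 */
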
/* Copies elements from the original q to the new q and then swaps the
 * contents of the two q headers. This is so that if anyone is holding
 * a pointer to q it will still work.
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	/* the new queue must be able to hold everything currently queued */
	if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type)))
		return -EINVAL;

	while (!queue_empty(q, q->type)) {
		memcpy(producer_addr(new_q, new_q->type),
					consumer_addr(q, q->type),
					new_q->elem_size);
		advance_producer(new_q, new_q->type);
		advance_consumer(q, q->type);
	}

	swap(*q, *new_q);

	return 0;
}

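/**
 * rxe_queue_resize() - grow or shrink a queue, preserving its contents
 * @q:		the queue to resize
 * @num_elem_p:	in: requested number of elements; out: number provided
 * @elem_size:	size of one queue element in bytes
 * @udata:	user data from the verbs call
 * @outbuf:	user-space mmap info buffer, or NULL for kernel queues
 * @producer_lock: optional lock taken around the copy to exclude producers
 * @consumer_lock: lock taken around the copy to exclude consumers
 *
 * Return: 0 on success, negative errno on failure.
 */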
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	/* after the swap in resize_finish(), new_q holds the old queue on
	 * success and the unused new queue on error; free it either way
	 */
	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

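/**
 * rxe_queue_cleanup() - free a queue
 * @q:	the queue to free
 *
 * If the queue buffer was mapped to user space, drop the mmap info
 * reference and let rxe_mmap_release() free the buffer once the last
 * mapping is gone; otherwise free the buffer directly.
 */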
void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}