/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 */
void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct rvt_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

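	/*
	 * The ring has ibcq.cqe + 1 slots; head == tail means empty and
	 * next == tail means full, so the CQ can hold ibcq.cqe entries.
	 */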
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	trace_rvt_cq_enter(cq, entry, head);
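	/*
	 * User-mapped CQs (cq->ip != NULL) store entries in the ABI-stable
	 * struct ib_uverbs_wc layout of wc->uqueue; kernel CQs keep the full
	 * struct ib_wc in wc->kqueue.
	 */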
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = ib_lid_cpu16(entry->slid);
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else {
		wc->kqueue[head] = *entry;
	}
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}
EXPORT_SYMBOL(rvt_cq_enter);

static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return, so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * rvt_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: pointer to the completion queue or negative errno values
 * for failure.
 */
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq;
	struct rvt_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return ERR_PTR(-EINVAL);

	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/* Allocate the completion queue structure. */
	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
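	/*
	 * A udata response area with room for the mmap offset means a user
	 * CQ: size the ring for the user-visible struct ib_uverbs_wc
	 * entries; the offset is copied back to the user below.
	 */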
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = udata ?
		vmalloc_user(sz) :
		vzalloc_node(sz, rdi->dparms.node);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}

	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
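	/*
	 * Map the completion vector to a CPU: use the driver's lookup when
	 * provided, otherwise fall back to the first CPU of the device's
	 * NUMA node.
	 */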
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	cq->queue = wc;

	ret = &cq->ibcq;

	trace_rvt_create_cq(cq, attr);
	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 *
 * Return: always 0
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

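	/*
	 * With IB_CQ_REPORT_MISSED_EVENTS, report that completions are
	 * already queued so the caller knows to poll before waiting for
	 * the next notification.
	 */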
	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of completion queue entries
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *old_wc;
	struct rvt_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = udata ?
		vmalloc_user(sz) :
		vzalloc_node(sz, rdi->dparms.node);
	if (!wc)
		return -ENOMEM;

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
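	/* Copy the pending entries from the old ring to the start of the new one. */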
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
	return ret;
}

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}