/*
 * xref: /linux/drivers/infiniband/hw/qib/qib_user_sdma.c
 * (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
 */
/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5 seconds (500 iterations of a 10 ms sleep) */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500

struct qib_user_sdma_pkt {
	u8 naddr;               /* dimension of addr (1..3) ... */
	u32 counter;            /* sdma pkts queued counter for this entry */
	u64 added;              /* global descq number of entries */

	struct {
		u32 offset;                     /* offset for kvaddr, addr */
		u32 length;                     /* length in page */
		u8  put_page;                   /* should we put_page? */
		u8  dma_mapped;                 /* is page dma_mapped? */
		struct page *page;              /* may be NULL (coherent mem) */
		void *kvaddr;                   /* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];   /* max pages, any more and we coalesce */
	struct list_head list;  /* list element */
};
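
/*
 * Illustrative note (an inference from the code below, not a spec): for a
 * packet built by qib_user_sdma_queue_pkts(), addr[0] always describes the
 * header/PBC (coherent memory from header_cache, or a kmapped page for
 * odd-sized headers), while the remaining entries describe either pinned
 * user pages or a single freshly allocated page holding a coalesced payload.
 */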

struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};

struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}
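
/*
 * Rough lifecycle sketch (illustrative only -- the real call sites live in
 * the qib file_ops code and pass their own device/context state, so the
 * arguments shown here are assumptions, not the actual callers):
 *
 *	pq = qib_user_sdma_queue_create(&dd->pcidev->dev, unit, ctxt, sctxt);
 *	...
 *	ret = qib_user_sdma_writev(rcd, pq, iov, dim);     (queue and push)
 *	ret = qib_user_sdma_make_progress(ppd, pq);        (reap completions)
 *	...
 *	qib_user_sdma_queue_drain(ppd, pq);                (at close time)
 *	qib_user_sdma_queue_destroy(pq);
 */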

static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, size_t offset, size_t len,
				    int put_page, int dma_mapped,
				    struct page *page,
				    void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
				      u32 counter, size_t offset,
				      size_t len, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				kvaddr, dma_addr);
}

/* we have too many pages in the iovec; coalesce everything into a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}
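
/*
 * Illustrative example (assuming 4 KiB pages): a payload of five 512-byte
 * iovec elements needs too many page entries for pkt->addr[], so it is
 * copied into one freshly allocated page; the resulting 2560-byte buffer
 * is DMA-mapped and described by a single frag at addr[1], leaving
 * pkt->naddr == 2 (header plus coalesced payload).
 */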

/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long  len  = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
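
/*
 * Worked example (assuming 4 KiB pages): iov_base = 0x12345ff0 and
 * iov_len = 0x20 give spage = 0x12345000 and epage = 0x12346000, so the
 * element spans 1 + ((epage - spage) >> PAGE_SHIFT) = 2 pages.
 */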

/*
 * Truncate length to page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}
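
/*
 * Worked example (assuming 4 KiB pages): addr = 0x12345ff0 and len = 0x100
 * give offset = 0xff0; since 0xff0 + 0x100 > PAGE_SIZE the function returns
 * PAGE_SIZE - 0xff0 = 0x10, i.e. only the bytes up to the page boundary.
 */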

static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen = qib_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					pages[j], kmap(pages[j]), dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}

static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	/* skip addr[0]: the header frag is unmapped and freed by the caller */
	for (idx = 1; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}

static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}

/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
	INIT_LIST_HEAD(list);
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on list, returning the number of iovec
 * entries consumed.  list must be empty initially, as, if there is
 * an error we clean it...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_user_sdma_queue *pq,
				    struct list_head *list,
				    const struct iovec *iov,
				    unsigned long niov,
				    int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * This assignment is a bit strange: the PBC counts the
		 * number of 32-bit words in the full packet _except_ the
		 * first word of the PBC itself...
		 */
		pktnwc = nw - 1;

		/*
		 * The pktnw computation yields the number of 32-bit words
		 * that the caller has indicated in the PBC.  Note that this
		 * is one less than the total number of words that goes to
		 * the send DMA engine, as the first 32-bit word of the PBC
		 * itself is not counted.  Armed with this count, we can
		 * verify that the packet is consistent with the iovec
		 * lengths.
		 */
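		/*
		 * Worked example (illustrative, assuming 4 KiB pages): a
		 * 64-byte header iovec gives nw = 16 and pktnwc = 15.  If
		 * the PBC advertises pktnw = 527, the following iovec
		 * elements must supply exactly (527 - 15) * 4 = 2048 more
		 * payload bytes, and 527 is within the allowed window of
		 * pktnwc + (PAGE_SIZE >> 2) = 1039 checked just below.
		 */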
		pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					  page, pbc, dma_addr);

		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	int i;

	if (!pq)
		return;

	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
					 u64 addr, u64 dwlen, u64 dwoffset)
{
	u8 tmpgen;

	tmpgen = ppd->sdma_generation;

	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((tmpgen & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
					      /* last */  /* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}
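
/*
 * Worked example (illustrative): for addr = 0x123456780, dwlen = 16,
 * dwoffset = 0 and sdma_generation = 1, qib_sdma_make_desc0() builds the
 * value 0x2345678040100000 (low 32 address bits, word aligned, in the
 * upper half; generation in bits 31:30; dword count in bits 26:16), and
 * qib_sdma_make_desc1() builds 0x1 (address bits 47:32); cpu_to_le64()
 * then stores each value little-endian in the descriptor ring.
 */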

static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
				    struct qib_user_sdma_pkt *pkt, int idx,
				    unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &ppd->sdma_descq[tail].qw[0];

	descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = qib_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = qib_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = qib_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
				   struct qib_user_sdma_queue *pq,
				   struct list_head *pktlist)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	unsigned long flags;
	u16 tail;
	u8 generation;
	u64 descq_added;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/* keep a copy for restoring purposes in case of problems */
	generation = ppd->sdma_generation;
	descq_added = ppd->sdma_descq_added;

	if (unlikely(!__qib_sdma_running(ppd))) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = ppd->sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct qib_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct qib_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == ppd->sdma_descq_cnt) {
				tail = 0;
				++ppd->sdma_generation;
			}
		}

		if ((ofs << 2) > ppd->ibmaxlen) {
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * If the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs > dd->piosize2kmax_dwords) {
			for (i = 0; i < pkt->naddr; i++) {
				ppd->sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == ppd->sdma_descq_cnt)
					dtail = 0;
			}
		}

		ppd->sdma_descq_added += pkt->naddr;
		pkt->added = ppd->sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (ppd->sdma_descq_tail != tail)
		dd->f_sdma_update_tail(ppd, tail);

unlock:
	if (unlikely(ret < 0)) {
		ppd->sdma_generation = generation;
		ppd->sdma_descq_added = descq_added;
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
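
/*
 * Illustrative accounting for the push path above: a packet with naddr = 3
 * (header plus two payload frags) consumes three descriptor slots, so
 * ppd->sdma_descq_added advances by 3 and the new value is recorded in
 * pkt->added.  qib_user_sdma_queue_clean() later retires the packet once
 * ppd->sdma_descq_removed has caught up with (or passed) pkt->added.
 */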

int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
			 struct qib_user_sdma_queue *pq,
			 const struct iovec *iov,
			 unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
	if (!qib_sdma_running(ppd))
		goto done_unlock;

	if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
	}

	while (dim) {
		const int mxp = 8;

		down_write(&current->mm->mmap_sem);
		ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		up_write(&current->mm->mmap_sem);

		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * Lazily clean the hw queue.  The 4 is a guess of
			 * about how many sdma descriptors a packet will
			 * take (it doesn't have to be perfect).
			 */
			if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
				qib_user_sdma_hwqueue_clean(ppd);
				qib_user_sdma_queue_clean(ppd, pq);
			}

			ret = qib_user_sdma_push_pkts(ppd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	qib_user_sdma_hwqueue_clean(ppd);
	ret = qib_user_sdma_queue_clean(ppd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

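/*
 * The counters exported below are monotonically increasing packet counts
 * (an inference from this file, not a documented ABI): pq->counter is
 * advanced for every packet queued by qib_user_sdma_writev(), while
 * pq->sent_counter records the counter value of the most recently
 * completed (freed) packet, so their difference approximates the number
 * of packets still in flight.
 */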
u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	return pq ? pq->counter : 0;
}
899