xref: /linux/drivers/infiniband/hw/qib/qib_user_sdma.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1 /*
2  * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/mm.h>
33 #include <linux/types.h>
34 #include <linux/device.h>
35 #include <linux/dmapool.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/highmem.h>
39 #include <linux/io.h>
40 #include <linux/uio.h>
41 #include <linux/rbtree.h>
42 #include <linux/spinlock.h>
43 #include <linux/delay.h>
44 
45 #include "qib.h"
46 #include "qib_user_sdma.h"
47 
48 /* minimum size of header */
49 #define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
50 /* expected size of headers (for dma_pool) */
51 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52 /* attempt to drain the queue for 5 seconds */
53 #define QIB_USER_SDMA_DRAIN_TIMEOUT 250
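/*
 * Note: the drain timeout above is an iteration count, not seconds.
 * qib_user_sdma_queue_drain() below sleeps 20 ms per iteration, so
 * 250 iterations * 20 ms ~= 5 seconds of total drain time.
 */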
54 
55 /*
56  * Track how many times a process opens this driver.
57  */
58 static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
59 
60 struct qib_user_sdma_rb_node {
61 	struct rb_node node;
62 	int refcount;
63 	pid_t pid;
64 };
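/*
 * One rb node exists per user pid and is shared by all of that process's
 * queues: qib_user_sdma_queue_create() either finds the pid and bumps
 * refcount, or inserts a new node, and qib_user_sdma_queue_destroy()
 * drops the refcount and erases the node when it reaches zero.  A
 * refcount > 1 (a multi-rail process with several queues) selects the
 * non-blocking path in qib_user_sdma_push_pkts().
 */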
65 
66 struct qib_user_sdma_pkt {
67 	struct list_head list;  /* list element */
68 
69 	u8  tiddma;		/* if this is NEW tid-sdma */
70 	u8  largepkt;		/* this is large pkt from kmalloc */
71 	u16 frag_size;		/* frag size used by PSM */
72 	u16 index;              /* last header index or push index */
73 	u16 naddr;              /* dimension of addr (1..3) ... */
74 	u16 addrlimit;		/* addr array size */
75 	u16 tidsmidx;		/* current tidsm index */
76 	u16 tidsmcount;		/* tidsm array item count */
77 	u16 payload_size;	/* payload size so far for header */
78 	u32 bytes_togo;		/* bytes for processing */
79 	u32 counter;            /* sdma pkts queued counter for this entry */
80 	struct qib_tid_session_member *tidsm;	/* tid session member array */
81 	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */
82 	u64 added;              /* global descq number of entries */
83 
84 	struct {
85 		u16 offset;                     /* offset for kvaddr, addr */
86 		u16 length;                     /* length in page */
87 		u16 first_desc;			/* first desc */
88 		u16 last_desc;			/* last desc */
89 		u16 put_page;                   /* should we put_page? */
90 		u16 dma_mapped;                 /* is page dma_mapped? */
91 		u16 dma_length;			/* for dma_unmap_page() */
92 		u16 padding;
93 		struct page *page;              /* may be NULL (coherent mem) */
94 		void *kvaddr;                   /* FIXME: only for pio hack */
95 		dma_addr_t addr;
96 	} addr[4];   /* max pages, any more and we coalesce */
97 };
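/*
 * Packets whose fragments fit in the inline addr[4] array come from
 * pq->pkt_slab; anything larger is a "largepkt" allocated with kmalloc()
 * and a flexible number of addr[] entries (see the struct_size() use in
 * qib_user_sdma_queue_pkts()), with addrlimit bounding how many addr[]
 * entries may be filled.
 */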
98 
99 struct qib_user_sdma_queue {
100 	/*
101 	 * pkts sent to dma engine are queued on this
102 	 * list head.  the type of the elements of this
103 	 * list are struct qib_user_sdma_pkt...
104 	 */
105 	struct list_head sent;
106 
107 	/*
108 	 * Because the above list will be accessed by both the process and
109 	 * the interrupt handler, we need a spinlock for it.
110 	 */
111 	spinlock_t sent_lock ____cacheline_aligned_in_smp;
112 
113 	/* headers with expected length are allocated from here... */
114 	char header_cache_name[64];
115 	struct dma_pool *header_cache;
116 
117 	/* packets are allocated from the slab cache... */
118 	char pkt_slab_name[64];
119 	struct kmem_cache *pkt_slab;
120 
121 	/* as packets go on the queued queue, they are counted... */
122 	u32 counter;
123 	u32 sent_counter;
124 	/* pending packets, not sending yet */
125 	u32 num_pending;
126 	/* sending packets, not complete yet */
127 	u32 num_sending;
128 	/* global descq entry number of the last packet being sent */
129 	u64 added;
130 
131 	/* dma page table */
132 	struct rb_root dma_pages_root;
133 
134 	struct qib_user_sdma_rb_node *sdma_rb_node;
135 
136 	/* protect everything above... */
137 	struct mutex lock;
138 };
139 
140 static struct qib_user_sdma_rb_node *
141 qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
142 {
143 	struct qib_user_sdma_rb_node *sdma_rb_node;
144 	struct rb_node *node = root->rb_node;
145 
146 	while (node) {
147 		sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
148 					node);
149 		if (pid < sdma_rb_node->pid)
150 			node = node->rb_left;
151 		else if (pid > sdma_rb_node->pid)
152 			node = node->rb_right;
153 		else
154 			return sdma_rb_node;
155 	}
156 	return NULL;
157 }
158 
159 static int
160 qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
161 {
162 	struct rb_node **node = &(root->rb_node);
163 	struct rb_node *parent = NULL;
164 	struct qib_user_sdma_rb_node *got;
165 
166 	while (*node) {
167 		got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
168 		parent = *node;
169 		if (new->pid < got->pid)
170 			node = &((*node)->rb_left);
171 		else if (new->pid > got->pid)
172 			node = &((*node)->rb_right);
173 		else
174 			return 0;
175 	}
176 
177 	rb_link_node(&new->node, parent, node);
178 	rb_insert_color(&new->node, root);
179 	return 1;
180 }
181 
182 struct qib_user_sdma_queue *
183 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
184 {
185 	struct qib_user_sdma_queue *pq =
186 		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
187 	struct qib_user_sdma_rb_node *sdma_rb_node;
188 
189 	if (!pq)
190 		goto done;
191 
192 	pq->counter = 0;
193 	pq->sent_counter = 0;
194 	pq->num_pending = 0;
195 	pq->num_sending = 0;
196 	pq->added = 0;
197 	pq->sdma_rb_node = NULL;
198 
199 	INIT_LIST_HEAD(&pq->sent);
200 	spin_lock_init(&pq->sent_lock);
201 	mutex_init(&pq->lock);
202 
203 	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
204 		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
205 	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
206 					 sizeof(struct qib_user_sdma_pkt),
207 					 0, 0, NULL);
208 
209 	if (!pq->pkt_slab)
210 		goto err_kfree;
211 
212 	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
213 		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
214 	pq->header_cache = dma_pool_create(pq->header_cache_name,
215 					   dev,
216 					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
217 					   4, 0);
218 	if (!pq->header_cache)
219 		goto err_slab;
220 
221 	pq->dma_pages_root = RB_ROOT;
222 
223 	sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
224 					current->pid);
225 	if (sdma_rb_node) {
226 		sdma_rb_node->refcount++;
227 	} else {
228 		sdma_rb_node = kmalloc(sizeof(
229 			struct qib_user_sdma_rb_node), GFP_KERNEL);
230 		if (!sdma_rb_node)
231 			goto err_rb;
232 
233 		sdma_rb_node->refcount = 1;
234 		sdma_rb_node->pid = current->pid;
235 
236 		qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, sdma_rb_node);
237 	}
238 	pq->sdma_rb_node = sdma_rb_node;
239 
240 	goto done;
241 
242 err_rb:
243 	dma_pool_destroy(pq->header_cache);
244 err_slab:
245 	kmem_cache_destroy(pq->pkt_slab);
246 err_kfree:
247 	kfree(pq);
248 	pq = NULL;
249 
250 done:
251 	return pq;
252 }
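/*
 * Illustrative call sketch only -- the real caller lives in qib_file_ops.c
 * and the variable names below are assumptions, not code from this file:
 *
 *	fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, dd->unit,
 *					    rcd->ctxt, fd->subctxt);
 *	if (!fd->pq)
 *		ret = -ENOMEM;
 */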
253 
254 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
255 				    int i, u16 offset, u16 len,
256 				    u16 first_desc, u16 last_desc,
257 				    u16 put_page, u16 dma_mapped,
258 				    struct page *page, void *kvaddr,
259 				    dma_addr_t dma_addr, u16 dma_length)
260 {
261 	pkt->addr[i].offset = offset;
262 	pkt->addr[i].length = len;
263 	pkt->addr[i].first_desc = first_desc;
264 	pkt->addr[i].last_desc = last_desc;
265 	pkt->addr[i].put_page = put_page;
266 	pkt->addr[i].dma_mapped = dma_mapped;
267 	pkt->addr[i].page = page;
268 	pkt->addr[i].kvaddr = kvaddr;
269 	pkt->addr[i].addr = dma_addr;
270 	pkt->addr[i].dma_length = dma_length;
271 }
272 
273 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
274 				size_t len, dma_addr_t *dma_addr)
275 {
276 	void *hdr;
277 
278 	if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
279 		hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
280 					     dma_addr);
281 	else
282 		hdr = NULL;
283 
284 	if (!hdr) {
285 		hdr = kmalloc(len, GFP_KERNEL);
286 		if (!hdr)
287 			return NULL;
288 
289 		*dma_addr = 0;
290 	}
291 
292 	return hdr;
293 }
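/*
 * Convention used throughout this file: a header returned with
 * *dma_addr == 0 came from kmalloc() and is not yet DMA-mapped; callers
 * test for a zero address later and dma_map_single() it before posting
 * (see qib_user_sdma_page_to_frags() and qib_user_sdma_queue_pkts()).
 * A non-zero *dma_addr means coherent memory from the dma_pool.
 */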
294 
295 static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
296 				       struct qib_user_sdma_queue *pq,
297 				       struct qib_user_sdma_pkt *pkt,
298 				       struct page *page, u16 put,
299 				       u16 offset, u16 len, void *kvaddr)
300 {
301 	__le16 *pbc16;
302 	void *pbcvaddr;
303 	struct qib_message_header *hdr;
304 	u16 newlen, pbclen, lastdesc, dma_mapped;
305 	u32 vcto;
306 	union qib_seqnum seqnum;
307 	dma_addr_t pbcdaddr;
308 	dma_addr_t dma_addr =
309 		dma_map_page(&dd->pcidev->dev,
310 			page, offset, len, DMA_TO_DEVICE);
311 	int ret = 0;
312 
313 	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
314 		/*
315 		 * dma mapping error: pkt has not taken ownership of
316 		 * this page yet, so release the page here and let
317 		 * the caller ignore it.
318 		 */
319 		if (put) {
320 			unpin_user_page(page);
321 		} else {
322 			/* coalesce case */
323 			__free_page(page);
324 		}
325 		ret = -ENOMEM;
326 		goto done;
327 	}
328 	offset = 0;
329 	dma_mapped = 1;
330 
331 
332 next_fragment:
333 
334 	/*
335 	 * In tid-sdma, the transfer length is restricted by the
336 	 * receiver side's current tid page length.
337 	 */
338 	if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
339 		newlen = pkt->tidsm[pkt->tidsmidx].length;
340 	else
341 		newlen = len;
342 
343 	/*
344 	 * Then the transfer length is restricted by the MTU.
345 	 * The last-descriptor flag is set when any of these hold:
346 	 * 1. the current packet has reached frag_size bytes;
347 	 * 2. the current tid page is done, if tid-sdma;
348 	 * 3. there are no more bytes to go, for plain sdma.
349 	 */
350 	lastdesc = 0;
351 	if ((pkt->payload_size + newlen) >= pkt->frag_size) {
352 		newlen = pkt->frag_size - pkt->payload_size;
353 		lastdesc = 1;
354 	} else if (pkt->tiddma) {
355 		if (newlen == pkt->tidsm[pkt->tidsmidx].length)
356 			lastdesc = 1;
357 	} else {
358 		if (newlen == pkt->bytes_togo)
359 			lastdesc = 1;
360 	}
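	/*
	 * Worked example with made-up numbers, plain sdma (not tid-sdma):
	 * frag_size = 4096, payload_size = 3072, len = 2048.  newlen is
	 * clamped to 4096 - 3072 = 1024 and lastdesc is set; the remaining
	 * 2048 - 1024 = 1024 bytes of this page are handled by the
	 * "goto next_fragment" loop below under a fresh header.
	 */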
361 
362 	/* fill the next fragment in this page */
363 	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
364 		offset, newlen,		/* offset, len */
365 		0, lastdesc,		/* first last desc */
366 		put, dma_mapped,	/* put page, dma mapped */
367 		page, kvaddr,		/* struct page, virt addr */
368 		dma_addr, len);		/* dma addr, dma length */
369 	pkt->bytes_togo -= newlen;
370 	pkt->payload_size += newlen;
371 	pkt->naddr++;
372 	if (pkt->naddr == pkt->addrlimit) {
373 		ret = -EFAULT;
374 		goto done;
375 	}
376 
377 	/* If there are no more bytes to go (lastdesc == 1). */
378 	if (pkt->bytes_togo == 0) {
379 		/* The packet is done but the header is not dma mapped yet;
380 		 * it must have come from kmalloc. */
381 		if (!pkt->addr[pkt->index].addr) {
382 			pkt->addr[pkt->index].addr =
383 				dma_map_single(&dd->pcidev->dev,
384 					pkt->addr[pkt->index].kvaddr,
385 					pkt->addr[pkt->index].dma_length,
386 					DMA_TO_DEVICE);
387 			if (dma_mapping_error(&dd->pcidev->dev,
388 					pkt->addr[pkt->index].addr)) {
389 				ret = -ENOMEM;
390 				goto done;
391 			}
392 			pkt->addr[pkt->index].dma_mapped = 1;
393 		}
394 
395 		goto done;
396 	}
397 
398 	/* If tid-sdma, advance tid info. */
399 	if (pkt->tiddma) {
400 		pkt->tidsm[pkt->tidsmidx].length -= newlen;
401 		if (pkt->tidsm[pkt->tidsmidx].length) {
402 			pkt->tidsm[pkt->tidsmidx].offset += newlen;
403 		} else {
404 			pkt->tidsmidx++;
405 			if (pkt->tidsmidx == pkt->tidsmcount) {
406 				ret = -EFAULT;
407 				goto done;
408 			}
409 		}
410 	}
411 
412 	/*
413 	 * If this is NOT the last descriptor (newlen == len),
414 	 * the current packet is not done yet, but the current
415 	 * send-side page is.
416 	 */
417 	if (lastdesc == 0)
418 		goto done;
419 
420 	/*
421 	 * When this driver runs under PSM with a message size that
422 	 * fits into one transfer unit, it is not possible to reach
423 	 * this line; getting here otherwise indicates a bug.
424 	 */
425 
426 	/*
427 	 * Since the current packet is done and there are more
428 	 * bytes to go, we need to create a new sdma header, copying
429 	 * it from the previous sdma header, and then modify both.
430 	 */
431 	pbclen = pkt->addr[pkt->index].length;
432 	pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
433 	if (!pbcvaddr) {
434 		ret = -ENOMEM;
435 		goto done;
436 	}
437 	/* Copy the previous sdma header to new sdma header */
438 	pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
439 	memcpy(pbcvaddr, pbc16, pbclen);
440 
441 	/* Modify the previous sdma header */
442 	hdr = (struct qib_message_header *)&pbc16[4];
443 
444 	/* New pbc length */
445 	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
446 
447 	/* New packet length */
448 	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
449 
450 	if (pkt->tiddma) {
451 		/* turn on the header suppression */
452 		hdr->iph.pkt_flags =
453 			cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
454 		/* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
455 		hdr->flags &= ~(0x04|0x20);
456 	} else {
457 		/* turn off extra bytes: 20-21 bits */
458 		hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
459 		/* turn off ACK_REQ: 0x04 */
460 		hdr->flags &= ~(0x04);
461 	}
462 
463 	/* New kdeth checksum */
464 	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
465 	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
466 		be16_to_cpu(hdr->lrh[2]) -
467 		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
468 		le16_to_cpu(hdr->iph.pkt_flags));
469 
470 	/* The packet is done but the header is not dma mapped yet;
471 	 * it must have come from kmalloc. */
472 	if (!pkt->addr[pkt->index].addr) {
473 		pkt->addr[pkt->index].addr =
474 			dma_map_single(&dd->pcidev->dev,
475 				pkt->addr[pkt->index].kvaddr,
476 				pkt->addr[pkt->index].dma_length,
477 				DMA_TO_DEVICE);
478 		if (dma_mapping_error(&dd->pcidev->dev,
479 				pkt->addr[pkt->index].addr)) {
480 			ret = -ENOMEM;
481 			goto done;
482 		}
483 		pkt->addr[pkt->index].dma_mapped = 1;
484 	}
485 
486 	/* Modify the new sdma header */
487 	pbc16 = (__le16 *)pbcvaddr;
488 	hdr = (struct qib_message_header *)&pbc16[4];
489 
490 	/* New pbc length */
491 	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
492 
493 	/* New packet length */
494 	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
495 
496 	if (pkt->tiddma) {
497 		/* Set new tid and offset for new sdma header */
498 		hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
499 			(le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
500 			(pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
501 			(pkt->tidsm[pkt->tidsmidx].offset>>2));
502 	} else {
503 		/* Middle protocol new packet offset */
504 		hdr->uwords[2] += pkt->payload_size;
505 	}
506 
507 	/* New kdeth checksum */
508 	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
509 	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
510 		be16_to_cpu(hdr->lrh[2]) -
511 		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
512 		le16_to_cpu(hdr->iph.pkt_flags));
513 
514 	/* Next sequence number in new sdma header */
515 	seqnum.val = be32_to_cpu(hdr->bth[2]);
516 	if (pkt->tiddma)
517 		seqnum.seq++;
518 	else
519 		seqnum.pkt++;
520 	hdr->bth[2] = cpu_to_be32(seqnum.val);
521 
522 	/* Init new sdma header. */
523 	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
524 		0, pbclen,		/* offset, len */
525 		1, 0,			/* first last desc */
526 		0, 0,			/* put page, dma mapped */
527 		NULL, pbcvaddr,		/* struct page, virt addr */
528 		pbcdaddr, pbclen);	/* dma addr, dma length */
529 	pkt->index = pkt->naddr;
530 	pkt->payload_size = 0;
531 	pkt->naddr++;
532 	if (pkt->naddr == pkt->addrlimit) {
533 		ret = -EFAULT;
534 		goto done;
535 	}
536 
537 	/* Prepare for next fragment in this page */
538 	if (newlen != len) {
539 		if (dma_mapped) {
540 			put = 0;
541 			dma_mapped = 0;
542 			page = NULL;
543 			kvaddr = NULL;
544 		}
545 		len -= newlen;
546 		offset += newlen;
547 
548 		goto next_fragment;
549 	}
550 
551 done:
552 	return ret;
553 }
554 
555 /* we have too many pages in the iovec; coalesce them into a single page */
556 static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
557 				  struct qib_user_sdma_queue *pq,
558 				  struct qib_user_sdma_pkt *pkt,
559 				  const struct iovec *iov,
560 				  unsigned long niov)
561 {
562 	int ret = 0;
563 	struct page *page = alloc_page(GFP_KERNEL);
564 	void *mpage_save;
565 	char *mpage;
566 	int i;
567 	int len = 0;
568 
569 	if (!page) {
570 		ret = -ENOMEM;
571 		goto done;
572 	}
573 
574 	mpage = page_address(page);
575 	mpage_save = mpage;
576 	for (i = 0; i < niov; i++) {
577 		int cfur;
578 
579 		cfur = copy_from_user(mpage,
580 				      iov[i].iov_base, iov[i].iov_len);
581 		if (cfur) {
582 			ret = -EFAULT;
583 			goto page_free;
584 		}
585 
586 		mpage += iov[i].iov_len;
587 		len += iov[i].iov_len;
588 	}
589 
590 	ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
591 			page, 0, 0, len, mpage_save);
592 	goto done;
593 
594 page_free:
595 	__free_page(page);
596 done:
597 	return ret;
598 }
599 
600 /*
601  * How many pages in this iovec element?
602  */
603 static size_t qib_user_sdma_num_pages(const struct iovec *iov)
604 {
605 	const unsigned long addr  = (unsigned long) iov->iov_base;
606 	const unsigned long  len  = iov->iov_len;
607 	const unsigned long spage = addr & PAGE_MASK;
608 	const unsigned long epage = (addr + len - 1) & PAGE_MASK;
609 
610 	return 1 + ((epage - spage) >> PAGE_SHIFT);
611 }
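/*
 * Example: iov_base = 0x1ffc, iov_len = 8 touches bytes 0x1ffc..0x2003,
 * so spage = 0x1000, epage = 0x2000, and the function returns
 * 1 + ((0x2000 - 0x1000) >> PAGE_SHIFT) = 2 pages (assuming 4 KB pages).
 */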
612 
613 static void qib_user_sdma_free_pkt_frag(struct device *dev,
614 					struct qib_user_sdma_queue *pq,
615 					struct qib_user_sdma_pkt *pkt,
616 					int frag)
617 {
618 	const int i = frag;
619 
620 	if (pkt->addr[i].page) {
621 		/* only user data has page */
622 		if (pkt->addr[i].dma_mapped)
623 			dma_unmap_page(dev,
624 				       pkt->addr[i].addr,
625 				       pkt->addr[i].dma_length,
626 				       DMA_TO_DEVICE);
627 
628 		if (pkt->addr[i].put_page)
629 			unpin_user_page(pkt->addr[i].page);
630 		else
631 			__free_page(pkt->addr[i].page);
632 	} else if (pkt->addr[i].kvaddr) {
633 		/* for headers */
634 		if (pkt->addr[i].dma_mapped) {
635 			/* from kmalloc & dma mapped */
636 			dma_unmap_single(dev,
637 				       pkt->addr[i].addr,
638 				       pkt->addr[i].dma_length,
639 				       DMA_TO_DEVICE);
640 			kfree(pkt->addr[i].kvaddr);
641 		} else if (pkt->addr[i].addr) {
642 			/* free coherent mem from cache... */
643 			dma_pool_free(pq->header_cache,
644 			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
645 		} else {
646 			/* from kmalloc but not dma mapped */
647 			kfree(pkt->addr[i].kvaddr);
648 		}
649 	}
650 }
651 
652 /* return number of pages pinned... */
653 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
654 				   struct qib_user_sdma_queue *pq,
655 				   struct qib_user_sdma_pkt *pkt,
656 				   unsigned long addr, int tlen, size_t npages)
657 {
658 	struct page *pages[8];
659 	int i, j;
660 	int ret = 0;
661 
662 	while (npages) {
663 		if (npages > 8)
664 			j = 8;
665 		else
666 			j = npages;
667 
668 		ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
669 		if (ret != j) {
670 			i = 0;
671 			j = ret;
672 			ret = -ENOMEM;
673 			goto free_pages;
674 		}
675 
676 		for (i = 0; i < j; i++) {
677 			/* map the pages... */
678 			unsigned long fofs = addr & ~PAGE_MASK;
679 			int flen = ((fofs + tlen) > PAGE_SIZE) ?
680 				(PAGE_SIZE - fofs) : tlen;
681 
682 			ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
683 				pages[i], 1, fofs, flen, NULL);
684 			if (ret < 0) {
685 				/* the current page has been taken
686 				 * care of inside the above call.
687 				 */
688 				i++;
689 				goto free_pages;
690 			}
691 
692 			addr += flen;
693 			tlen -= flen;
694 		}
695 
696 		npages -= j;
697 	}
698 
699 	goto done;
700 
701 	/* if error, return all pages not managed by pkt */
702 free_pages:
703 	while (i < j)
704 		unpin_user_page(pages[i++]);
705 
706 done:
707 	return ret;
708 }
709 
710 static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
711 				 struct qib_user_sdma_queue *pq,
712 				 struct qib_user_sdma_pkt *pkt,
713 				 const struct iovec *iov,
714 				 unsigned long niov)
715 {
716 	int ret = 0;
717 	unsigned long idx;
718 
719 	for (idx = 0; idx < niov; idx++) {
720 		const size_t npages = qib_user_sdma_num_pages(iov + idx);
721 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
722 
723 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
724 					      iov[idx].iov_len, npages);
725 		if (ret < 0)
726 			goto free_pkt;
727 	}
728 
729 	goto done;
730 
731 free_pkt:
732 	/* we need to ignore the first entry here */
733 	for (idx = 1; idx < pkt->naddr; idx++)
734 		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
735 
736 	/* We need to dma unmap the first entry to restore the original
737 	 * state so that the caller can free the memory on error; the
738 	 * caller does not know whether it was dma mapped or not. */
739 	if (pkt->addr[0].dma_mapped) {
740 		dma_unmap_single(&dd->pcidev->dev,
741 		       pkt->addr[0].addr,
742 		       pkt->addr[0].dma_length,
743 		       DMA_TO_DEVICE);
744 		pkt->addr[0].addr = 0;
745 		pkt->addr[0].dma_mapped = 0;
746 	}
747 
748 done:
749 	return ret;
750 }
751 
752 static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
753 				      struct qib_user_sdma_queue *pq,
754 				      struct qib_user_sdma_pkt *pkt,
755 				      const struct iovec *iov,
756 				      unsigned long niov, int npages)
757 {
758 	int ret = 0;
759 
760 	if (pkt->frag_size == pkt->bytes_togo &&
761 			npages >= ARRAY_SIZE(pkt->addr))
762 		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
763 	else
764 		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
765 
766 	return ret;
767 }
768 
769 /* free a packet list -- return counter value of last packet */
770 static void qib_user_sdma_free_pkt_list(struct device *dev,
771 					struct qib_user_sdma_queue *pq,
772 					struct list_head *list)
773 {
774 	struct qib_user_sdma_pkt *pkt, *pkt_next;
775 
776 	list_for_each_entry_safe(pkt, pkt_next, list, list) {
777 		int i;
778 
779 		for (i = 0; i < pkt->naddr; i++)
780 			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
781 
782 		if (pkt->largepkt)
783 			kfree(pkt);
784 		else
785 			kmem_cache_free(pq->pkt_slab, pkt);
786 	}
787 	INIT_LIST_HEAD(list);
788 }
789 
790 /*
791  * copy headers, coalesce etc -- pq->lock must be held
792  *
793  * we queue all the packets to list, returning the
794  * number of bytes total.  list must be empty initially,
795  * as, if there is an error we clean it...
796  */
797 static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
798 				    struct qib_pportdata *ppd,
799 				    struct qib_user_sdma_queue *pq,
800 				    const struct iovec *iov,
801 				    unsigned long niov,
802 				    struct list_head *list,
803 				    int *maxpkts, int *ndesc)
804 {
805 	unsigned long idx = 0;
806 	int ret = 0;
807 	int npkts = 0;
808 	__le32 *pbc;
809 	dma_addr_t dma_addr;
810 	struct qib_user_sdma_pkt *pkt = NULL;
811 	size_t len;
812 	size_t nw;
813 	u32 counter = pq->counter;
814 	u16 frag_size;
815 
816 	while (idx < niov && npkts < *maxpkts) {
817 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
818 		const unsigned long idx_save = idx;
819 		unsigned pktnw;
820 		unsigned pktnwc;
821 		int nfrags = 0;
822 		size_t npages = 0;
823 		size_t bytes_togo = 0;
824 		int tiddma = 0;
825 		int cfur;
826 
827 		len = iov[idx].iov_len;
828 		nw = len >> 2;
829 
830 		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
831 		    len > PAGE_SIZE || len & 3 || addr & 3) {
832 			ret = -EINVAL;
833 			goto free_list;
834 		}
835 
836 		pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
837 		if (!pbc) {
838 			ret = -ENOMEM;
839 			goto free_list;
840 		}
841 
842 		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
843 		if (cfur) {
844 			ret = -EFAULT;
845 			goto free_pbc;
846 		}
847 
848 		/*
849 		 * This assignment is a bit strange: it's because
850 		 * the pbc counts the number of 32 bit words in the full
851 		 * packet _except_ the first word of the pbc itself...
852 		 */
853 		pktnwc = nw - 1;
854 
855 		/*
856 		 * pktnw computation yields the number of 32 bit words
857 		 * that the caller has indicated in the PBC.  note that
858 		 * this is one less than the total number of words that
859 		 * goes to the send DMA engine as the first 32 bit word
860 		 * of the PBC itself is not counted.  Armed with this count,
861 		 * we can verify that the packet is consistent with the
862 		 * iovec lengths.
863 		 */
864 		pktnw = le32_to_cpu(*pbc) & 0xFFFF;
865 		if (pktnw < pktnwc) {
866 			ret = -EINVAL;
867 			goto free_pbc;
868 		}
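		/*
		 * Illustration with made-up sizes: a 64-byte header iovec
		 * gives nw = 16 and pktnwc = 15.  If the PBC advertises
		 * pktnw = 527 words, the payload iovecs gathered below must
		 * contribute another 512 dwords (2048 bytes) for the final
		 * pktnwc == pktnw check to pass.
		 */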
869 
870 		idx++;
871 		while (pktnwc < pktnw && idx < niov) {
872 			const size_t slen = iov[idx].iov_len;
873 			const unsigned long faddr =
874 				(unsigned long) iov[idx].iov_base;
875 
876 			if (slen & 3 || faddr & 3 || !slen) {
877 				ret = -EINVAL;
878 				goto free_pbc;
879 			}
880 
881 			npages += qib_user_sdma_num_pages(&iov[idx]);
882 
883 			if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
884 			    bytes_togo > type_max(typeof(pkt->bytes_togo))) {
885 				ret = -EINVAL;
886 				goto free_pbc;
887 			}
888 			pktnwc += slen >> 2;
889 			idx++;
890 			nfrags++;
891 		}
892 
893 		if (pktnwc != pktnw) {
894 			ret = -EINVAL;
895 			goto free_pbc;
896 		}
897 
898 		frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
899 		if (((frag_size ? frag_size : bytes_togo) + len) >
900 						ppd->ibmaxlen) {
901 			ret = -EINVAL;
902 			goto free_pbc;
903 		}
904 
905 		if (frag_size) {
906 			size_t tidsmsize, n, pktsize, sz, addrlimit;
907 
908 			n = npages*((2*PAGE_SIZE/frag_size)+1);
909 			pktsize = struct_size(pkt, addr, n);
910 
911 			/*
912 			 * Determine if this is tid-sdma or just sdma.
913 			 */
914 			tiddma = (((le32_to_cpu(pbc[7])>>
915 				QLOGIC_IB_I_TID_SHIFT)&
916 				QLOGIC_IB_I_TID_MASK) !=
917 				QLOGIC_IB_I_TID_MASK);
918 
919 			if (tiddma)
920 				tidsmsize = iov[idx].iov_len;
921 			else
922 				tidsmsize = 0;
923 
924 			if (check_add_overflow(pktsize, tidsmsize, &sz)) {
925 				ret = -EINVAL;
926 				goto free_pbc;
927 			}
928 			pkt = kmalloc(sz, GFP_KERNEL);
929 			if (!pkt) {
930 				ret = -ENOMEM;
931 				goto free_pbc;
932 			}
933 			pkt->largepkt = 1;
934 			pkt->frag_size = frag_size;
935 			if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
936 					       &addrlimit) ||
937 			    addrlimit > type_max(typeof(pkt->addrlimit))) {
938 				ret = -EINVAL;
939 				goto free_pkt;
940 			}
941 			pkt->addrlimit = addrlimit;
942 
943 			if (tiddma) {
944 				char *tidsm = (char *)pkt + pktsize;
945 
946 				cfur = copy_from_user(tidsm,
947 					iov[idx].iov_base, tidsmsize);
948 				if (cfur) {
949 					ret = -EFAULT;
950 					goto free_pkt;
951 				}
952 				pkt->tidsm =
953 					(struct qib_tid_session_member *)tidsm;
954 				pkt->tidsmcount = tidsmsize/
955 					sizeof(struct qib_tid_session_member);
956 				pkt->tidsmidx = 0;
957 				idx++;
958 			}
959 
960 			/*
961 			 * The pbc 'fill1' field is borrowed to pass the frag
962 			 * size; we need to clear it after picking up the frag
963 			 * size because the hardware requires this field to be zero.
964 			 */
965 			*pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
966 		} else {
967 			pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
968 			if (!pkt) {
969 				ret = -ENOMEM;
970 				goto free_pbc;
971 			}
972 			pkt->largepkt = 0;
973 			pkt->frag_size = bytes_togo;
974 			pkt->addrlimit = ARRAY_SIZE(pkt->addr);
975 		}
976 		pkt->bytes_togo = bytes_togo;
977 		pkt->payload_size = 0;
978 		pkt->counter = counter;
979 		pkt->tiddma = tiddma;
980 
981 		/* setup the first header */
982 		qib_user_sdma_init_frag(pkt, 0, /* index */
983 			0, len,		/* offset, len */
984 			1, 0,		/* first last desc */
985 			0, 0,		/* put page, dma mapped */
986 			NULL, pbc,	/* struct page, virt addr */
987 			dma_addr, len);	/* dma addr, dma length */
988 		pkt->index = 0;
989 		pkt->naddr = 1;
990 
991 		if (nfrags) {
992 			ret = qib_user_sdma_init_payload(dd, pq, pkt,
993 							 iov + idx_save + 1,
994 							 nfrags, npages);
995 			if (ret < 0)
996 				goto free_pkt;
997 		} else {
998 			/* since there is no payload, mark the
999 			 * header as the last desc. */
1000 			pkt->addr[0].last_desc = 1;
1001 
1002 			if (dma_addr == 0) {
1003 				/*
1004 				 * the header is not dma mapped yet.
1005 				 * it should be from kmalloc.
1006 				 */
1007 				dma_addr = dma_map_single(&dd->pcidev->dev,
1008 					pbc, len, DMA_TO_DEVICE);
1009 				if (dma_mapping_error(&dd->pcidev->dev,
1010 								dma_addr)) {
1011 					ret = -ENOMEM;
1012 					goto free_pkt;
1013 				}
1014 				pkt->addr[0].addr = dma_addr;
1015 				pkt->addr[0].dma_mapped = 1;
1016 			}
1017 		}
1018 
1019 		counter++;
1020 		npkts++;
1021 		pkt->pq = pq;
1022 		pkt->index = 0; /* reset index for push on hw */
1023 		*ndesc += pkt->naddr;
1024 
1025 		list_add_tail(&pkt->list, list);
1026 	}
1027 
1028 	*maxpkts = npkts;
1029 	ret = idx;
1030 	goto done;
1031 
1032 free_pkt:
1033 	if (pkt->largepkt)
1034 		kfree(pkt);
1035 	else
1036 		kmem_cache_free(pq->pkt_slab, pkt);
1037 free_pbc:
1038 	if (dma_addr)
1039 		dma_pool_free(pq->header_cache, pbc, dma_addr);
1040 	else
1041 		kfree(pbc);
1042 free_list:
1043 	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
1044 done:
1045 	return ret;
1046 }
1047 
1048 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
1049 					       u32 c)
1050 {
1051 	pq->sent_counter = c;
1052 }
1053 
1054 /* try to clean out queue -- needs pq->lock */
1055 static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
1056 				     struct qib_user_sdma_queue *pq)
1057 {
1058 	struct qib_devdata *dd = ppd->dd;
1059 	struct list_head free_list;
1060 	struct qib_user_sdma_pkt *pkt;
1061 	struct qib_user_sdma_pkt *pkt_prev;
1062 	unsigned long flags;
1063 	int ret = 0;
1064 
1065 	if (!pq->num_sending)
1066 		return 0;
1067 
1068 	INIT_LIST_HEAD(&free_list);
1069 
1070 	/*
1071 	 * We need this spin lock here because the interrupt handler
1072 	 * might modify this list in qib_user_sdma_send_desc(); also,
1073 	 * we must not be interrupted while holding it, or we deadlock.
1074 	 */
1075 	spin_lock_irqsave(&pq->sent_lock, flags);
1076 	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
1077 		s64 descd = ppd->sdma_descq_removed - pkt->added;
1078 
1079 		if (descd < 0)
1080 			break;
1081 
1082 		list_move_tail(&pkt->list, &free_list);
1083 
1084 		/* one more packet cleaned */
1085 		ret++;
1086 		pq->num_sending--;
1087 	}
1088 	spin_unlock_irqrestore(&pq->sent_lock, flags);
1089 
1090 	if (!list_empty(&free_list)) {
1091 		u32 counter;
1092 
1093 		pkt = list_entry(free_list.prev,
1094 				 struct qib_user_sdma_pkt, list);
1095 		counter = pkt->counter;
1096 
1097 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1098 		qib_user_sdma_set_complete_counter(pq, counter);
1099 	}
1100 
1101 	return ret;
1102 }
1103 
1104 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
1105 {
1106 	if (!pq)
1107 		return;
1108 
1109 	pq->sdma_rb_node->refcount--;
1110 	if (pq->sdma_rb_node->refcount == 0) {
1111 		rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
1112 		kfree(pq->sdma_rb_node);
1113 	}
1114 	dma_pool_destroy(pq->header_cache);
1115 	kmem_cache_destroy(pq->pkt_slab);
1116 	kfree(pq);
1117 }
1118 
1119 /* clean descriptor queue, returns > 0 if some elements cleaned */
1120 static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
1121 {
1122 	int ret;
1123 	unsigned long flags;
1124 
1125 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1126 	ret = qib_sdma_make_progress(ppd);
1127 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1128 
1129 	return ret;
1130 }
1131 
1132 /* we're in close, drain packets so that we can cleanup successfully... */
1133 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1134 			       struct qib_user_sdma_queue *pq)
1135 {
1136 	struct qib_devdata *dd = ppd->dd;
1137 	unsigned long flags;
1138 	int i;
1139 
1140 	if (!pq)
1141 		return;
1142 
1143 	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
1144 		mutex_lock(&pq->lock);
1145 		if (!pq->num_pending && !pq->num_sending) {
1146 			mutex_unlock(&pq->lock);
1147 			break;
1148 		}
1149 		qib_user_sdma_hwqueue_clean(ppd);
1150 		qib_user_sdma_queue_clean(ppd, pq);
1151 		mutex_unlock(&pq->lock);
1152 		msleep(20);
1153 	}
1154 
1155 	if (pq->num_pending || pq->num_sending) {
1156 		struct qib_user_sdma_pkt *pkt;
1157 		struct qib_user_sdma_pkt *pkt_prev;
1158 		struct list_head free_list;
1159 
1160 		mutex_lock(&pq->lock);
1161 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1162 		/*
1163 		 * Since we hold sdma_lock, it is safe without sent_lock.
1164 		 */
1165 		if (pq->num_pending) {
1166 			list_for_each_entry_safe(pkt, pkt_prev,
1167 					&ppd->sdma_userpending, list) {
1168 				if (pkt->pq == pq) {
1169 					list_move_tail(&pkt->list, &pq->sent);
1170 					pq->num_pending--;
1171 					pq->num_sending++;
1172 				}
1173 			}
1174 		}
1175 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1176 
1177 		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
1178 		INIT_LIST_HEAD(&free_list);
1179 		list_splice_init(&pq->sent, &free_list);
1180 		pq->num_sending = 0;
1181 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1182 		mutex_unlock(&pq->lock);
1183 	}
1184 }
1185 
1186 static inline __le64 qib_sdma_make_desc0(u8 gen,
1187 					 u64 addr, u64 dwlen, u64 dwoffset)
1188 {
1189 	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
1190 			   ((addr & 0xfffffffcULL) << 32) |
1191 			   /* SDmaGeneration[1:0] */
1192 			   ((gen & 3ULL) << 30) |
1193 			   /* SDmaDwordCount[10:0] */
1194 			   ((dwlen & 0x7ffULL) << 16) |
1195 			   /* SDmaBufOffset[12:2] */
1196 			   (dwoffset & 0x7ffULL));
1197 }
1198 
1199 static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
1200 {
1201 	return descq | cpu_to_le64(1ULL << 12);
1202 }
1203 
1204 static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
1205 {
1206 					      /* last */  /* dma head */
1207 	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
1208 }
1209 
1210 static inline __le64 qib_sdma_make_desc1(u64 addr)
1211 {
1212 	/* SDmaPhyAddr[47:32] */
1213 	return cpu_to_le64(addr >> 32);
1214 }
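/*
 * For reference, descriptor qword 0 as assembled by the helpers above and
 * by qib_user_sdma_send_frag()/qib_user_sdma_send_desc():
 *
 *	[63:32] SDmaPhyAddr[31:0] (low two bits forced to zero)
 *	[31:30] SDmaGeneration
 *	[26:16] SDmaDwordCount
 *	[15]    interrupt request (set in qib_user_sdma_send_frag())
 *	[14]    large-buffer packet (set in qib_user_sdma_send_desc())
 *	[13]    dma head
 *	[12]    first descriptor
 *	[11]    last descriptor
 *	[10:0]  SDmaBufOffset[12:2]
 *
 * Qword 1 carries SDmaPhyAddr[47:32].
 */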
1215 
1216 static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
1217 				    struct qib_user_sdma_pkt *pkt, int idx,
1218 				    unsigned ofs, u16 tail, u8 gen)
1219 {
1220 	const u64 addr = (u64) pkt->addr[idx].addr +
1221 		(u64) pkt->addr[idx].offset;
1222 	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
1223 	__le64 *descqp;
1224 	__le64 descq0;
1225 
1226 	descqp = &ppd->sdma_descq[tail].qw[0];
1227 
1228 	descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
1229 	if (pkt->addr[idx].first_desc)
1230 		descq0 = qib_sdma_make_first_desc0(descq0);
1231 	if (pkt->addr[idx].last_desc) {
1232 		descq0 = qib_sdma_make_last_desc0(descq0);
1233 		if (ppd->sdma_intrequest) {
1234 			descq0 |= cpu_to_le64(1ULL << 15);
1235 			ppd->sdma_intrequest = 0;
1236 		}
1237 	}
1238 
1239 	descqp[0] = descq0;
1240 	descqp[1] = qib_sdma_make_desc1(addr);
1241 }
1242 
1243 void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
1244 				struct list_head *pktlist)
1245 {
1246 	struct qib_devdata *dd = ppd->dd;
1247 	u16 nfree, nsent;
1248 	u16 tail, tail_c;
1249 	u8 gen, gen_c;
1250 
1251 	nfree = qib_sdma_descq_freecnt(ppd);
1252 	if (!nfree)
1253 		return;
1254 
1255 retry:
1256 	nsent = 0;
1257 	tail_c = tail = ppd->sdma_descq_tail;
1258 	gen_c = gen = ppd->sdma_generation;
1259 	while (!list_empty(pktlist)) {
1260 		struct qib_user_sdma_pkt *pkt =
1261 			list_entry(pktlist->next, struct qib_user_sdma_pkt,
1262 				   list);
1263 		int i, j, c = 0;
1264 		unsigned ofs = 0;
1265 		u16 dtail = tail;
1266 
1267 		for (i = pkt->index; i < pkt->naddr && nfree; i++) {
1268 			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
1269 			ofs += pkt->addr[i].length >> 2;
1270 
1271 			if (++tail == ppd->sdma_descq_cnt) {
1272 				tail = 0;
1273 				++gen;
1274 				ppd->sdma_intrequest = 1;
1275 			} else if (tail == (ppd->sdma_descq_cnt>>1)) {
1276 				ppd->sdma_intrequest = 1;
1277 			}
1278 			nfree--;
1279 			if (pkt->addr[i].last_desc == 0)
1280 				continue;
1281 
1282 			/*
1283 			 * If the packet is >= 2KB mtu equivalent, we
1284 			 * have to use the large buffers, and have to
1285 			 * mark each descriptor as part of a large
1286 			 * buffer packet.
1287 			 */
1288 			if (ofs > dd->piosize2kmax_dwords) {
1289 				for (j = pkt->index; j <= i; j++) {
1290 					ppd->sdma_descq[dtail].qw[0] |=
1291 						cpu_to_le64(1ULL << 14);
1292 					if (++dtail == ppd->sdma_descq_cnt)
1293 						dtail = 0;
1294 				}
1295 			}
1296 			c += i + 1 - pkt->index;
1297 			pkt->index = i + 1; /* index for next first */
1298 			tail_c = dtail = tail;
1299 			gen_c = gen;
1300 			ofs = 0;  /* reset for next packet */
1301 		}
1302 
1303 		ppd->sdma_descq_added += c;
1304 		nsent += c;
1305 		if (pkt->index == pkt->naddr) {
1306 			pkt->added = ppd->sdma_descq_added;
1307 			pkt->pq->added = pkt->added;
1308 			pkt->pq->num_pending--;
1309 			spin_lock(&pkt->pq->sent_lock);
1310 			pkt->pq->num_sending++;
1311 			list_move_tail(&pkt->list, &pkt->pq->sent);
1312 			spin_unlock(&pkt->pq->sent_lock);
1313 		}
1314 		if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
1315 			break;
1316 	}
1317 
1318 	/* advance the tail on the chip if necessary */
1319 	if (ppd->sdma_descq_tail != tail_c) {
1320 		ppd->sdma_generation = gen_c;
1321 		dd->f_sdma_update_tail(ppd, tail_c);
1322 	}
1323 
1324 	if (nfree && !list_empty(pktlist))
1325 		goto retry;
1326 }
1327 
1328 /* pq->lock must be held, get packets on the wire... */
1329 static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
1330 				 struct qib_user_sdma_queue *pq,
1331 				 struct list_head *pktlist, int count)
1332 {
1333 	unsigned long flags;
1334 
1335 	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
1336 		return -ECOMM;
1337 
1338 	/* non-blocking mode */
1339 	if (pq->sdma_rb_node->refcount > 1) {
1340 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1341 		if (unlikely(!__qib_sdma_running(ppd))) {
1342 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1343 			return -ECOMM;
1344 		}
1345 		pq->num_pending += count;
1346 		list_splice_tail_init(pktlist, &ppd->sdma_userpending);
1347 		qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
1348 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1349 		return 0;
1350 	}
1351 
1352 	/* In this case, descriptors from this process are not
1353 	 * linked to the ppd pending queue and the interrupt handler
1354 	 * won't update this process, so it is OK to modify them
1355 	 * directly without the sdma lock.
1356 	 */
1357 
1358 
1359 	pq->num_pending += count;
1360 	/*
1361 	 * Blocking mode for a single-rail process: we must
1362 	 * release/regain sdma_lock to give other processes a
1363 	 * chance to make progress. This is important for
1364 	 * performance.
1365 	 */
1366 	do {
1367 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1368 		if (unlikely(!__qib_sdma_running(ppd))) {
1369 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1370 			return -ECOMM;
1371 		}
1372 		qib_user_sdma_send_desc(ppd, pktlist);
1373 		if (!list_empty(pktlist))
1374 			qib_sdma_make_progress(ppd);
1375 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1376 	} while (!list_empty(pktlist));
1377 
1378 	return 0;
1379 }
1380 
1381 int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
1382 			 struct qib_user_sdma_queue *pq,
1383 			 const struct iovec *iov,
1384 			 unsigned long dim)
1385 {
1386 	struct qib_devdata *dd = rcd->dd;
1387 	struct qib_pportdata *ppd = rcd->ppd;
1388 	int ret = 0;
1389 	struct list_head list;
1390 	int npkts = 0;
1391 
1392 	INIT_LIST_HEAD(&list);
1393 
1394 	mutex_lock(&pq->lock);
1395 
1396 	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
1397 	if (!qib_sdma_running(ppd))
1398 		goto done_unlock;
1399 
1400 	/* if I have packets not complete yet */
1401 	if (pq->added > ppd->sdma_descq_removed)
1402 		qib_user_sdma_hwqueue_clean(ppd);
1403 	/* if I have complete packets to be freed */
1404 	if (pq->num_sending)
1405 		qib_user_sdma_queue_clean(ppd, pq);
1406 
1407 	while (dim) {
1408 		int mxp = 1;
1409 		int ndesc = 0;
1410 
1411 		ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
1412 				iov, dim, &list, &mxp, &ndesc);
1413 		if (ret < 0)
1414 			goto done_unlock;
1415 		else {
1416 			dim -= ret;
1417 			iov += ret;
1418 		}
1419 
1420 		/* force packets onto the sdma hw queue... */
1421 		if (!list_empty(&list)) {
1422 			/*
1423 			 * Lazily clean hw queue.
1424 			 */
1425 			if (qib_sdma_descq_freecnt(ppd) < ndesc) {
1426 				qib_user_sdma_hwqueue_clean(ppd);
1427 				if (pq->num_sending)
1428 					qib_user_sdma_queue_clean(ppd, pq);
1429 			}
1430 
1431 			ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
1432 			if (ret < 0)
1433 				goto done_unlock;
1434 			else {
1435 				npkts += mxp;
1436 				pq->counter += mxp;
1437 			}
1438 		}
1439 	}
1440 
1441 done_unlock:
1442 	if (!list_empty(&list))
1443 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
1444 	mutex_unlock(&pq->lock);
1445 
1446 	return (ret < 0) ? ret : npkts;
1447 }
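/*
 * Overall flow of a user writev() SDMA submission, as implemented above:
 *
 *	qib_user_sdma_writev()
 *	  -> qib_user_sdma_queue_pkts()      copy/pin user iovecs into pkts
 *	  -> qib_user_sdma_push_pkts()       queue pkts and kick the engine
 *	       -> qib_user_sdma_send_desc()  write hardware descriptors
 *	  -> qib_user_sdma_queue_clean()     lazily retire completed pkts
 */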
1448 
1449 int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
1450 				struct qib_user_sdma_queue *pq)
1451 {
1452 	int ret = 0;
1453 
1454 	mutex_lock(&pq->lock);
1455 	qib_user_sdma_hwqueue_clean(ppd);
1456 	ret = qib_user_sdma_queue_clean(ppd, pq);
1457 	mutex_unlock(&pq->lock);
1458 
1459 	return ret;
1460 }
1461 
1462 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
1463 {
1464 	return pq ? pq->sent_counter : 0;
1465 }
1466 
1467 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
1468 {
1469 	return pq ? pq->counter : 0;
1470 }
1471