Lines matching refs:pkt (drivers/infiniband/hw/qib/qib_user_sdma.c)

254 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,  in qib_user_sdma_init_frag()  argument
261 pkt->addr[i].offset = offset; in qib_user_sdma_init_frag()
262 pkt->addr[i].length = len; in qib_user_sdma_init_frag()
263 pkt->addr[i].first_desc = first_desc; in qib_user_sdma_init_frag()
264 pkt->addr[i].last_desc = last_desc; in qib_user_sdma_init_frag()
265 pkt->addr[i].put_page = put_page; in qib_user_sdma_init_frag()
266 pkt->addr[i].dma_mapped = dma_mapped; in qib_user_sdma_init_frag()
267 pkt->addr[i].page = page; in qib_user_sdma_init_frag()
268 pkt->addr[i].kvaddr = kvaddr; in qib_user_sdma_init_frag()
269 pkt->addr[i].addr = dma_addr; in qib_user_sdma_init_frag()
270 pkt->addr[i].dma_length = dma_length; in qib_user_sdma_init_frag()
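
Taken together, the refs at 254-270 show qib_user_sdma_init_frag() filling one slot of the pkt->addr[] descriptor array. A minimal reconstruction is below; every assignment comes from the lines above, but the parameter list and types beyond the first argument are assumptions, not the exact upstream signature.

/* Hedged reconstruction: record one fragment (offset/length, flag bits,
 * backing page or kernel buffer, optional DMA mapping) in slot i of
 * pkt->addr[].  Parameter types are assumed.
 */
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, u16 offset, u16 len,
				    u16 first_desc, u16 last_desc,
				    u16 put_page, u16 dma_mapped,
				    struct page *page, void *kvaddr,
				    dma_addr_t dma_addr, u16 dma_length)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].first_desc = first_desc;
	pkt->addr[i].last_desc = last_desc;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
	pkt->addr[i].dma_length = dma_length;
}
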
297 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_page_to_frags() argument
338 if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
339 newlen = pkt->tidsm[pkt->tidsmidx].length; in qib_user_sdma_page_to_frags()
351 if ((pkt->payload_size + newlen) >= pkt->frag_size) { in qib_user_sdma_page_to_frags()
352 newlen = pkt->frag_size - pkt->payload_size; in qib_user_sdma_page_to_frags()
354 } else if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
355 if (newlen == pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
358 if (newlen == pkt->bytes_togo) in qib_user_sdma_page_to_frags()
363 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
369 pkt->bytes_togo -= newlen; in qib_user_sdma_page_to_frags()
370 pkt->payload_size += newlen; in qib_user_sdma_page_to_frags()
371 pkt->naddr++; in qib_user_sdma_page_to_frags()
372 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
378 if (pkt->bytes_togo == 0) { in qib_user_sdma_page_to_frags()
381 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
382 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
384 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
385 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
388 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
392 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
399 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
400 pkt->tidsm[pkt->tidsmidx].length -= newlen; in qib_user_sdma_page_to_frags()
401 if (pkt->tidsm[pkt->tidsmidx].length) { in qib_user_sdma_page_to_frags()
402 pkt->tidsm[pkt->tidsmidx].offset += newlen; in qib_user_sdma_page_to_frags()
404 pkt->tidsmidx++; in qib_user_sdma_page_to_frags()
405 if (pkt->tidsmidx == pkt->tidsmcount) { in qib_user_sdma_page_to_frags()
431 pbclen = pkt->addr[pkt->index].length; in qib_user_sdma_page_to_frags()
438 pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr; in qib_user_sdma_page_to_frags()
445 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2)); in qib_user_sdma_page_to_frags()
450 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
472 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
473 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
475 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
476 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
479 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
483 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
491 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2)); in qib_user_sdma_page_to_frags()
496 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
500 (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) + in qib_user_sdma_page_to_frags()
501 (pkt->tidsm[pkt->tidsmidx].offset>>2)); in qib_user_sdma_page_to_frags()
504 hdr->uwords[2] += pkt->payload_size; in qib_user_sdma_page_to_frags()
516 if (pkt->tiddma) in qib_user_sdma_page_to_frags()
519 seqnum.pkt++; in qib_user_sdma_page_to_frags()
523 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
529 pkt->index = pkt->naddr; in qib_user_sdma_page_to_frags()
530 pkt->payload_size = 0; in qib_user_sdma_page_to_frags()
531 pkt->naddr++; in qib_user_sdma_page_to_frags()
532 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
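
The refs at 338-372 outline the core of qib_user_sdma_page_to_frags(): the fragment length is clamped to the remaining TID scatter entry and to the packet's frag_size, a descriptor is recorded via qib_user_sdma_init_frag(), and the running counters advance. The sketch below condenses only that bookkeeping; it is illustrative (the function name is not in the driver), the first_desc/last_desc decisions and error value are simplified assumptions, and the PBC/header rewriting shown by the later refs (431-532) is omitted.

static int qib_sketch_add_payload_frag(struct qib_user_sdma_pkt *pkt,
				       u16 offset, u16 len,
				       u16 put_page, u16 dma_mapped,
				       struct page *page, void *kvaddr,
				       dma_addr_t dma_addr, u16 dma_length)
{
	u16 newlen = len;
	u16 first_desc = 0, last_desc = 0;	/* flag logic simplified */

	/* never cross the current TID scatter entry when doing TID DMA */
	if (pkt->tiddma && newlen > pkt->tidsm[pkt->tidsmidx].length)
		newlen = pkt->tidsm[pkt->tidsmidx].length;

	/* never let one fragment push the payload past frag_size */
	if ((pkt->payload_size + newlen) >= pkt->frag_size) {
		newlen = pkt->frag_size - pkt->payload_size;
		last_desc = 1;
	} else if (pkt->tiddma) {
		if (newlen == pkt->tidsm[pkt->tidsmidx].length)
			last_desc = 1;
	} else {
		if (newlen == pkt->bytes_togo)
			last_desc = 1;
	}

	/* record the fragment and advance the running counters */
	qib_user_sdma_init_frag(pkt, pkt->naddr, offset, newlen,
				first_desc, last_desc, put_page, dma_mapped,
				page, kvaddr, dma_addr, dma_length);
	pkt->bytes_togo -= newlen;
	pkt->payload_size += newlen;
	pkt->naddr++;
	if (pkt->naddr == pkt->addrlimit)
		return -EFAULT;	/* out of descriptor slots; value assumed */

	return 0;
}
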
558 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_coalesce() argument
590 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
615 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_free_pkt_frag() argument
620 if (pkt->addr[i].page) { in qib_user_sdma_free_pkt_frag()
622 if (pkt->addr[i].dma_mapped) in qib_user_sdma_free_pkt_frag()
624 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
625 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
628 if (pkt->addr[i].put_page) in qib_user_sdma_free_pkt_frag()
629 unpin_user_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
631 __free_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
632 } else if (pkt->addr[i].kvaddr) { in qib_user_sdma_free_pkt_frag()
634 if (pkt->addr[i].dma_mapped) { in qib_user_sdma_free_pkt_frag()
637 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
638 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
640 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
641 } else if (pkt->addr[i].addr) { in qib_user_sdma_free_pkt_frag()
644 pkt->addr[i].kvaddr, pkt->addr[i].addr); in qib_user_sdma_free_pkt_frag()
647 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
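
The refs at 615-647 show qib_user_sdma_free_pkt_frag() undoing whatever qib_user_sdma_init_frag() recorded. A hedged reconstruction follows: the DMA direction and the header_cache pool name are assumptions inferred from the visible arguments, and the comments are editorial.

static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int i)
{
	if (pkt->addr[i].page) {
		/* user payload: unmap, then unpin or free the page */
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].dma_length,
				       DMA_TO_DEVICE);	/* direction assumed */

		if (pkt->addr[i].put_page)
			unpin_user_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr) {
		/* header buffers */
		if (pkt->addr[i].dma_mapped) {
			/* kmalloc'ed and streaming-mapped */
			dma_unmap_single(dev,
					 pkt->addr[i].addr,
					 pkt->addr[i].dma_length,
					 DMA_TO_DEVICE);
			kfree(pkt->addr[i].kvaddr);
		} else if (pkt->addr[i].addr) {
			/* coherent memory from the queue's dma pool
			 * (pool name assumed) */
			dma_pool_free(pq->header_cache,
				      pkt->addr[i].kvaddr, pkt->addr[i].addr);
		} else {
			/* kmalloc'ed but never mapped */
			kfree(pkt->addr[i].kvaddr);
		}
	}
}
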
655 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pages() argument
682 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
712 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pkt() argument
723 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
733 for (idx = 1; idx < pkt->naddr; idx++) in qib_user_sdma_pin_pkt()
734 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
739 if (pkt->addr[0].dma_mapped) { in qib_user_sdma_pin_pkt()
741 pkt->addr[0].addr, in qib_user_sdma_pin_pkt()
742 pkt->addr[0].dma_length, in qib_user_sdma_pin_pkt()
744 pkt->addr[0].addr = 0; in qib_user_sdma_pin_pkt()
745 pkt->addr[0].dma_mapped = 0; in qib_user_sdma_pin_pkt()
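
The refs at 733-745 show the unwind path in qib_user_sdma_pin_pkt(): if pinning fails, every payload fragment already recorded is released, while the header fragment in slot 0 is only unmapped, since it is owned by the caller. The wrapper name below is illustrative and the DMA direction is assumed.

static void qib_sketch_unwind_pin(struct qib_devdata *dd,
				  struct qib_user_sdma_queue *pq,
				  struct qib_user_sdma_pkt *pkt)
{
	int idx;

	/* release every payload fragment recorded so far */
	for (idx = 1; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

	/* the header fragment (slot 0) is unmapped but not freed */
	if (pkt->addr[0].dma_mapped) {
		dma_unmap_single(&dd->pcidev->dev,
				 pkt->addr[0].addr,
				 pkt->addr[0].dma_length,
				 DMA_TO_DEVICE);
		pkt->addr[0].addr = 0;
		pkt->addr[0].dma_mapped = 0;
	}
}
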
754 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_init_payload() argument
760 if (pkt->frag_size == pkt->bytes_togo && in qib_user_sdma_init_payload()
761 npages >= ARRAY_SIZE(pkt->addr)) in qib_user_sdma_init_payload()
762 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
764 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
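
The refs at 760-764 show qib_user_sdma_init_payload() choosing between the two payload strategies: coalesce into a bounce page when the whole payload fits one fragment but spans more pages than the descriptor array holds, otherwise pin the user pages directly. A hedged reconstruction (parameter types are assumed):

static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (pkt->frag_size == pkt->bytes_togo &&
	    npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}
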
774 struct qib_user_sdma_pkt *pkt, *pkt_next; in qib_user_sdma_free_pkt_list() local
776 list_for_each_entry_safe(pkt, pkt_next, list, list) { in qib_user_sdma_free_pkt_list()
779 for (i = 0; i < pkt->naddr; i++) in qib_user_sdma_free_pkt_list()
780 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
782 if (pkt->largepkt) in qib_user_sdma_free_pkt_list()
783 kfree(pkt); in qib_user_sdma_free_pkt_list()
785 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
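
The refs at 774-785 show qib_user_sdma_free_pkt_list() walking a packet list, freeing each fragment, then returning the packet either to the allocator (large packets) or to the per-queue slab. A hedged reconstruction; the trailing INIT_LIST_HEAD() is an assumption about how the list is reset.

static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		if (pkt->largepkt)
			kfree(pkt);
		else
			kmem_cache_free(pq->pkt_slab, pkt);
	}
	INIT_LIST_HEAD(list);	/* assumed reset of the caller's list */
}
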
810 struct qib_user_sdma_pkt *pkt = NULL; in qib_user_sdma_queue_pkts() local
884 bytes_togo > type_max(typeof(pkt->bytes_togo))) { in qib_user_sdma_queue_pkts()
909 pktsize = struct_size(pkt, addr, n); in qib_user_sdma_queue_pkts()
928 pkt = kmalloc(sz, GFP_KERNEL); in qib_user_sdma_queue_pkts()
929 if (!pkt) { in qib_user_sdma_queue_pkts()
933 pkt->largepkt = 1; in qib_user_sdma_queue_pkts()
934 pkt->frag_size = frag_size; in qib_user_sdma_queue_pkts()
935 if (check_add_overflow(n, ARRAY_SIZE(pkt->addr), in qib_user_sdma_queue_pkts()
937 addrlimit > type_max(typeof(pkt->addrlimit))) { in qib_user_sdma_queue_pkts()
941 pkt->addrlimit = addrlimit; in qib_user_sdma_queue_pkts()
944 char *tidsm = (char *)pkt + pktsize; in qib_user_sdma_queue_pkts()
952 pkt->tidsm = in qib_user_sdma_queue_pkts()
954 pkt->tidsmcount = tidsmsize/ in qib_user_sdma_queue_pkts()
956 pkt->tidsmidx = 0; in qib_user_sdma_queue_pkts()
967 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
968 if (!pkt) { in qib_user_sdma_queue_pkts()
972 pkt->largepkt = 0; in qib_user_sdma_queue_pkts()
973 pkt->frag_size = bytes_togo; in qib_user_sdma_queue_pkts()
974 pkt->addrlimit = ARRAY_SIZE(pkt->addr); in qib_user_sdma_queue_pkts()
976 pkt->bytes_togo = bytes_togo; in qib_user_sdma_queue_pkts()
977 pkt->payload_size = 0; in qib_user_sdma_queue_pkts()
978 pkt->counter = counter; in qib_user_sdma_queue_pkts()
979 pkt->tiddma = tiddma; in qib_user_sdma_queue_pkts()
982 qib_user_sdma_init_frag(pkt, 0, /* index */ in qib_user_sdma_queue_pkts()
988 pkt->index = 0; in qib_user_sdma_queue_pkts()
989 pkt->naddr = 1; in qib_user_sdma_queue_pkts()
992 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
1000 pkt->addr[0].last_desc = 1; in qib_user_sdma_queue_pkts()
1014 pkt->addr[0].addr = dma_addr; in qib_user_sdma_queue_pkts()
1015 pkt->addr[0].dma_mapped = 1; in qib_user_sdma_queue_pkts()
1021 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1022 pkt->index = 0; /* reset index for push on hw */ in qib_user_sdma_queue_pkts()
1023 *ndesc += pkt->naddr; in qib_user_sdma_queue_pkts()
1025 list_add_tail(&pkt->list, list); in qib_user_sdma_queue_pkts()
1033 if (pkt->largepkt) in qib_user_sdma_queue_pkts()
1034 kfree(pkt); in qib_user_sdma_queue_pkts()
1036 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
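
The refs at 909-989 show the allocation split inside qib_user_sdma_queue_pkts(): a packet that needs more descriptor slots than the built-in addr[] array is kmalloc'ed with a flexible addr[] plus an optional TID scatter map appended after it, while the common case comes from the per-queue slab. The excerpt-style fragment below is heavily paraphrased: it relies on locals the elided lines compute (n, frag_size, tidsmsize, bytes_togo, counter, tiddma), the branch condition and error returns are assumptions, and the TID element type is not named.

if (frag_size) {	/* condition paraphrased: user supplied a frag size */
	size_t pktsize = struct_size(pkt, addr, n);
	size_t addrlimit;	/* type assumed */

	pkt = kmalloc(pktsize + tidsmsize, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;		/* unwind path omitted */
	pkt->largepkt = 1;
	pkt->frag_size = frag_size;
	if (check_add_overflow(n, ARRAY_SIZE(pkt->addr), &addrlimit) ||
	    addrlimit > type_max(typeof(pkt->addrlimit)))
		return -EINVAL;		/* unwind path omitted */
	pkt->addrlimit = addrlimit;

	if (tiddma) {
		/* TID scatter map lives right after the addr[] array */
		char *tidsm = (char *)pkt + pktsize;

		pkt->tidsm = (void *)tidsm;	/* real code casts to the
						 * TID element type */
		pkt->tidsmcount = tidsmsize / sizeof(*pkt->tidsm);
		pkt->tidsmidx = 0;
	}
} else {
	/* common case: fixed-size object from the per-queue slab */
	pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	pkt->largepkt = 0;
	pkt->frag_size = bytes_togo;
	pkt->addrlimit = ARRAY_SIZE(pkt->addr);
}

pkt->bytes_togo = bytes_togo;
pkt->payload_size = 0;
pkt->counter = counter;
pkt->tiddma = tiddma;
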
1060 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_clean() local
1076 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1077 s64 descd = ppd->sdma_descq_removed - pkt->added; in qib_user_sdma_queue_clean()
1082 list_move_tail(&pkt->list, &free_list); in qib_user_sdma_queue_clean()
1093 pkt = list_entry(free_list.prev, in qib_user_sdma_queue_clean()
1095 counter = pkt->counter; in qib_user_sdma_queue_clean()
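
The refs at 1076-1095 show qib_user_sdma_queue_clean() reaping completed packets: a packet is done once the engine's removed-descriptor count has passed the position recorded in pkt->added, and the counter of the last completed packet tells user space how far the queue has drained. The helper below is illustrative (not in the driver); the early break on an incomplete packet and the return convention are assumptions, and locking plus the actual freeing are omitted.

static int qib_sketch_reap_sent(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq,
				struct list_head *free_list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_prev;

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)	/* engine has not consumed it yet */
			break;

		list_move_tail(&pkt->list, free_list);
	}

	if (list_empty(free_list))
		return 0;

	/* remember the counter of the last completed packet */
	pkt = list_entry(free_list->prev, struct qib_user_sdma_pkt, list);
	return pkt->counter;
}
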
1156 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_drain() local
1166 list_for_each_entry_safe(pkt, pkt_prev, in qib_user_sdma_queue_drain()
1168 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1169 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1217 struct qib_user_sdma_pkt *pkt, int idx, in qib_user_sdma_send_frag() argument
1220 const u64 addr = (u64) pkt->addr[idx].addr + in qib_user_sdma_send_frag()
1221 (u64) pkt->addr[idx].offset; in qib_user_sdma_send_frag()
1222 const u64 dwlen = (u64) pkt->addr[idx].length / 4; in qib_user_sdma_send_frag()
1229 if (pkt->addr[idx].first_desc) in qib_user_sdma_send_frag()
1231 if (pkt->addr[idx].last_desc) { in qib_user_sdma_send_frag()
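
The refs at 1220-1231 show qib_user_sdma_send_frag() turning one pkt->addr[] entry into a hardware descriptor: the bus address is the mapping plus the fragment offset, the length is converted to dwords, and first/last flags mark packet boundaries. The fragment below is a sketch only; the sketch_* helpers are placeholders for the driver's real descriptor-encoding helpers, whose names and signatures are not confirmed by the refs.

const u64 addr  = (u64) pkt->addr[idx].addr +
		  (u64) pkt->addr[idx].offset;	/* mapping + offset */
const u64 dwlen = (u64) pkt->addr[idx].length / 4;	/* bytes -> dwords */
u64 desc;

desc = sketch_make_desc(gen, addr, dwlen, ofs);	/* placeholder encoder */
if (pkt->addr[idx].first_desc)
	desc = sketch_mark_first(desc);		/* placeholder */
if (pkt->addr[idx].last_desc)
	desc = sketch_mark_last(desc);		/* placeholder */

/* 'desc' is then written into the SDMA descriptor ring at 'tail' (omitted) */
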
1260 struct qib_user_sdma_pkt *pkt = in qib_user_sdma_send_desc() local
1267 for (i = pkt->index; i < pkt->naddr && nfree; i++) { in qib_user_sdma_send_desc()
1268 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen); in qib_user_sdma_send_desc()
1269 ofs += pkt->addr[i].length >> 2; in qib_user_sdma_send_desc()
1279 if (pkt->addr[i].last_desc == 0) in qib_user_sdma_send_desc()
1289 for (j = pkt->index; j <= i; j++) { in qib_user_sdma_send_desc()
1296 c += i + 1 - pkt->index; in qib_user_sdma_send_desc()
1297 pkt->index = i + 1; /* index for next first */ in qib_user_sdma_send_desc()
1305 if (pkt->index == pkt->naddr) { in qib_user_sdma_send_desc()
1306 pkt->added = ppd->sdma_descq_added; in qib_user_sdma_send_desc()
1307 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1308 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1309 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1310 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1311 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1312 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
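
Finally, the refs at 1305-1312 show the hand-off at the end of qib_user_sdma_send_desc(): once every descriptor of a packet has been queued to the ring, the packet records the ring position to wait for and moves from "pending" to "sent" accounting, so the clean/drain paths above can reap it when the hardware catches up. The fragment below only regroups those lines with explanatory comments.

if (pkt->index == pkt->naddr) {			/* all fragments queued */
	pkt->added = ppd->sdma_descq_added;	/* ring position to wait for */
	pkt->pq->added = pkt->added;
	pkt->pq->num_pending--;
	spin_lock(&pkt->pq->sent_lock);
	pkt->pq->num_sending++;
	list_move_tail(&pkt->list, &pkt->pq->sent);
	spin_unlock(&pkt->pq->sent_lock);
}
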