Lines matching full:dd (whole-word occurrences of the identifier dd)
61 static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
149 struct hfi1_devdata *dd = container_of(inode->i_cdev, in hfi1_file_open() local
153 if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1)) in hfi1_file_open()
156 if (!refcount_inc_not_zero(&dd->user_refcount)) in hfi1_file_open()
169 fd->dd = dd; in hfi1_file_open()
175 if (refcount_dec_and_test(&dd->user_refcount)) in hfi1_file_open()
176 complete(&dd->user_comp); in hfi1_file_open()
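
The hfi1_file_open() and hfi1_file_close() matches above pair refcount_inc_not_zero() on dd->user_refcount with refcount_dec_and_test() plus complete() on dd->user_comp, so an open cannot race past device teardown and the remove path can wait for the last user. Below is a minimal sketch of that open/release guard; the demo_* names are illustrative stand-ins, not hfi1 symbols.

#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/fs.h>
#include <linux/errno.h>

struct demo_devdata {			/* illustrative stand-in for struct hfi1_devdata */
	refcount_t user_refcount;	/* one count per open file descriptor */
	struct completion user_comp;	/* completed when the last user drops */
};

static int demo_open(struct demo_devdata *dd, struct file *fp)
{
	/* Refuse the open if the device is already being torn down. */
	if (!refcount_inc_not_zero(&dd->user_refcount))
		return -ENXIO;
	fp->private_data = dd;
	return 0;
}

static void demo_release(struct demo_devdata *dd)
{
	/* Last user out wakes whoever is waiting in the remove path. */
	if (refcount_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);
}
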
279 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); in hfi1_write_iter()
322 struct hfi1_devdata *dd; in hfi1_file_mmap() local
338 dd = uctxt->dd; in hfi1_file_mmap()
357 memaddr = ((dd->physaddr + TXE_PIO_SEND) + in hfi1_file_mmap()
385 (u64)dd->cr_base[uctxt->numa_id].va) & in hfi1_file_mmap()
387 memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset; in hfi1_file_mmap()
388 memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset; in hfi1_file_mmap()
416 dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n", in hfi1_file_mmap()
443 ret = dma_mmap_coherent(&dd->pcidev->dev, vma, in hfi1_file_mmap()
463 (dd->physaddr + RXE_PER_CONTEXT_USER) in hfi1_file_mmap()
480 (dd->events + uctxt_offset(uctxt)) & PAGE_MASK; in hfi1_file_mmap()
494 memaddr = kvirt_to_phys((void *)dd->status); in hfi1_file_mmap()
569 ret = dma_mmap_coherent(&dd->pcidev->dev, vma, in hfi1_file_mmap()
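
The two dma_mmap_coherent() calls above hand coherently allocated receive buffers straight to a user vma inside hfi1_file_mmap(). The following is a hedged sketch of that pattern, assuming a buffer previously obtained from dma_alloc_coherent(); the demo_* names are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct demo_coherent_buf {
	void *va;		/* kernel virtual address from dma_alloc_coherent() */
	dma_addr_t dma;		/* matching DMA address */
	size_t size;		/* allocation size in bytes */
};

static int demo_mmap_coherent_buf(struct device *dev,
				  struct vm_area_struct *vma,
				  struct demo_coherent_buf *buf)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	/* Never map more than was allocated. */
	if (len > buf->size)
		return -EINVAL;

	/* One call builds the user PTEs for the whole coherent region. */
	return dma_mmap_coherent(dev, vma, buf->va, buf->dma, len);
}
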
631 struct hfi1_devdata *dd = container_of(inode->i_cdev, in hfi1_file_close() local
664 ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt; in hfi1_file_close()
667 spin_lock_irqsave(&dd->uctxt_lock, flags); in hfi1_file_close()
670 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_file_close()
673 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_file_close()
679 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | in hfi1_file_close()
688 hfi1_clear_ctxt_jkey(dd, uctxt); in hfi1_file_close()
699 hfi1_clear_ctxt_pkey(dd, uctxt); in hfi1_file_close()
706 if (refcount_dec_and_test(&dd->user_refcount)) in hfi1_file_close()
707 complete(&dd->user_comp); in hfi1_file_close()
765 spin_lock_irqsave(&fd->dd->uctxt_lock, flags); in complete_subctxt()
767 spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); in complete_subctxt()
814 ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt); in assign_ctxt()
848 struct hfi1_devdata *dd = fd->dd; in match_ctxt() local
868 spin_lock_irqsave(&dd->uctxt_lock, flags); in match_ctxt()
871 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in match_ctxt()
878 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in match_ctxt()
884 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in match_ctxt()
910 struct hfi1_devdata *dd = fd->dd; in find_sub_ctxt() local
917 for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) { in find_sub_ctxt()
918 uctxt = hfi1_rcd_get_by_index(dd, i); in find_sub_ctxt()
931 static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, in allocate_ctxt() argument
938 if (dd->flags & HFI1_FROZEN) { in allocate_ctxt()
949 if (!dd->freectxts) in allocate_ctxt()
956 fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node); in allocate_ctxt()
961 ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt); in allocate_ctxt()
963 dd_dev_err(dd, "user ctxtdata allocation failed\n"); in allocate_ctxt()
973 uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node); in allocate_ctxt()
1007 if (dd->freectxts-- == dd->num_user_contexts) in allocate_ctxt()
1008 aspm_disable_all(dd); in allocate_ctxt()
1023 if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts) in deallocate_ctxt()
1024 aspm_enable_all(uctxt->dd); in deallocate_ctxt()
1097 hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey); in user_init()
1124 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt); in user_init()
1145 cinfo.unit = uctxt->dd->unit; in get_ctxt_info()
1149 uctxt->dd->rcv_entries.group_size) + in get_ctxt_info()
1162 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo); in get_ctxt_info()
1188 struct hfi1_devdata *dd = uctxt->dd; in setup_base_ctxt() local
1194 ret = hfi1_create_rcvhdrq(dd, uctxt); in setup_base_ctxt()
1248 struct hfi1_devdata *dd = uctxt->dd; in get_base_info() local
1251 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt); in get_base_info()
1257 binfo.hw_version = dd->revision; in get_base_info()
1268 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE; in get_base_info()
1293 sizeof(*dd->events)); in get_base_info()
1299 dd->status); in get_base_info()
1442 struct hfi1_devdata *dd = uctxt->dd; in poll_urgent() local
1447 spin_lock_irq(&dd->uctxt_lock); in poll_urgent()
1455 spin_unlock_irq(&dd->uctxt_lock); in poll_urgent()
1465 struct hfi1_devdata *dd = uctxt->dd; in poll_next() local
1470 spin_lock_irq(&dd->uctxt_lock); in poll_next()
1473 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt); in poll_next()
1478 spin_unlock_irq(&dd->uctxt_lock); in poll_next()
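
poll_urgent() and poll_next() follow the stock ->poll shape: register with poll_wait() first, then test the ready condition under dd->uctxt_lock. A generic sketch of that shape follows, with illustrative names only. Calling poll_wait() before the check is what closes the race with a wakeup that fires between the test and the sleep.

#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/fs.h>

struct demo_ctxt {
	wait_queue_head_t wait;		/* woken from the interrupt path */
	spinlock_t lock;		/* protects data_ready */
	bool data_ready;
};

static __poll_t demo_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct demo_ctxt *ctxt = fp->private_data;
	__poll_t mask = 0;

	/* Register with the wait queue before testing the condition. */
	poll_wait(fp, &ctxt->wait, pt);

	spin_lock_irq(&ctxt->lock);
	if (ctxt->data_ready)
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ctxt->lock);

	return mask;
}
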
1491 struct hfi1_devdata *dd = ppd->dd; in hfi1_set_uevent_bits() local
1494 if (!dd->events) in hfi1_set_uevent_bits()
1497 for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts; in hfi1_set_uevent_bits()
1499 uctxt = hfi1_rcd_get_by_index(dd, ctxt); in hfi1_set_uevent_bits()
1507 evs = dd->events + uctxt_offset(uctxt); in hfi1_set_uevent_bits()
1531 struct hfi1_devdata *dd = uctxt->dd; in manage_rcvq() local
1557 hfi1_rcvctrl(dd, rcvctrl_op, uctxt); in manage_rcvq()
1572 struct hfi1_devdata *dd = uctxt->dd; in user_event_ack() local
1576 if (!dd->events) in user_event_ack()
1582 evs = dd->events + uctxt_offset(uctxt) + subctxt; in user_event_ack()
1596 struct hfi1_devdata *dd = uctxt->dd; in set_ctxt_pkey() local
1610 return hfi1_set_ctxt_pkey(dd, uctxt, pkey); in set_ctxt_pkey()
1622 struct hfi1_devdata *dd; in ctxt_reset() local
1625 if (!uctxt || !uctxt->dd || !uctxt->sc) in ctxt_reset()
1634 dd = uctxt->dd; in ctxt_reset()
1653 dd->event_queue, in ctxt_reset()
1654 !(READ_ONCE(dd->flags) & HFI1_FROZEN), in ctxt_reset()
1656 if (dd->flags & HFI1_FROZEN) in ctxt_reset()
1659 if (dd->flags & HFI1_FORCED_FREEZE) in ctxt_reset()
1668 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt); in ctxt_reset()
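
ctxt_reset() blocks on dd->event_queue with wait_event_interruptible_timeout() until the HFI1_FROZEN flag clears, then appears to re-enable receive on the context via hfi1_rcvctrl(). A small sketch of that wait-for-flag idiom, using hypothetical names and an assumed 5-second timeout:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/errno.h>

#define DEMO_FROZEN	BIT(0)		/* illustrative stand-in for HFI1_FROZEN */

struct demo_freeze_dev {
	wait_queue_head_t event_queue;	/* woken when the freeze handler finishes */
	unsigned long flags;
};

static int demo_wait_unfrozen(struct demo_freeze_dev *dd)
{
	long left;

	/* Sleep until the flag clears, a signal arrives, or 5 s elapse. */
	left = wait_event_interruptible_timeout(dd->event_queue,
			!(READ_ONCE(dd->flags) & DEMO_FROZEN),
			msecs_to_jiffies(5000));
	if (left < 0)
		return left;		/* interrupted by a signal */
	if (!left)
		return -ETIMEDOUT;	/* still frozen after the timeout */
	return 0;
}
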
1678 static void user_remove(struct hfi1_devdata *dd) in user_remove() argument
1681 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); in user_remove()
1684 static int user_add(struct hfi1_devdata *dd) in user_add() argument
1689 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); in user_add()
1690 ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops, in user_add()
1691 &dd->user_cdev, &dd->user_device, in user_add()
1692 true, &dd->verbs_dev.rdi.ibdev.dev.kobj); in user_add()
1694 user_remove(dd); in user_add()
1702 int hfi1_device_create(struct hfi1_devdata *dd) in hfi1_device_create() argument
1704 return user_add(dd); in hfi1_device_create()
1711 void hfi1_device_remove(struct hfi1_devdata *dd) in hfi1_device_remove() argument
1713 user_remove(dd); in hfi1_device_remove()
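
user_add() and user_remove() delegate the actual character-device plumbing to hfi1_cdev_init() and hfi1_cdev_cleanup(), which are driver helpers not shown in this listing. For orientation only, here is a generic sketch of the stock cdev-plus-device registration that such helpers usually wrap; this is not the hfi1 implementation, and all demo_* names are hypothetical.

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>

static int demo_cdev_add(struct cdev *cdev, dev_t devt,
			 const struct file_operations *fops,
			 struct class *cls, const char *name,
			 struct device **out_dev)
{
	struct device *dev;
	int ret;

	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	ret = cdev_add(cdev, devt, 1);
	if (ret)
		return ret;

	/* Create the /dev node through the driver's class. */
	dev = device_create(cls, NULL, devt, NULL, "%s", name);
	if (IS_ERR(dev)) {
		cdev_del(cdev);
		return PTR_ERR(dev);
	}

	*out_dev = dev;
	return 0;
}

static void demo_cdev_remove(struct class *cls, dev_t devt, struct cdev *cdev)
{
	device_destroy(cls, devt);
	cdev_del(cdev);
}
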