/linux/drivers/atm/
nicstar.c
   115  #define scq_virt_to_bus(scq, p) \                                          argument
   116          (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
   126  static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
   134  static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
   137  static void drain_scq(ns_dev * card, scq_info * scq, int pos);
   248  free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);             in nicstar_remove_one()
   864  scq_info *scq;                                                             in get_scq() local
   869  scq = kmalloc(sizeof(*scq), GFP_KERNEL);                                   in get_scq()
   870  if (!scq)                                                                  in get_scq()
   872  scq->org = dma_alloc_coherent(&card->pcidev->dev,                          in get_scq()
   [all …]

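The scq_virt_to_bus() macro above recovers the bus address of an SCQ entry from its CPU pointer by reusing the entry's offset within the coherent buffer. A minimal sketch of the same idea as an inline helper, using invented names (struct my_ring, ring_virt_to_dma()) rather than the nicstar types:

#include <linux/types.h>

/* Hypothetical ring state: one coherent buffer known by both its CPU
 * address (org) and the bus/DMA address returned by the allocator (dma).
 */
struct my_ring {
	void		*org;	/* CPU virtual address of the buffer   */
	dma_addr_t	dma;	/* matching bus address for the device */
};

/* The offset of an entry from the start of the buffer is identical in
 * both address spaces, so adding it to the DMA base yields the bus
 * address the hardware should be given.
 */
static inline dma_addr_t ring_virt_to_dma(struct my_ring *ring, void *p)
{
	return ring->dma + ((unsigned long)p - (unsigned long)ring->org);
}
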
idt77252.c
   639  struct scq_info *scq;                                         in alloc_scq() local
   641  scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);           in alloc_scq()
   642  if (!scq)                                                     in alloc_scq()
   644  scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,  in alloc_scq()
   645                                 &scq->paddr, GFP_KERNEL);      in alloc_scq()
   646  if (scq->base == NULL) {                                      in alloc_scq()
   647          kfree(scq);                                           in alloc_scq()
   651  scq->next = scq->base;                                        in alloc_scq()
   652  scq->last = scq->base + (SCQ_ENTRIES - 1);                    in alloc_scq()
   653  atomic_set(&scq->used, 0);                                    in alloc_scq()
   [all …]

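alloc_scq() in the excerpt follows the usual two-step queue setup: kzalloc() the bookkeeping struct, dma_alloc_coherent() the queue memory, unwind the first allocation if the second fails, then point next/last at the ring bounds. A hedged sketch of that shape with made-up names (struct my_scq, MY_SCQ_SIZE, my_alloc_scq()); the real driver initializes more state than shown here:

#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MY_SCQ_SIZE	4096		/* assumed queue size in bytes    */
#define MY_SCQ_ENTRIES	64		/* assumed number of descriptors  */

struct my_scqe { u32 word[4]; };	/* placeholder descriptor layout  */

struct my_scq {
	struct my_scqe	*base;		/* first descriptor (CPU address)  */
	struct my_scqe	*next;		/* next descriptor to fill         */
	struct my_scqe	*last;		/* last descriptor in the ring     */
	dma_addr_t	paddr;		/* DMA address handed to the NIC   */
	atomic_t	used;		/* descriptors currently in flight */
};

static struct my_scq *my_alloc_scq(struct device *dev)
{
	struct my_scq *scq;

	scq = kzalloc(sizeof(*scq), GFP_KERNEL);
	if (!scq)
		return NULL;

	scq->base = dma_alloc_coherent(dev, MY_SCQ_SIZE, &scq->paddr,
				       GFP_KERNEL);
	if (!scq->base) {
		kfree(scq);		/* unwind the first allocation */
		return NULL;
	}

	scq->next = scq->base;
	scq->last = scq->base + (MY_SCQ_ENTRIES - 1);
	atomic_set(&scq->used, 0);

	return scq;
}
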
nicstar.h
   702  scq_info *scq;                  /* To keep track of the SCQ */  member

idt77252.h
   217  struct scq_info *scq;           /* To keep track of the SCQ */  member

/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
    65  static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,   in pvrdma_lock_cqs() argument
    68          __acquires(scq->cq_lock) __acquires(rcq->cq_lock)                   in pvrdma_lock_cqs()
    70          if (scq == rcq) {                                                   in pvrdma_lock_cqs()
    71                  spin_lock_irqsave(&scq->cq_lock, *scq_flags);               in pvrdma_lock_cqs()
    73          } else if (scq->cq_handle < rcq->cq_handle) {                       in pvrdma_lock_cqs()
    74                  spin_lock_irqsave(&scq->cq_lock, *scq_flags);               in pvrdma_lock_cqs()
    79                  spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,         in pvrdma_lock_cqs()
    84  static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq, in pvrdma_unlock_cqs() argument
    87          __releases(scq->cq_lock) __releases(rcq->cq_lock)                   in pvrdma_unlock_cqs()
    89          if (scq == rcq) {                                                   in pvrdma_unlock_cqs()
   [all …]

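pvrdma_lock_cqs() in the excerpt takes the send and receive CQ locks in a fixed order, chosen by comparing cq_handle, so concurrent callers that touch the same pair of CQs agree on which lock comes first and cannot deadlock; when both queues share one CQ, only one lock is taken. A simplified sketch of that ordering rule with hypothetical struct my_cq and field names (the unlock side, not shown, releases in the mirror order):

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_cq {
	spinlock_t	lock;
	u32		handle;		/* stable ID used to order the locks */
};

/* Lock both CQs of a QP.  If they are the same object only one lock is
 * taken; otherwise the lock with the smaller handle is always taken
 * first, so every caller uses the same order.  The _nested annotation
 * tells lockdep that nesting two locks of the same class is intended.
 */
static void my_lock_cqs(struct my_cq *scq, struct my_cq *rcq,
			unsigned long *scq_flags, unsigned long *rcq_flags)
{
	if (scq == rcq) {
		spin_lock_irqsave(&scq->lock, *scq_flags);
		*rcq_flags = *scq_flags;
	} else if (scq->handle < rcq->handle) {
		spin_lock_irqsave(&scq->lock, *scq_flags);
		spin_lock_irqsave_nested(&rcq->lock, *rcq_flags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->lock, *rcq_flags);
		spin_lock_irqsave_nested(&scq->lock, *scq_flags,
					 SINGLE_DEPTH_NESTING);
	}
}
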
/linux/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
    74  struct bnxt_qplib_cq *scq, *rcq;                                    in __bnxt_qplib_add_flush_qp() local
    76  scq = qp->scq;                                                      in __bnxt_qplib_add_flush_qp()
    80  dev_dbg(&scq->hwq.pdev->dev,                                        in __bnxt_qplib_add_flush_qp()
    83  list_add_tail(&qp->sq_flush, &scq->sqf_head);                       in __bnxt_qplib_add_flush_qp()
    98  __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)   in bnxt_qplib_acquire_cq_flush_locks()
   100  spin_lock_irqsave(&qp->scq->flush_lock, *flags);                    in bnxt_qplib_acquire_cq_flush_locks()
   101  if (qp->scq == qp->rcq)                                             in bnxt_qplib_acquire_cq_flush_locks()
   109  __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)   in bnxt_qplib_release_cq_flush_locks()
   111  if (qp->scq == qp->rcq)                                             in bnxt_qplib_release_cq_flush_locks()
   115  spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);               in bnxt_qplib_release_cq_flush_locks()
   [all …]

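The bnxt_qplib flush helpers show a related shortcut: when a QP's send and receive queues report to the same CQ, qp->scq == qp->rcq and only one flush_lock really exists, so it is taken once. A minimal sketch of the acquire/release pair based on the excerpt, with invented type names; the __acquire/__release calls only keep sparse's lock counting balanced:

#include <linux/spinlock.h>

struct my_cq { spinlock_t flush_lock; };
struct my_qp { struct my_cq *scq, *rcq; };

/* Take the flush locks of both CQs attached to a QP; a shared CQ is
 * locked only once.
 */
static void my_acquire_cq_flush_locks(struct my_qp *qp, unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

/* Release in the reverse order of acquisition. */
static void my_release_cq_flush_locks(struct my_qp *qp, unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
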
ib_verbs.c
   889  __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)   in bnxt_re_lock_cqs()
   893  spin_lock_irqsave(&qp->scq->cq_lock, flags);                  in bnxt_re_lock_cqs()
   894  if (qp->rcq != qp->scq)                                       in bnxt_re_lock_cqs()
   904  __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)   in bnxt_re_unlock_cqs()
   906  if (qp->rcq != qp->scq)                                       in bnxt_re_unlock_cqs()
   910  spin_unlock_irqrestore(&qp->scq->cq_lock, flags);             in bnxt_re_unlock_cqs()
  1002  scq_nq = qplib_qp->scq->nq;                                   in bnxt_re_destroy_qp()
  1231  qp->qplib_qp.scq = qp1_qp->scq;                               in bnxt_re_create_shadow_qp()
  1474  qplqp->scq = &cq->qplib_cq;                                   in bnxt_re_init_qp_attr()
  1475  qp->scq = cq;                                                 in bnxt_re_init_qp_attr()
   [all …]

ib_verbs.h
    96  struct bnxt_re_cq *scq;   member

qplib_fp.h
   313  struct bnxt_qplib_cq *scq;   member

/linux/drivers/infiniband/sw/rxe/
rxe_comp.c
   457  rxe_cq_post(qp->scq, &cqe, 0);                            in do_complete()
   574  err = rxe_cq_post(qp->scq, &cqe, 0);                      in flush_send_wqe()
   576  rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);   in flush_send_wqe()

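In rxe_comp.c, both normal completions (do_complete()) and flushed send WQEs (flush_send_wqe()) end the same way: a CQE is built and posted to the QP's send CQ, and a failed post is only logged because there is nothing left to unwind. A toy, self-contained sketch of that step; my_cq_post() below is a stub standing in for the real post call, not the rxe function:

#include <linux/printk.h>
#include <linux/types.h>

/* Invented placeholders for the queue-pair objects. */
struct my_cqe { u64 wr_id; int status; };
struct my_cq  { int dummy; };
struct my_qp  { struct my_cq *scq; };

/* Stub for "post one CQE to this CQ"; the real thing can fail, for
 * example on CQ overflow, hence the return code.
 */
static int my_cq_post(struct my_cq *cq, struct my_cqe *cqe, bool solicited)
{
	return 0;
}

/* Retire a send WQE: build its completion entry and post it to the
 * QP's send CQ, reporting (but not otherwise handling) a failure.
 */
static void my_complete_send_wqe(struct my_qp *qp, u64 wr_id, int status)
{
	struct my_cqe cqe = { .wr_id = wr_id, .status = status };
	int err;

	err = my_cq_post(qp->scq, &cqe, false);
	if (err)
		pr_debug("post cq failed, err = %d\n", err);
}
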
rxe_verbs.h
   210  struct rxe_cq *scq;   member

/linux/drivers/infiniband/hw/cxgb4/
ev.c
   139  cqid = qhp->attr.scq;   in c4iw_ev_dispatch()

iw_cxgb4.h
   450  u32 scq;   member

/linux/drivers/infiniband/core/
uverbs_cmd.c
  1284  struct ib_cq *scq = NULL, *rcq = NULL;                          in create_qp() local
  1381  scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,                   in create_qp()
  1384  rcq = rcq ?: scq;                                               in create_qp()
  1387  if (!pd || (!scq && has_sq)) {                                  in create_qp()
  1396  attr.send_cq = scq;                                             in create_qp()
  1457  if (scq)                                                        in create_qp()
  1458          rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,  in create_qp()
  1460  if (rcq && rcq != scq)                                          in create_qp()
  1485  if (scq)                                                        in create_qp()
  1486          rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,  in create_qp()
   [all …]

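create_qp() here looks up the user-supplied CQ objects, falls back to the send CQ when no separate receive CQ applies (rcq = rcq ?: scq), and on both the error and completion paths puts each looked-up object exactly once, with the rcq != scq test preventing a double put of the shared object. A toy sketch of that fallback-and-cleanup shape; the lookup/put helpers are invented stand-ins, not the uverbs object-table API:

#include <linux/errno.h>
#include <linux/types.h>

struct my_cq { int id; };

static struct my_cq table_cq = { .id = 1 };

/* Stand-ins for object lookup/put; the stubs hand out one static
 * object so the sketch stays self-contained.
 */
static struct my_cq *my_cq_get(u32 handle)
{
	return handle ? &table_cq : NULL;
}

static void my_cq_put(struct my_cq *cq)
{
}

static int my_create_qp(u32 scq_handle, u32 rcq_handle, bool has_rq)
{
	struct my_cq *scq = NULL, *rcq = NULL;
	int ret = 0;

	scq = my_cq_get(scq_handle);
	if (!scq)
		return -EINVAL;

	if (has_rq) {
		rcq = my_cq_get(rcq_handle);
		if (!rcq) {
			ret = -EINVAL;
			goto out_put;
		}
	}
	/* No separate receive CQ requested: completions for both
	 * directions go to the send CQ.
	 */
	rcq = rcq ?: scq;

	/* ... fill in the QP attributes and create the QP here ... */

out_put:
	my_cq_put(scq);
	if (rcq && rcq != scq)	/* avoid a double put of the shared CQ */
		my_cq_put(rcq);
	return ret;
}
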
/linux/drivers/net/ethernet/mellanox/mlx4/
resource_tracker.c
   112  struct res_cq *scq;                               member
  2966  struct res_cq *scq;                               in mlx4_RST2INIT_QP_wrapper() local
  3004  err = get_res(dev, slave, scqn, RES_CQ, &scq);    in mlx4_RST2INIT_QP_wrapper()
  3008  scq = rcq;                                        in mlx4_RST2INIT_QP_wrapper()
  3025  atomic_inc(&scq->ref_count);                      in mlx4_RST2INIT_QP_wrapper()
  3026  qp->scq = scq;                                    in mlx4_RST2INIT_QP_wrapper()
  4004  atomic_dec(&qp->scq->ref_count);                  in mlx4_2RST_QP_wrapper()
  4717  atomic_dec(&qp->scq->ref_count);                  in rem_slave_qps()

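The mlx4 resource tracker pins a slave's CQs while a QP uses them: the RST2INIT path takes a reference on the send and receive CQ (the same object when they are shared), and the 2RST and slave-cleanup paths drop it, so a CQ cannot disappear under an attached QP. A small hedged sketch of that pairing with generic atomic refcounts and invented names:

#include <linux/atomic.h>

struct my_cq { atomic_t ref_count; };
struct my_qp { struct my_cq *scq, *rcq; };

/* QP moves out of reset: pin the CQs it will post completions to.
 * When send and receive completions share one CQ, both pointers
 * reference the same object and it is simply pinned twice.
 */
static void my_qp_rst2init(struct my_qp *qp, struct my_cq *scq,
			   struct my_cq *rcq)
{
	atomic_inc(&scq->ref_count);
	atomic_inc(&rcq->ref_count);
	qp->scq = scq;
	qp->rcq = rcq;
}

/* QP goes back to reset (or its owner is cleaned up): release the
 * pins taken above so the CQs become destroyable again.
 */
static void my_qp_2rst(struct my_qp *qp)
{
	atomic_dec(&qp->scq->ref_count);
	atomic_dec(&qp->rcq->ref_count);
}
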
/linux/drivers/infiniband/sw/siw/
siw.h
   427  struct siw_cq *scq;   member

siw_verbs.c
   405  qp->scq = to_siw_cq(attrs->send_cq);   in siw_create_qp()
   637  qp->scq = qp->rcq = NULL;              in siw_destroy_qp()

siw_qp.c
  1066  struct siw_cq *cq = qp->scq;   in siw_sqe_complete()