Cross-reference hits for the identifier sq_lock; the numbers are line numbers within each referenced file.

/linux/drivers/net/ethernet/intel/ice/

    ice_controlq.c
         465  mutex_lock(&cq->sq_lock);      in ice_shutdown_sq()
         486  mutex_unlock(&cq->sq_lock);    in ice_shutdown_sq()
         788  mutex_init(&cq->sq_lock);      in ice_init_ctrlq_locks()
         826  mutex_destroy(&cq->sq_lock);   in ice_destroy_ctrlq_locks()
        1008  mutex_lock(&cq->sq_lock);      in ice_sq_send_cmd()
        1141  mutex_unlock(&cq->sq_lock);    in ice_sq_send_cmd()

    ice_controlq.h
         100  struct mutex sq_lock;  /* Send queue lock */  (member)

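Of the subsystems listed here, ice is the only one that guards its send queue with a mutex rather than a spinlock, presumably because the control-queue send path may sleep while waiting for firmware to consume the descriptor. A minimal sketch of the pattern, assuming illustrative type and function names (only the sq_lock calls mirror the hits above):

    #include <linux/mutex.h>

    /*
     * Minimal sketch, not the driver's code: a control queue whose
     * send side is serialized by a sleepable mutex, ice-style.
     */
    struct ctrlq_sketch {
            struct mutex sq_lock;           /* Send queue lock */
            /* descriptor ring, head/tail indices, ... */
    };

    static void ctrlq_sketch_init(struct ctrlq_sketch *cq)
    {
            mutex_init(&cq->sq_lock);       /* cf. ice_init_ctrlq_locks() */
    }

    static int ctrlq_sketch_send(struct ctrlq_sketch *cq)
    {
            mutex_lock(&cq->sq_lock);       /* cf. ice_sq_send_cmd() */
            /* post the descriptor, ring the doorbell, wait for completion */
            mutex_unlock(&cq->sq_lock);
            return 0;
    }
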
/linux/drivers/net/ethernet/fungible/funcore/

    fun_dev.c
         389  spin_lock(&funq->sq_lock);              in fun_submit_admin_cmd()
         405  spin_unlock(&funq->sq_lock);            in fun_submit_admin_cmd()
         428  spin_lock(&fdev->admin_q->sq_lock);     in fun_admin_stop()
         430  spin_unlock(&fdev->admin_q->sq_lock);   in fun_admin_stop()

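funcore uses the plain spin_lock()/spin_unlock() variants around admin-command submission, which is sufficient only if sq_lock is never taken from hard-IRQ context; that context assumption is mine, inferred from the lock variant. A sketch with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Illustrative admin queue; not the driver's struct. */
    struct fun_queue_sketch {
            spinlock_t sq_lock;
            u32 sq_tail;
    };

    static void fun_sketch_submit(struct fun_queue_sketch *funq)
    {
            spin_lock(&funq->sq_lock);      /* cf. fun_submit_admin_cmd() */
            /* copy the command into the ring and advance the tail */
            funq->sq_tail++;
            spin_unlock(&funq->sq_lock);
    }
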
/linux/drivers/infiniband/sw/siw/

    siw_qp_tx.c
        1079  spin_lock_irqsave(&qp->sq_lock, flags);       in siw_qp_sq_process()
        1082  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_qp_sq_process()
        1114  spin_lock_irqsave(&qp->sq_lock, flags);       in siw_qp_sq_process()
        1124  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_qp_sq_process()

    siw_verbs.c
         369  spin_lock_init(&qp->sq_lock);                 in siw_create_qp()
         827  spin_lock_irqsave(&qp->sq_lock, flags);       in siw_post_send()
         966  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_post_send()
         970  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_post_send()

    siw_qp.c
         269  spin_lock_irqsave(&qp->sq_lock, flags);       in siw_qp_mpa_rts()
         272  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_qp_mpa_rts()
         315  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_qp_mpa_rts()

    siw_qp_rx.c
         677  spin_lock_irqsave(&qp->sq_lock, flags);       in siw_init_rresp()
         725  spin_unlock_irqrestore(&qp->sq_lock, flags);  in siw_init_rresp()

    siw.h
         432  spinlock_t sq_lock;  (member)

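siw takes sq_lock with the irqsave variants because the send queue is reached from several paths: the verbs post path (siw_post_send), send-queue processing (siw_qp_sq_process), MPA RTS handling (siw_qp_mpa_rts), and the receive path when it queues a read response (siw_init_rresp). A sketch of the post side, with illustrative names:

    #include <linux/spinlock.h>

    /* Illustrative QP with a siw-style sq_lock; not siw's struct. */
    struct siw_qp_sketch {
            spinlock_t sq_lock;
            /* send-queue array, put/get indices, ... */
    };

    static int siw_sketch_post_send(struct siw_qp_sketch *qp)
    {
            unsigned long flags;
            int rv = 0;

            /* irqsave variant: the same lock is taken from other
             * contexts (cf. siw_qp_sq_process(), siw_init_rresp()) */
            spin_lock_irqsave(&qp->sq_lock, flags);
            /* validate the work request and copy it into the SQ */
            spin_unlock_irqrestore(&qp->sq_lock, flags);
            return rv;
    }
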
/linux/drivers/nvme/host/

    pci.c
         195  spinlock_t sq_lock;  (member)
         507  spin_lock(&nvmeq->sq_lock);       in nvme_commit_rqs()
         510  spin_unlock(&nvmeq->sq_lock);     in nvme_commit_rqs()
         982  spin_lock(&nvmeq->sq_lock);       in nvme_queue_rq()
         985  spin_unlock(&nvmeq->sq_lock);     in nvme_queue_rq()
         993  spin_lock(&nvmeq->sq_lock);       in nvme_submit_cmds()
        1000  spin_unlock(&nvmeq->sq_lock);     in nvme_submit_cmds()
        1233  spin_lock(&nvmeq->sq_lock);       in nvme_pci_submit_async_event()
        1236  spin_unlock(&nvmeq->sq_lock);     in nvme_pci_submit_async_event()
        1674  spin_lock_init(&nvmeq->sq_lock);  in nvme_alloc_queue()

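In the NVMe PCI driver each hardware queue carries its own sq_lock, held only across the short copy-into-ring and doorbell write on the submission paths (nvme_queue_rq, nvme_submit_cmds, nvme_pci_submit_async_event, nvme_commit_rqs). A sketch with illustrative names; the real struct nvme_queue in pci.c has many more fields:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Illustrative per-hardware-queue state, nvme-pci style. */
    struct nvmeq_sketch {
            spinlock_t sq_lock;
            u16 sq_tail;
    };

    static void nvmeq_sketch_submit(struct nvmeq_sketch *nvmeq)
    {
            spin_lock(&nvmeq->sq_lock);     /* cf. nvme_queue_rq() */
            /* copy the command into the SQ and bump the tail */
            nvmeq->sq_tail++;
            /* write the SQ tail doorbell before dropping the lock */
            spin_unlock(&nvmeq->sq_lock);
    }
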
/linux/drivers/infiniband/hw/bnxt_re/

    ib_verbs.h
          88  spinlock_t sq_lock;  /* protect sq */  (member)

    ib_verbs.c
        1255  spin_lock_init(&qp->sq_lock);                 in bnxt_re_create_shadow_qp()
        1663  spin_lock_init(&qp->sq_lock);                 in bnxt_re_create_qp()
        2790  spin_lock_irqsave(&qp->sq_lock, flags);       in bnxt_re_post_send_shadow_qp()
        2827  spin_unlock_irqrestore(&qp->sq_lock, flags);  in bnxt_re_post_send_shadow_qp()
        2851  spin_lock_irqsave(&qp->sq_lock, flags);       in bnxt_re_post_send()
        2940  spin_unlock_irqrestore(&qp->sq_lock, flags);  in bnxt_re_post_send()
        3779  spin_lock_irqsave(&qp->sq_lock, flags);       in send_phantom_wqe()
        3791  spin_unlock_irqrestore(&qp->sq_lock, flags);  in send_phantom_wqe()

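bnxt_re follows the same irqsave pattern as siw above: besides the verbs post paths (bnxt_re_post_send and its shadow-QP variant), the SQ is also written from completion handling via send_phantom_wqe(). When one helper is shared by several locked callers, the contract can be made checkable; a sketch using lockdep_assert_held() (my addition, not shown in the hits), with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    /* Illustrative QP; not bnxt_re's struct. */
    struct bnxt_qp_sketch {
            spinlock_t sq_lock;             /* protect sq */
    };

    /* Shared WQE-posting helper: asserts the locking contract
     * instead of taking the lock itself. */
    static void bnxt_sketch_post_one(struct bnxt_qp_sketch *qp)
    {
            lockdep_assert_held(&qp->sq_lock);
            /* build the WQE and ring the SQ doorbell */
    }

    static void bnxt_sketch_post_send(struct bnxt_qp_sketch *qp)
    {
            unsigned long flags;

            spin_lock_irqsave(&qp->sq_lock, flags); /* cf. bnxt_re_post_send() */
            bnxt_sketch_post_one(qp);
            spin_unlock_irqrestore(&qp->sq_lock, flags);
    }
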
/linux/drivers/infiniband/sw/rxe/

    rxe_verbs.h
          82  spinlock_t sq_lock;  /* guard queue */  (member)

    rxe_verbs.c
         901  spin_lock_irqsave(&qp->sq.sq_lock, flags);       in rxe_post_send_kernel()
         912  spin_unlock_irqrestore(&qp->sq.sq_lock, flags);  in rxe_post_send_kernel()

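rxe differs only in where the lock lives: it is a member of the send-queue object itself, so callers reach it as qp->sq.sq_lock rather than qp->sq_lock. A sketch of that layout, with illustrative names:

    #include <linux/spinlock.h>

    /* Illustrative layout: the lock is embedded in the SQ object,
     * so callers write qp->sq.sq_lock (cf. rxe_post_send_kernel()). */
    struct rxe_sq_sketch {
            spinlock_t sq_lock;             /* guard queue */
            /* queue buffer, producer/consumer indices, ... */
    };

    struct rxe_qp_sketch {
            struct rxe_sq_sketch sq;
    };

    static int rxe_sketch_post_send(struct rxe_qp_sketch *qp)
    {
            unsigned long flags;

            spin_lock_irqsave(&qp->sq.sq_lock, flags);
            /* move WQEs from the posted list into the queue */
            spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
            return 0;
    }
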
/linux/drivers/ufs/core/

    ufshcd-priv.h
         355  __must_hold(&q->sq_lock)        in ufshcd_inc_sq_tail()

    ufs-mcq.c
         478  spin_lock_init(&hwq->sq_lock);  in ufshcd_mcq_init()

    ufshcd.c
        2299  spin_lock(&hwq->sq_lock);       in ufshcd_send_command()
        2303  spin_unlock(&hwq->sq_lock);     in ufshcd_send_command()

/linux/include/ufs/

    ufshcd.h
        1163  spinlock_t sq_lock;  (member)

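The UFS MCQ code is the one entry here that makes the locking contract visible to static analysis: ufshcd_inc_sq_tail() carries a __must_hold(&q->sq_lock) annotation, so sparse can warn if the helper is ever called without the lock that ufshcd_send_command() takes. A sketch of that split, with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Illustrative MCQ hardware queue; not the ufshcd struct. */
    struct ufs_hwq_sketch {
            spinlock_t sq_lock;
            u32 sq_tail_slot;
    };

    /* The tail bump assumes the caller holds sq_lock; __must_hold
     * lets sparse verify that (cf. ufshcd_inc_sq_tail()). */
    static void ufs_sketch_inc_sq_tail(struct ufs_hwq_sketch *q)
            __must_hold(&q->sq_lock)
    {
            q->sq_tail_slot++;
            /* write the new tail to the SQ doorbell register */
    }

    static void ufs_sketch_send_command(struct ufs_hwq_sketch *hwq)
    {
            spin_lock(&hwq->sq_lock);       /* cf. ufshcd_send_command() */
            /* copy the transfer request descriptor into the SQ slot */
            ufs_sketch_inc_sq_tail(hwq);
            spin_unlock(&hwq->sq_lock);
    }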