/linux/drivers/nvme/target/

fabrics-cmd-auth.c:
    17: struct nvmet_sq *sq = container_of(to_delayed_work(work),   // in nvmet_auth_expired_work()
    21:          __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
    22: sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
    23: sq->dhchap_tid = -1;
    26: void nvmet_auth_sq_init(struct nvmet_sq *sq)
    29: INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
    30: sq->authenticated = false;
    31: sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
    36: struct nvmet_ctrl *ctrl = req->sq->ctrl;   // in nvmet_auth_negotiate()
    41:          __func__, ctrl->cntlid, req->sq->qid,
    [all …]
|
auth.c:
   143: u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
   169: if (nvmet_queue_tls_keyid(sq)) {   // in nvmet_setup_auth()
   239: void nvmet_auth_sq_free(struct nvmet_sq *sq)
   241: cancel_delayed_work(&sq->auth_expired_work);
   243: sq->tls_key = NULL;
   245: kfree(sq->dhchap_c1);
   246: sq->dhchap_c1 = NULL;
   247: kfree(sq->dhchap_c2);
   248: sq->dhchap_c2 = NULL;
   249: kfree(sq->dhchap_skey);
   [all …]
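
Taken together, the two entries above show the full delayed-work lifecycle for the per-queue authentication timer: INIT_DELAYED_WORK() at queue init, container_of()/to_delayed_work() in the callback to recover the owning queue, and cancel_delayed_work() at teardown. A minimal kernel-style sketch of that pattern follows; the demo_* names and the abbreviated struct are illustrative, only the workqueue/container_of APIs are the kernel's own, and it compiles in-tree only, not in userspace:

#include <linux/workqueue.h>

struct demo_sq {
	struct delayed_work auth_expired_work;	/* armed per queue */
	int dhchap_tid;
};

static void demo_auth_expired_work(struct work_struct *work)
{
	/* to_delayed_work() is container_of(work, struct delayed_work, work) */
	struct demo_sq *sq = container_of(to_delayed_work(work),
					  struct demo_sq, auth_expired_work);

	sq->dhchap_tid = -1;		/* abandon the expired transaction */
}

static void demo_sq_init(struct demo_sq *sq)
{
	INIT_DELAYED_WORK(&sq->auth_expired_work, demo_auth_expired_work);
}

static void demo_sq_free(struct demo_sq *sq)
{
	cancel_delayed_work(&sq->auth_expired_work);	/* stop before freeing */
}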
|
/linux/tools/include/io_uring/

mini_liburing.h:
    55: struct io_uring_sq sq;   // struct member
    69: struct io_uring_sq *sq, struct io_uring_cq *cq)   // in io_uring_mmap()
    75: sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned int);
    76: ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
    80: sq->khead = ptr + p->sq_off.head;
    81: sq->ktail = ptr + p->sq_off.tail;
    82: sq->kring_mask = ptr + p->sq_off.ring_mask;
    83: sq->kring_entries = ptr + p->sq_off.ring_entries;
    84: sq->kflags = ptr + p->sq_off.flags;
    85: sq->kdropped = ptr + p->sq_off.dropped;
    [all …]
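
The mapping arithmetic in io_uring_mmap() is the standard io_uring SQ-ring setup: one mmap() of the ring region, then per-field pointers resolved through the sq_off offsets that io_uring_setup() returns. A self-contained userspace sketch of the same steps, assuming a kernel with io_uring and <linux/io_uring.h> (raw syscalls instead of liburing, error handling abbreviated):

#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	unsigned int ring_sz, *khead, *ktail, *kring_mask;
	char *ptr;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = (int)syscall(__NR_io_uring_setup, 8, &p);	/* 8 SQ entries */
	if (fd < 0)
		return 1;

	/* SQ ring size: the sqe-index array sits at the end of the region. */
	ring_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned int);
	ptr = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (ptr == MAP_FAILED)
		return 1;

	/* Locate the kernel/user shared fields, as io_uring_mmap() above does. */
	khead      = (unsigned int *)(ptr + p.sq_off.head);
	ktail      = (unsigned int *)(ptr + p.sq_off.tail);
	kring_mask = (unsigned int *)(ptr + p.sq_off.ring_mask);

	printf("sq: head=%u tail=%u mask=%u\n", *khead, *ktail, *kring_mask);
	munmap(ptr, ring_sz);
	close(fd);
	return 0;
}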
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

tx.c:
    46: static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
    49: u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
    50: struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
    56: nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
    57: mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi);
    58: if (xp_tx_metadata_enabled(sq->xsk_pool))
    59:         mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
    61: sq->doorbell_cseg = &nopwqe->ctrl;
    64: bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
    66: struct xsk_buff_pool *pool = sq->xsk_pool;   // in mlx5e_xsk_tx()
    [all …]
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/

wc.c:
   157: struct mlx5_wc_sq *sq)   // in create_wc_sq()
   164:          sizeof(u64) * sq->wq_ctrl.buf.npages;
   173: MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
   184: MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
   185: MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
   187: MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
   189: mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
   192: err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
   203: err = mlx5_core_modify_sq(mdev, sq->sqn, in);
   206:          sq->sqn, err);
   [all …]
|
en_dim.c:
    58: struct mlx5e_txqsq *sq = dim->priv;   // in mlx5e_tx_dim_work()
    62: mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
   124: int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
   126: if (enable == !!sq->dim)
   130: struct mlx5e_channel *c = sq->channel;
   133: dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu,
   134:          c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq);
   138: sq->dim = dim;
   140: __set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
   142: __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
   [all …]
|
/linux/sound/oss/dmasound/

dmasound_core.c:
   411: static int sq_allocate_buffers(struct sound_queue *sq, int num, int size)
   415: if (sq->buffers)
   417: sq->numBufs = num;
   418: sq->bufSize = size;
   419: sq->buffers = kmalloc_array(num, sizeof(char *), GFP_KERNEL);
   420: if (!sq->buffers)
   423: sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL);
   424: if (!sq->buffers[i]) {
   426:         dmasound.mach.dma_free(sq->buffers[i], size);
   427: kfree(sq->buffers);
   [all …]
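
The hits above trace an allocate-then-unwind pattern: an array of buffer pointers, a per-buffer allocation loop, and a rollback that frees everything already allocated when one allocation fails. A hedged userspace analog (struct and names are illustrative; malloc/free stand in for the machine-specific dma_alloc/dma_free):

#include <stdlib.h>

struct sound_queue_demo {
	int numBufs, bufSize;
	char **buffers;
};

static int sq_alloc_demo(struct sound_queue_demo *sq, int num, int size)
{
	int i;

	sq->buffers = calloc(num, sizeof(char *));
	if (!sq->buffers)
		return -1;
	for (i = 0; i < num; i++) {
		sq->buffers[i] = malloc(size);
		if (!sq->buffers[i]) {
			while (i--)
				free(sq->buffers[i]);	/* unwind partial work */
			free(sq->buffers);
			sq->buffers = NULL;
			return -1;
		}
	}
	sq->numBufs = num;
	sq->bufSize = size;
	return 0;
}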
|
/linux/drivers/net/ethernet/marvell/octeontx2/nic/

otx2_txrx.c:
    34: static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
    36: return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
    37:         & (sq->sqe_cnt - 1);
    46: static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
    51: sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
    52:         (sq->head * (sq->sqe_size * 2));
    54: sq->sqe_base = sq->sqe->base;
   106: struct otx2_snd_queue *sq,   // in otx2_xdp_snd_pkt_handler()
   113: sg = &sq->sg[snd_comp->sqe_id];
   127: struct otx2_snd_queue *sq,   // in otx2_snd_pkt_handler()
   [all …]
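
otx2_get_free_sqe() is the classic free-slot formula for a power-of-two ring: one slot stays unused so "full" and "empty" are distinguishable, and adding sqe_cnt before masking keeps the subtraction well-defined after the indices wrap. A small self-checking sketch (names are illustrative, not the driver's):

#include <assert.h>

static unsigned int ring_free_slots(unsigned int cons, unsigned int prod,
				    unsigned int cnt)	/* cnt: power of two */
{
	return (cons - prod - 1 + cnt) & (cnt - 1);
}

int main(void)
{
	assert(ring_free_slots(0, 0, 8) == 7);	/* empty ring: cnt - 1 free */
	assert(ring_free_slots(0, 7, 8) == 0);	/* full: producer one behind */
	assert(ring_free_slots(5, 2, 8) == 2);	/* wrapped indices still work */
	return 0;
}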
|
qos_sq.c:
    38: struct otx2_snd_queue *sq;   // in otx2_qos_sq_aura_pool_init()
    76: sq = &qset->sq[qidx];
    77: sq->sqb_count = 0;
    78: sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
    79: if (!sq->sqb_ptrs) {
    89: sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
    96: if (!sq->sqb_ptrs[ptr])
    98: iova = sq->sqb_ptrs[ptr];
   106: sq->sqb_count = 0;
   107: kfree(sq->sqb_ptrs);
   [all …]
|
otx2_xsk.c:
    57: rq_aq->sq.ena = 0;   // in otx2_xsk_ctx_disable()
   144: struct otx2_snd_queue *sq;   // in otx2_xsk_pool_disable()
   150: sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
   151: sq->xsk_pool = NULL;
   194: void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
   197: sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
   204: struct otx2_snd_queue *sq;   // in otx2_xsk_sq_append_pkt()
   207: sq = &pfvf->qset.sq[qidx];
   208: memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
   210: sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
   [all …]
|
/linux/drivers/net/ethernet/intel/ice/

ice_controlq.c:
     8: (qinfo)->sq.head = prefix##_ATQH;	\
     9: (qinfo)->sq.tail = prefix##_ATQT;	\
    10: (qinfo)->sq.len = prefix##_ATQLEN;	\
    11: (qinfo)->sq.bah = prefix##_ATQBAH;	\
    12: (qinfo)->sq.bal = prefix##_ATQBAL;	\
    13: (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
    14: (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
    15: (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
    16: (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;	\
    77: if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)   // in ice_check_sq_alive()
    [all …]
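
Lines 8-16 are the body of a register-map macro: the ## operator pastes a caller-supplied prefix onto each admin-queue register suffix, so one macro can point the same queue layout at different register banks. A cut-down, compilable illustration of the pattern (the PF_FW prefix and the register values are placeholders, not the driver's actual definitions):

#include <stdio.h>

#define PF_FW_ATQH 0x00080300u		/* placeholder register offsets */
#define PF_FW_ATQT 0x00080400u

#define CQ_INIT_REGS(qinfo, prefix)			\
	do {						\
		(qinfo)->sq.head = prefix##_ATQH;	\
		(qinfo)->sq.tail = prefix##_ATQT;	\
	} while (0)

struct q_regs { unsigned int head, tail; };
struct q_info { struct q_regs sq; };

int main(void)
{
	struct q_info q;

	CQ_INIT_REGS(&q, PF_FW);	/* pastes into PF_FW_ATQH / PF_FW_ATQT */
	printf("head=0x%08x tail=0x%08x\n", q.sq.head, q.sq.tail);
	return 0;
}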
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ktls_rx.c:
   129: static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
   132: sq->db.wqe_info[pi] = *wi;
   136: post_static_params(struct mlx5e_icosq *sq,
   144: if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
   147: pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
   148: wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
   149: mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
   159: icosq_fill_wi(sq, pi, &wi);
   160: sq->pc += num_wqebbs;
   166: post_progress_params(struct mlx5e_icosq *sq,
   [all …]
|
ktls_tx.c:
   525: static void tx_fill_wi(struct mlx5e_txqsq *sq,
   529: struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
   549: post_static_params(struct mlx5e_txqsq *sq,
   557: pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
   558: wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
   559: mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
   563: tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
   564: sq->pc += num_wqebbs;
   568: post_progress_params(struct mlx5e_txqsq *sq,
   576: pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
   [all …]
|
ktls_utils.h:
    62: #define MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi) \
    64:         mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_static_params_wqe)))
    66: #define MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi) \
    68:         mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_progress_params_wqe)))
    70: #define MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi) \
    72:         mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_get_tls_progress_params_wqe)))
    74: #define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \
    76:         mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_dump_wqe)))
|
/linux/drivers/soc/qcom/

qmi_interface.c:
    19:          struct sockaddr_qrtr *sq);
   168: struct sockaddr_qrtr sq;   // in qmi_send_new_lookup()
   178: sq.sq_family = qmi->sq.sq_family;
   179: sq.sq_node = qmi->sq.sq_node;
   180: sq.sq_port = QRTR_PORT_CTRL;
   182: msg.msg_name = &sq;
   183: msg.msg_namelen = sizeof(sq);
   231: struct sockaddr_qrtr sq;   // in qmi_send_new_server()
   240: pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
   241: pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
   [all …]
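
Both senders build a sockaddr_qrtr aimed at the QRTR control port and attach it to a msghdr before sendmsg(). A hedged userspace sketch of that addressing step, assuming <linux/qrtr.h> is available (the payload and socket setup are out of scope here; the fallback define matches the kernel's AF_QIPCRTR value):

#include <linux/qrtr.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef AF_QIPCRTR
#define AF_QIPCRTR 42			/* from <linux/socket.h> */
#endif

static int send_to_ctrl_port(int sock, unsigned int node,
			     const void *buf, size_t len)
{
	struct sockaddr_qrtr sq;
	struct iovec iv = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = node;
	sq.sq_port = QRTR_PORT_CTRL;	/* name-service control port */

	msg.msg_name = &sq;
	msg.msg_namelen = sizeof(sq);
	msg.msg_iov = &iv;
	msg.msg_iovlen = 1;

	return (int)sendmsg(sock, &msg, 0);
}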
|
/linux/net/qrtr/

ns.c:
    53: struct sockaddr_qrtr sq;   // struct member
   195: static int announce_servers(struct sockaddr_qrtr *sq)
   208: ret = service_announce_new(sq, srv);   // in announce_servers()
   291: lookup_notify(&lookup->sq, srv, false);   // in server_del()
   323: static int ctrl_cmd_hello(struct sockaddr_qrtr *sq)
   327: ret = say_hello(sq);   // in ctrl_cmd_hello()
   331: return announce_servers(sq);
   339: struct sockaddr_qrtr sq;   // in ctrl_cmd_bye()
   367: sq.sq_family = AF_QIPCRTR;
   368: sq …   // hit truncated in the search output; in ctrl_cmd_bye()
   391: struct sockaddr_qrtr sq;   // in ctrl_cmd_del_client()
   592: struct sockaddr_qrtr sq;   // in qrtr_ns_worker()
   689: struct sockaddr_qrtr sq;   // in qrtr_ns_init()
   [all …]
/linux/drivers/net/ethernet/huawei/hinic/

hinic_hw_qp.c:
    59: #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
    61: #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
    93:          struct hinic_sq *sq, u16 global_qid)   // in hinic_sq_prepare_ctxt()
   100: wq = sq->wq;
   219: static int alloc_sq_skb_arr(struct hinic_sq *sq)
   221: struct hinic_wq *wq = sq->wq;
   224: skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
   225: sq->saved_skb = vzalloc(skb_arr_size);
   226: if (!sq->saved_skb)
   236: static void free_sq_skb_arr(struct hinic_sq *sq)
   [all …]
|
hinic_tx.c:
    47: #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
   503: qp = container_of(txq->sq, struct hinic_qp, sq);   // in hinic_lb_xmit_frame()
   512: sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
   516: sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
   533: hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
   534: hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
   539: hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
   564: qp = container_of(txq->sq, struct hinic_qp, sq);   // in hinic_xmit_frame()
   593: sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
   600: sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
   [all …]
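
Both transmit paths recover the owning queue pair from the embedded send-queue pointer with container_of(). A minimal userspace re-implementation of the idea (the kernel macro additionally type-checks its ptr argument; the struct fields here are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hinic_sq_demo { int depth; };
struct hinic_qp_demo { int qid; struct hinic_sq_demo sq; };

int main(void)
{
	struct hinic_qp_demo qp = { .qid = 3 };
	struct hinic_sq_demo *sq = &qp.sq;
	struct hinic_qp_demo *back = container_of(sq, struct hinic_qp_demo, sq);

	printf("qid=%d (same object: %d)\n", back->qid, back == &qp);
	return 0;
}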
|
hinic_hw_qp.h:
    57: #define HINIC_MIN_TX_NUM_WQEBBS(sq) \
    58:         (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
   122: struct hinic_sq sq;   // member of struct hinic_qp
   133:          struct hinic_sq *sq, u16 global_qid);
   138: int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
   142: void hinic_clean_sq(struct hinic_sq *sq);
   149: int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
   178: void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
   181: void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
   184: struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
   [all …]
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

qos.c:
    79: struct mlx5e_txqsq *sq;   // in mlx5e_open_qos_sq()
   117: sq = kzalloc(sizeof(*sq), GFP_KERNEL);
   119: if (!sq)
   128: err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
   134: err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
   139: rcu_assign_pointer(qos_sqs[qid], sq);
   144: mlx5e_close_cq(&sq->cq);
   146: kfree(sq);
   160: struct mlx5e_txqsq *sq;   // in mlx5e_activate_qos_sq()
   163: sq = mlx5e_get_qos_sq(priv, node_qid);
   [all …]
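
The hits in mlx5e_open_qos_sq() show an RCU publish order worth noting: the SQ is allocated and fully set up before rcu_assign_pointer() makes it visible, so lockless readers observe either NULL or a complete object, and the error path unwinds in reverse. A hedged kernel-style sketch of that shape (demo_* types and helpers are illustrative stand-ins for the mlx5e ones; in-tree code only):

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_sq { int id; };			/* abbreviated */

int demo_open_cq(struct demo_sq *sq);		/* illustrative stand-ins */
int demo_open_txqsq(struct demo_sq *sq);
void demo_close_cq(struct demo_sq *sq);

static int demo_open_qos_sq(struct demo_sq __rcu **qos_sqs, int qid)
{
	struct demo_sq *sq;
	int err;

	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
	if (!sq)
		return -ENOMEM;

	err = demo_open_cq(sq);
	if (err)
		goto err_free_sq;
	err = demo_open_txqsq(sq);
	if (err)
		goto err_close_cq;

	rcu_assign_pointer(qos_sqs[qid], sq);	/* publish only when complete */
	return 0;

err_close_cq:
	demo_close_cq(sq);
err_free_sq:
	kfree(sq);
	return err;
}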
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/

send.c:
   100: static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
   106: *sq->wq.db = cpu_to_be32(sq->cur_post);
   113: mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
   139: struct mlx5hws_send_ring_sq *sq;   // in mlx5hws_send_engine_post_end()
   143: sq = &ctrl->send_ring->send_sq;
   144: idx = sq->cur_post & sq->buf_mask;
   145: sq->last_idx = idx;
   147: wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
   151:          ((sq->cur_post & 0xffff) << 8) |
   155:          sq->sqn << 8);
   [all …]
|
/linux/drivers/infiniband/hw/bnxt_re/

qplib_fp.c:
    66: qp->sq.condition = false;   // in bnxt_qplib_cancel_phantom_processing()
    67: qp->sq.send_phantom = false;
    68: qp->sq.single = false;
    79: if (!qp->sq.flushed) {   // in __bnxt_qplib_add_flush_qp()
    84: qp->sq.flushed = true;
   129: if (qp->sq.flushed) {   // in __bnxt_qplib_del_flush_qp()
   130: qp->sq.flushed = false;
   147: qp->sq.hwq.prod = 0;   // in bnxt_qplib_clean_qp()
   148: qp->sq.hwq.cons = 0;
   182: struct bnxt_qplib_q *sq = &qp->sq;   // in bnxt_qplib_free_qp_hdr_buf()
   [all …]
|
/linux/drivers/net/

virtio_net.c:
   396: struct send_queue *sq;   // struct member
   531: static void virtnet_xsk_completed(struct send_queue *sq, int num);
   571: static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
   574: return virtqueue_add_outbuf(sq->vq, sq->sg, num,
   590: static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
   598: while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
   634: static void virtnet_free_old_xmit(struct send_queue *sq,
   639: __free_old_xmit(sq, txq, in_napi, stats);
   642: virtnet_xsk_completed(sq, stats->xsk);
   779:          struct send_queue *sq)   // in virtnet_tx_wake_queue()
   [all …]
|
/linux/drivers/infiniband/hw/mlx5/

wr.h:
    33: static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
    38:          (&sq->fbc,
    39:           mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
    51: static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
    59: idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
    60: *cur_edge = get_sq_edge(sq, idx);
    62: *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
    74: static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
    90: handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
   [all …]
|
/linux/drivers/infiniband/hw/cxgb4/

cq.c:
   195:          CQE_QPID_V(wq->sq.qid));   // in insert_recv_cqe()
   229:          CQE_QPID_V(wq->sq.qid));   // in insert_sq_cqe()
   247: if (wq->sq.flush_cidx == -1)   // in c4iw_flush_sq()
   248:         wq->sq.flush_cidx = wq->sq.cidx;
   249: idx = wq->sq.flush_cidx;
   250: while (idx != wq->sq.pidx) {
   251:         swsqe = &wq->sq.sw_sq[idx];
   254:         if (wq->sq.oldest_read == swsqe) {
   258:         if (++idx == wq->sq.size)
   261: wq->sq.flush_cidx += flushed;
   [all …]