Searched refs:tx_ctx (Results 1 – 8 of 8) sorted by relevance
/linux/drivers/net/ethernet/fungible/funeth/
funeth_ktls.c
     32  struct fun_ktls_tx_ctx *tx_ctx;                   in fun_ktls_add() local
     64  tx_ctx = tls_driver_ctx(sk, direction);           in fun_ktls_add()
     65  tx_ctx->tlsid = rsp.tlsid;                        in fun_ktls_add()
     66  tx_ctx->next_seq = start_offload_tcp_sn;          in fun_ktls_add()
     77  struct fun_ktls_tx_ctx *tx_ctx;                   in fun_ktls_del() local
     82  tx_ctx = __tls_driver_ctx(tls_ctx, direction);    in fun_ktls_del()
     89  req.tlsid = tx_ctx->tlsid;                        in fun_ktls_del()
    100  struct fun_ktls_tx_ctx *tx_ctx;                   in fun_ktls_resync() local
    106  tx_ctx = tls_driver_ctx(sk, direction);           in fun_ktls_resync()
    113  req.tlsid = tx_ctx->tlsid;                        in fun_ktls_resync()
    [all …]
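The funeth hits follow the standard kTLS device-offload pattern: the driver keeps its per-connection state (device TLS id, next expected TCP sequence) in the driver-private area of the core's TLS offload context. A minimal sketch of that pattern, assuming hypothetical names (my_ktls_tx_ctx, my_ktls_add, rsp_tlsid) and eliding the actual key programming:

    #include <linux/errno.h>
    #include <net/tls.h>

    /* Driver-private per-connection state, analogous to fun_ktls_tx_ctx. */
    struct my_ktls_tx_ctx {
            u64 tlsid;      /* id the device assigned to this TLS stream */
            u32 next_seq;   /* next TCP sequence the device expects */
    };

    /* Sketch of a .tls_dev_add handler; rsp_tlsid stands in for the
     * device's reply and error handling is elided. */
    static int my_ktls_add(struct net_device *dev, struct sock *sk,
                           enum tls_offload_ctx_dir direction,
                           struct tls_crypto_info *crypto_info,
                           u32 start_offload_tcp_sn)
    {
            struct my_ktls_tx_ctx *tx_ctx;
            u64 rsp_tlsid = 0;      /* would come back from the device */

            if (direction != TLS_OFFLOAD_CTX_DIR_TX)
                    return -EOPNOTSUPP;

            /* ... program the key into the device here ... */

            /* tls_driver_ctx() returns the driver-private area embedded
             * in the TLS offload context for this socket. */
            tx_ctx = tls_driver_ctx(sk, direction);
            tx_ctx->tlsid = rsp_tlsid;
            tx_ctx->next_seq = start_offload_tcp_sn;
            return 0;
    }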
/linux/drivers/infiniband/sw/siw/
siw_qp_tx.c
    708  struct siw_iwarp_tx *c_tx = &qp->tx_ctx;          in siw_prepare_fpdu()
    795  struct siw_iwarp_tx *c_tx = &qp->tx_ctx;          in siw_qp_sq_proc_tx()
    797  int rv = 0, burst_len = qp->tx_ctx.burst;         in siw_qp_sq_proc_tx()
    917  qp->tx_ctx.burst = burst_len;                     in siw_qp_sq_proc_tx()
   1036  if (unlikely(qp->tx_ctx.tx_suspend)) {            in siw_qp_sq_process()
   1094  qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,        in siw_qp_sq_process()
   1095  qp->tx_ctx.bytes_unsent);                         in siw_qp_sq_process()
   1131  if (!qp->tx_ctx.tx_suspend)                       in siw_qp_sq_process()
   1176  !qp->tx_ctx.tx_suspend)) {                        in siw_sq_resume()
   1184  if (!qp->tx_ctx.tx_suspend)                       in siw_sq_resume()
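siw_qp_sq_proc_tx() above reads a send-burst budget out of the tx context, consumes it, and writes the remainder back. A sketch of just that accounting, assuming the siw-internal types from siw.h (my_sq_proc_tx is hypothetical; the FPDU construction is elided):

    #include "siw.h"    /* driver-local: struct siw_qp, struct siw_iwarp_tx */

    static int my_sq_proc_tx(struct siw_qp *qp)
    {
            struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
            int rv = 0, burst_len = qp->tx_ctx.burst;

            while (burst_len > 0 && !c_tx->tx_suspend) {
                    /* ... build and transmit one FPDU from c_tx state ... */
                    burst_len--;
            }
            qp->tx_ctx.burst = burst_len;   /* leftover budget for the next call */
            return rv;
    }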
siw.h
    433  struct siw_iwarp_tx tx_ctx; /* Transmit context */    member
    471  #define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
    472  #define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
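The two macros are the classic container_of idiom: because tx_ctx is embedded in struct siw_qp, a bare transmit-context pointer can be mapped back to its owning QP with no extra back-pointer. The same mapping written as a function, for illustration (ctx_to_qp is hypothetical):

    #include <linux/container_of.h>
    #include "siw.h"    /* driver-local: struct siw_qp with embedded tx_ctx */

    /* Equivalent of the tx_qp() macro: recover the QP that embeds a
     * given transmit context. */
    static inline struct siw_qp *ctx_to_qp(struct siw_iwarp_tx *tx)
    {
            return container_of(tx, struct siw_qp, tx_ctx);
    }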
siw_cm.c
    441  qp->tx_ctx.tx_suspend = 1;                        in siw_qp_cm_drop()
    816  qp->tx_ctx.gso_seg_limit = 0;                     in siw_proc_mpareply()
   1363  cep->qp->tx_ctx.tx_suspend = 1;                   in siw_cm_llp_state_change()
   1638  qp->tx_ctx.gso_seg_limit = 0;                     in siw_accept()
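Both connection-management paths set the same flag: tx_ctx.tx_suspend tells the SQ processor to stop touching the wire once the connection is going away. A sketch of that handshake, with hypothetical helper names (my_cm_drop, my_tx_allowed):

    #include "siw.h"

    /* CM side: mark the QP so no further data is transmitted. */
    static void my_cm_drop(struct siw_qp *qp)
    {
            qp->tx_ctx.tx_suspend = 1;
    }

    /* TX side: checked (as in siw_qp_sq_process) before sending. */
    static bool my_tx_allowed(struct siw_qp *qp)
    {
            return !qp->tx_ctx.tx_suspend;
    }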
siw_qp_rx.c
   1144  if (qp->tx_ctx.orq_fence) {                       in siw_check_tx_fence()
   1165  qp->tx_ctx.orq_fence = 0;                         in siw_check_tx_fence()
   1173  qp->tx_ctx.orq_fence = 0;                         in siw_check_tx_fence()
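Here the receive path releases a read fence: once an inbound read response retires the outstanding-read (ORQ) entry, clearing tx_ctx.orq_fence lets fenced SQ processing continue. A minimal sketch (my_release_tx_fence is hypothetical; the ORQ bookkeeping is elided):

    #include "siw.h"

    static void my_release_tx_fence(struct siw_qp *qp)
    {
            if (qp->tx_ctx.orq_fence) {
                    /* ... retire the completed outstanding-read entry ... */
                    qp->tx_ctx.orq_fence = 0;
                    /* ... then kick SQ processing, e.g. via siw_sq_resume() ... */
            }
    }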
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_tx.c
     98  struct tls_offload_context_tx *tx_ctx;            member
    496  priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);    in mlx5e_ktls_add_tx()
    625  struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;   in tx_sync_info_get() local
    632  spin_lock_irqsave(&tx_ctx->lock, flags);          in tx_sync_info_get()
    633  record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);   in tx_sync_info_get()
    672  spin_unlock_irqrestore(&tx_ctx->lock, flags);     in tx_sync_info_get()
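The mlx5 hits show the TX resync lookup: the offload context's record list is only valid under its spinlock, taken irqsave because the path can run in atomic context. A sketch of that locking pattern around tls_get_record() (my_sync_info_get is a hypothetical wrapper):

    #include <linux/spinlock.h>
    #include <net/tls.h>

    /* Find the TLS record covering tcp_seq and report its record
     * sequence number; record pointers are only stable while
     * tx_ctx->lock is held, so nothing escapes the critical section. */
    static bool my_sync_info_get(struct tls_offload_context_tx *tx_ctx,
                                 u32 tcp_seq, u64 *rcd_sn)
    {
            struct tls_record_info *record;
            unsigned long flags;
            bool found;

            spin_lock_irqsave(&tx_ctx->lock, flags);
            record = tls_get_record(tx_ctx, tcp_seq, rcd_sn);
            found = record != NULL;
            spin_unlock_irqrestore(&tx_ctx->lock, flags);
            return found;
    }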
/linux/net/tipc/
crypto.c
    738  struct tipc_crypto_tx_ctx *tx_ctx;                in tipc_aead_encrypt() local
    773  ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);   in tipc_aead_encrypt()
    809  tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;        in tipc_aead_encrypt()
    810  tx_ctx->aead = aead;                              in tipc_aead_encrypt()
    811  tx_ctx->bearer = b;                               in tipc_aead_encrypt()
    812  memcpy(&tx_ctx->dst, dst, sizeof(*dst));          in tipc_aead_encrypt()
    844  struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;   in tipc_aead_encrypt_done() local
    845  struct tipc_bearer *b = tx_ctx->bearer;           in tipc_aead_encrypt_done()
    846  struct tipc_aead *aead = tx_ctx->aead;            in tipc_aead_encrypt_done()
    855  b->media->send_msg(net, skb, b, &tx_ctx->dst);    in tipc_aead_encrypt_done()
    [all …]
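The TIPC entry is async-crypto plumbing: before firing the AEAD request, the encrypt path stashes a small context (aead, bearer, destination) in the skb control block, and the completion callback recovers it to finish the send. A sketch of both halves, assuming the TIPC-internal types (struct tipc_crypto_tx_ctx is local to net/tipc/crypto.c, so this would live in that file; my_stash_tx_ctx and my_encrypt_done are hypothetical):

    /* Producer side (as in tipc_aead_encrypt): record where the packet
     * must go once the cipher completes. */
    static void my_stash_tx_ctx(struct sk_buff *skb,
                                struct tipc_crypto_tx_ctx *tx_ctx,
                                struct tipc_aead *aead,
                                struct tipc_bearer *b,
                                struct tipc_media_addr *dst)
    {
            tx_ctx->aead = aead;
            tx_ctx->bearer = b;
            memcpy(&tx_ctx->dst, dst, sizeof(*dst));
            TIPC_SKB_CB(skb)->crypto_ctx = tx_ctx;  /* survives the async hop */
    }

    /* Completion side (as in tipc_aead_encrypt_done): pull the context
     * back and hand the encrypted skb to the bearer. Reference drops
     * and error handling are elided. */
    static void my_encrypt_done(struct net *net, struct sk_buff *skb, int err)
    {
            struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
            struct tipc_bearer *b = tx_ctx->bearer;

            if (!err)
                    b->media->send_msg(net, skb, b, &tx_ctx->dst);
    }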
/linux/net/tls/
tls_sw.c
   2670  struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);   in tls_sw_write_space() local
   2673  if (tls_is_tx_ready(tx_ctx) &&                    in tls_sw_write_space()
   2674  !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))   in tls_sw_write_space()
   2675  schedule_delayed_work(&tx_ctx->tx_work.work, 0);  in tls_sw_write_space()
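Finally, the software-TLS write-space hook: BIT_TX_SCHEDULED is a queue-once latch, so the delayed tx work is scheduled only when payload is ready and no schedule is already in flight. A sketch of that gating, using the net/tls-internal declarations (tls_sw_context_tx, tls_is_tx_ready, and BIT_TX_SCHEDULED live in net/tls/tls.h; my_tx_write_space is hypothetical):

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    static void my_tx_write_space(struct tls_sw_context_tx *tx_ctx)
    {
            /* test_and_set_bit is the latch: only the caller that flips
             * the bit 0->1 actually queues the work. */
            if (tls_is_tx_ready(tx_ctx) &&
                !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
                    schedule_delayed_work(&tx_ctx->tx_work.work, 0);
    }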