/linux/drivers/net/ethernet/fungible/funeth/

  funeth_ktls.c
      32  struct fun_ktls_tx_ctx *tx_ctx;  in fun_ktls_add() local
      64  tx_ctx = tls_driver_ctx(sk, direction);  in fun_ktls_add()
      65  tx_ctx->tlsid = rsp.tlsid;  in fun_ktls_add()
      66  tx_ctx->next_seq = start_offload_tcp_sn;  in fun_ktls_add()
      77  struct fun_ktls_tx_ctx *tx_ctx;  in fun_ktls_del() local
      82  tx_ctx = __tls_driver_ctx(tls_ctx, direction);  in fun_ktls_del()
      89  req.tlsid = tx_ctx->tlsid;  in fun_ktls_del()
     100  struct fun_ktls_tx_ctx *tx_ctx;  in fun_ktls_resync() local
     106  tx_ctx = tls_driver_ctx(sk, direction);  in fun_ktls_resync()
     113  req.tlsid = tx_ctx->tlsid;  in fun_ktls_resync()
     [all …]
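The fun_ktls_add() lines above show the common kTLS driver pattern: the device is told about the key, and the per-socket driver area returned by tls_driver_ctx() is used to remember the device's stream handle and the TCP sequence where offload starts. A minimal sketch of that pattern follows; the layout of struct fun_ktls_tx_ctx and the device_tlsid variable standing in for the elided rsp.tlsid are assumptions, not taken from the file.

#include <linux/netdevice.h>
#include <net/tls.h>

struct fun_ktls_tx_ctx {
	__be64 tlsid;		/* device handle for this TLS stream (assumed layout) */
	u32 next_seq;		/* next TCP sequence the device expects */
};

static int example_ktls_add(struct net_device *netdev, struct sock *sk,
			    enum tls_offload_ctx_dir direction,
			    struct tls_crypto_info *crypto_info,
			    u32 start_offload_tcp_sn)
{
	struct fun_ktls_tx_ctx *tx_ctx;
	__be64 device_tlsid = 0;	/* would come from the device response (rsp.tlsid) */

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return -EOPNOTSUPP;

	/* ... send crypto_info to the device and obtain device_tlsid (elided) ... */

	tx_ctx = tls_driver_ctx(sk, direction);	/* driver-private area in the offload ctx */
	tx_ctx->tlsid = device_tlsid;
	tx_ctx->next_seq = start_offload_tcp_sn;
	return 0;
}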
/linux/drivers/infiniband/sw/siw/

  siw_qp.c
     137  qp->tx_ctx.tx_suspend = 1;  in siw_qp_llp_close()
     232  struct siw_iwarp_tx *c_tx = &qp->tx_ctx;  in siw_qp_enable_crc()
     586  if (qp->tx_ctx.mpa_crc_hd) {  in siw_send_terminate()
     587  crypto_shash_init(qp->tx_ctx.mpa_crc_hd);  in siw_send_terminate()
     588  if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,  in siw_send_terminate()
     594  if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,  in siw_send_terminate()
     599  crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);  in siw_send_terminate()
     663  qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;  in siw_qp_nextstate_from_idle()
     664  qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;  in siw_qp_nextstate_from_idle()
     665  qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;  in siw_qp_nextstate_from_idle()
     [all …]
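siw_send_terminate() above runs the MPA CRC over the terminate FPDU with the kernel shash API, reusing the pre-allocated descriptor kept in qp->tx_ctx.mpa_crc_hd. The sketch below shows the same init/update/final call sequence in self-contained form; it allocates a crc32c transform locally instead of reusing a per-QP descriptor, and the function name is hypothetical.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_mpa_crc(const void *data, unsigned int len, __be32 *crc)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rv;

	tfm = crypto_alloc_shash("crc32c", 0, 0);	/* MPA uses CRC32C */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	rv = crypto_shash_init(desc);
	if (!rv)
		rv = crypto_shash_update(desc, data, len);
	if (!rv)
		rv = crypto_shash_final(desc, (u8 *)crc);

	kfree(desc);
	crypto_free_shash(tfm);
	return rv;
}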
  siw_qp_tx.c
     704  struct siw_iwarp_tx *c_tx = &qp->tx_ctx;  in siw_prepare_fpdu()
     792  struct siw_iwarp_tx *c_tx = &qp->tx_ctx;  in siw_qp_sq_proc_tx()
     794  int rv = 0, burst_len = qp->tx_ctx.burst;  in siw_qp_sq_proc_tx()
     914  qp->tx_ctx.burst = burst_len;  in siw_qp_sq_proc_tx()
    1033  if (unlikely(qp->tx_ctx.tx_suspend)) {  in siw_qp_sq_process()
    1091  qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,  in siw_qp_sq_process()
    1092  qp->tx_ctx.bytes_unsent);  in siw_qp_sq_process()
    1128  if (!qp->tx_ctx.tx_suspend)  in siw_qp_sq_process()
    1173  !qp->tx_ctx.tx_suspend)) {  in siw_sq_resume()
    1181  if (!qp->tx_ctx.tx_suspend)  in siw_sq_resume()
  siw.h
     431  struct siw_iwarp_tx tx_ctx; /* Transmit context */  member
     469  #define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
     470  #define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
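The tx_qp() macro above is the usual container_of() idiom: because the transmit context is embedded directly in struct siw_qp (line 431), a pointer to the context can be mapped back to its owning QP without storing a back-pointer. A short illustrative use, with a hypothetical function name:

static void example_suspend_tx(struct siw_iwarp_tx *c_tx)
{
	/* expands to container_of(c_tx, struct siw_qp, tx_ctx) */
	struct siw_qp *qp = tx_qp(c_tx);

	qp->tx_ctx.tx_suspend = 1;	/* same flag siw_qp_cm_drop() sets */
}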
  siw_verbs.c
     436  qp->tx_ctx.gso_seg_limit = 1;  in siw_create_qp()
     437  qp->tx_ctx.zcopy_tx = zcopy_tx;  in siw_create_qp()
     585  qp->tx_ctx.tx_suspend = 1;  in siw_verbs_modify_qp()
     634  kfree(qp->tx_ctx.mpa_crc_hd);  in siw_destroy_qp()
     977  qp->tx_ctx.in_syscall = 1;  in siw_post_send()
     979  if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))  in siw_post_send()
     982  qp->tx_ctx.in_syscall = 0;  in siw_post_send()
  siw_cm.c
     392  qp->tx_ctx.tx_suspend = 1;  in siw_qp_cm_drop()
     767  qp->tx_ctx.gso_seg_limit = 0;  in siw_proc_mpareply()
    1314  cep->qp->tx_ctx.tx_suspend = 1;  in siw_cm_llp_state_change()
    1588  qp->tx_ctx.gso_seg_limit = 0;  in siw_accept()
  siw_qp_rx.c
    1145  if (qp->tx_ctx.orq_fence) {  in siw_check_tx_fence()
    1166  qp->tx_ctx.orq_fence = 0;  in siw_check_tx_fence()
    1174  qp->tx_ctx.orq_fence = 0;  in siw_check_tx_fence()
/linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/

  chcr_ktls.c
     646  struct tls_offload_context_tx *tx_ctx;  in chcr_ktls_cpl_act_open_rpl() local
     685  tx_ctx = tls_offload_ctx_tx(tls_ctx);  in chcr_ktls_cpl_act_open_rpl()
     688  ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,  in chcr_ktls_cpl_act_open_rpl()
    1923  struct tls_offload_context_tx *tx_ctx;  in chcr_ktls_xmit() local
    1943  tx_ctx = tls_offload_ctx_tx(tls_ctx);  in chcr_ktls_xmit()
    1978  spin_lock_irqsave(&tx_ctx->lock, flags);  in chcr_ktls_xmit()
    1984  record = tls_get_record(tx_ctx, tcp_seq,  in chcr_ktls_xmit()
    1990  spin_unlock_irqrestore(&tx_ctx->lock, flags);  in chcr_ktls_xmit()
    2014  spin_unlock_irqrestore(&tx_ctx->lock,  in chcr_ktls_xmit()
    2045  spin_unlock_irqrestore(&tx_ctx->lock, flags);  in chcr_ktls_xmit()
    [all …]
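chcr_ktls_xmit() above looks up the TLS record that covers the TCP sequence of the skb it is about to transmit, holding the offload context's lock so the record list cannot change underneath it. A self-contained sketch of that lookup (the helper name and the copied end_seq output are illustrative, not from the driver):

#include <net/tls.h>

static bool example_record_covers(struct tls_context *tls_ctx, u32 tcp_seq,
				  u64 *rec_sn, u32 *rec_end_seq)
{
	struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, rec_sn);
	if (record) {
		/* copy what we need while the record list is stable */
		*rec_end_seq = record->end_seq;
		found = true;
	}
	spin_unlock_irqrestore(&tx_ctx->lock, flags);

	return found;	/* false: no record covers tcp_seq */
}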
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

  ktls_tx.c
      98  struct tls_offload_context_tx *tx_ctx;  member
     496  priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);  in mlx5e_ktls_add_tx()
     625  struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;  in tx_sync_info_get() local
     632  spin_lock_irqsave(&tx_ctx->lock, flags);  in tx_sync_info_get()
     633  record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);  in tx_sync_info_get()
     672  spin_unlock_irqrestore(&tx_ctx->lock, flags);  in tx_sync_info_get()
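mlx5 caches the pointer returned by tls_offload_ctx_tx() in its per-connection private state at add time (line 496), so the resync path in tx_sync_info_get() can reach the record list without re-deriving it from the socket. A minimal sketch of that caching step; struct example_priv_tx and example_ktls_add_tx() are hypothetical names, and key programming is elided:

#include <net/tls.h>
#include <linux/slab.h>

struct example_priv_tx {
	struct tls_offload_context_tx *tx_ctx;	/* cached for the datapath */
	/* ... device-specific key/stream state ... */
};

static int example_ktls_add_tx(struct sock *sk, u32 start_offload_tcp_sn,
			       struct example_priv_tx **out)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct example_priv_tx *priv_tx;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return -ENOMEM;

	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
	/* ... program the HW key and start_offload_tcp_sn (elided) ... */

	*out = priv_tx;
	return 0;
}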
/linux/net/tls/

  tls_sw.c
    2622  struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);  in tls_sw_write_space() local
    2625  if (tls_is_tx_ready(tx_ctx) &&  in tls_sw_write_space()
    2626  !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))  in tls_sw_write_space()
    2627  schedule_delayed_work(&tx_ctx->tx_work.work, 0);  in tls_sw_write_space()
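tls_sw_write_space() above queues the software-TLS transmit worker at most once: the atomic test_and_set_bit() on BIT_TX_SCHEDULED guarantees that only the caller who flips the bit actually schedules the delayed work. A generic sketch of the same pattern, with illustrative names (struct ex_tx, EX_TX_SCHEDULED) rather than the net/tls ones:

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define EX_TX_SCHEDULED 0

struct ex_tx {
	unsigned long flags;
	struct delayed_work work;	/* handler is expected to clear the bit when done */
};

static void ex_kick_tx(struct ex_tx *tx)
{
	/* test_and_set_bit() is atomic: only the caller that changes the bit
	 * from 0 to 1 queues the work; concurrent callers see it already set.
	 */
	if (!test_and_set_bit(EX_TX_SCHEDULED, &tx->flags))
		schedule_delayed_work(&tx->work, 0);
}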
/linux/drivers/net/ethernet/intel/i40e/

  i40e_main.c
    3447  struct i40e_hmc_obj_txq tx_ctx;  in i40e_configure_tx_ring() local
    3466  memset(&tx_ctx, 0, sizeof(tx_ctx));  in i40e_configure_tx_ring()
    3468  tx_ctx.new_context = 1;  in i40e_configure_tx_ring()
    3469  tx_ctx.base = (ring->dma / 128);  in i40e_configure_tx_ring()
    3470  tx_ctx.qlen = ring->count;  in i40e_configure_tx_ring()
    3473  tx_ctx.fd_ena = 1;  in i40e_configure_tx_ring()
    3475  tx_ctx.timesync_ena = 1;  in i40e_configure_tx_ring()
    3478  tx_ctx.head_wb_ena = 1;  in i40e_configure_tx_ring()
    3479  tx_ctx.head_wb_addr = ring->dma +  in i40e_configure_tx_ring()
    3494  tx_ctx.rdylist =  in i40e_configure_tx_ring()
    [all …]
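Here tx_ctx is not a TLS context at all but the HMC TX-queue context that i40e_configure_tx_ring() fills in before handing the ring to firmware. Collected into one snippet, with comments on what the listed fields mean (expressions truncated in the listing are left elided rather than guessed):

struct i40e_hmc_obj_txq tx_ctx;

memset(&tx_ctx, 0, sizeof(tx_ctx));	/* start from a clean context */
tx_ctx.new_context = 1;			/* this queue context is being (re)created */
tx_ctx.base = ring->dma / 128;		/* descriptor ring base, in 128-byte units */
tx_ctx.qlen = ring->count;		/* number of descriptors in the ring */
tx_ctx.fd_ena = 1;			/* Flow Director enable (conditional in the file) */
tx_ctx.timesync_ena = 1;		/* PTP timestamping enable (conditional in the file) */
tx_ctx.head_wb_ena = 1;			/* let HW write the ring head back to memory */
/* tx_ctx.head_wb_addr and tx_ctx.rdylist come from expressions truncated above */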