| /linux/sound/soc/codecs/ |
| H A D | lpass-tx-macro.c | 245 struct tx_macro *tx; member 251 struct tx_macro *tx; member 294 /* TX Macro */ 438 /* Update volatile list for tx/tx macros */ in tx_is_volatile_register() 613 static int tx_macro_mclk_enable(struct tx_macro *tx, in tx_macro_mclk_enable() argument 616 struct regmap *regmap = tx->regmap; in tx_macro_mclk_enable() 619 if (tx->tx_mclk_users == 0) { in tx_macro_mclk_enable() 631 tx->tx_mclk_users++; in tx_macro_mclk_enable() 633 if (tx in tx_macro_mclk_enable() 651 is_amic_enabled(struct snd_soc_component * component,struct tx_macro * tx,u8 decimator) is_amic_enabled() argument 676 struct tx_macro *tx; tx_macro_tx_hpf_corner_freq_callback() local 720 struct tx_macro *tx; tx_macro_mute_update_callback() local 738 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_mclk_event() local 756 tx_macro_update_smic_sel_v9(struct snd_soc_component * component,struct snd_soc_dapm_widget * widget,struct tx_macro * tx,u16 mic_sel_reg,unsigned int val) tx_macro_update_smic_sel_v9() argument 778 tx_macro_update_smic_sel_v9_2(struct snd_soc_component * component,struct snd_soc_dapm_widget * widget,struct tx_macro * tx,u16 mic_sel_reg,unsigned int val) tx_macro_update_smic_sel_v9_2() argument 806 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_put_dec_enum() local 867 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_tx_mixer_get() local 887 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_tx_mixer_put() local 920 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_enable_dec() local 1074 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_dec_mode_get() local 1090 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_dec_mode_put() local 1104 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_get_bcs() local 1116 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_set_bcs() local 1131 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_hw_params() local 1174 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_get_channel_map() local 1192 struct tx_macro *tx = snd_soc_component_get_drvdata(component); tx_macro_digital_mute() local 2085 struct tx_macro *tx = snd_soc_component_get_drvdata(comp); tx_macro_component_extend() local 2111 struct tx_macro *tx = snd_soc_component_get_drvdata(comp); tx_macro_component_probe() local 2148 struct tx_macro *tx = to_tx_macro(hw); swclk_gate_enable() local 2168 struct tx_macro *tx = to_tx_macro(hw); swclk_gate_disable() local 2180 struct tx_macro *tx = to_tx_macro(hw); swclk_gate_is_enabled() local 2203 tx_macro_register_mclk_output(struct tx_macro * tx) tx_macro_register_mclk_output() argument 2246 struct tx_macro *tx; tx_macro_probe() local 2391 struct tx_macro *tx = dev_get_drvdata(&pdev->dev); tx_macro_remove() local 2404 struct tx_macro *tx = dev_get_drvdata(dev); tx_macro_runtime_suspend() local 2418 struct tx_macro *tx = dev_get_drvdata(dev); tx_macro_runtime_resume() local [all...] |
| /linux/drivers/net/ethernet/google/gve/ |
| H A D | gve_tx_dqo.c | 19 static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count) in gve_has_free_tx_qpl_bufs() argument 23 if (!tx->dqo.qpl) in gve_has_free_tx_qpl_bufs() 26 num_avail = tx->dqo.num_tx_qpl_bufs - in gve_has_free_tx_qpl_bufs() 27 (tx->dqo_tx.alloc_tx_qpl_buf_cnt - in gve_has_free_tx_qpl_bufs() 28 tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs() 34 tx->dqo_tx.free_tx_qpl_buf_cnt = in gve_has_free_tx_qpl_bufs() 35 atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs() 37 num_avail = tx->dqo.num_tx_qpl_bufs - in gve_has_free_tx_qpl_bufs() 38 (tx->dqo_tx.alloc_tx_qpl_buf_cnt - in gve_has_free_tx_qpl_bufs() 39 tx in gve_has_free_tx_qpl_bufs() 45 gve_alloc_tx_qpl_buf(struct gve_tx_ring * tx) gve_alloc_tx_qpl_buf() argument 70 gve_free_tx_qpl_bufs(struct gve_tx_ring * tx,struct gve_tx_pending_packet_dqo * pkt) gve_free_tx_qpl_bufs() argument 102 gve_has_pending_packet(struct gve_tx_ring * tx) gve_has_pending_packet() argument 118 struct gve_tx_ring *tx = &priv->tx[tx_qid]; gve_xdp_tx_flush_dqo() local 124 gve_alloc_pending_packet(struct gve_tx_ring * tx) gve_alloc_pending_packet() argument 153 gve_free_pending_packet(struct gve_tx_ring * tx,struct gve_tx_pending_packet_dqo * pending_packet) gve_free_pending_packet() argument 172 gve_tx_clean_pending_packets(struct gve_tx_ring * tx) gve_tx_clean_pending_packets() argument 204 struct gve_tx_ring *tx = &priv->tx[idx]; gve_tx_stop_ring_dqo() local 217 gve_tx_free_ring_dqo(struct gve_priv * priv,struct gve_tx_ring * tx,struct gve_tx_alloc_rings_cfg * cfg) gve_tx_free_ring_dqo() argument 263 gve_tx_qpl_buf_init(struct gve_tx_ring * tx) gve_tx_qpl_buf_init() argument 289 struct gve_tx_ring *tx = &priv->tx[idx]; gve_tx_start_ring_dqo() local 300 gve_tx_alloc_ring_dqo(struct gve_priv * priv,struct gve_tx_alloc_rings_cfg * cfg,struct gve_tx_ring * tx,int idx) gve_tx_alloc_ring_dqo() argument 408 struct gve_tx_ring *tx = cfg->tx; gve_tx_alloc_rings_dqo() local 448 struct gve_tx_ring *tx = cfg->tx; gve_tx_free_rings_dqo() local 462 num_avail_tx_slots(const struct gve_tx_ring * tx) num_avail_tx_slots() argument 470 gve_has_tx_slots_available(struct gve_tx_ring * tx,u32 slots_req) gve_has_tx_slots_available() argument 485 gve_has_avail_slots_tx_dqo(struct gve_tx_ring * tx,int desc_count,int buf_count) gve_has_avail_slots_tx_dqo() argument 496 gve_maybe_stop_tx_dqo(struct gve_tx_ring * tx,int desc_count,int buf_count) gve_maybe_stop_tx_dqo() argument 537 gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring * tx,u32 * desc_idx,bool enable_csum,u32 len,u64 addr,s16 compl_tag,bool eop,bool is_gso) gve_tx_fill_pkt_desc_dqo() argument 648 gve_tx_update_tail(struct gve_tx_ring * tx,u32 desc_idx) gve_tx_update_tail() argument 667 gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring * tx,struct sk_buff * skb,struct gve_tx_pending_packet_dqo * pkt,s16 completion_tag,u32 * desc_idx,bool is_gso) gve_tx_add_skb_no_copy_dqo() argument 747 gve_tx_buf_get_addr(struct gve_tx_ring * tx,s16 index,void ** va,dma_addr_t * dma_addr) gve_tx_buf_get_addr() argument 758 gve_tx_add_skb_copy_dqo(struct gve_tx_ring * tx,struct sk_buff * skb,struct gve_tx_pending_packet_dqo * pkt,s16 completion_tag,u32 * desc_idx,bool is_gso) gve_tx_add_skb_copy_dqo() argument 811 gve_tx_add_skb_dqo(struct gve_tx_ring * tx,struct sk_buff * skb) gve_tx_add_skb_dqo() argument 960 gve_try_tx_skb(struct gve_priv * priv,struct gve_tx_ring * tx,struct sk_buff * skb) gve_try_tx_skb() argument 1010 gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring * tx,u16 completion_tag) gve_xsk_reorder_queue_push_dqo() argument 1021 gve_xsk_reorder_queue_head(struct gve_tx_ring * tx) gve_xsk_reorder_queue_head() argument 1036 gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring * tx) gve_xsk_reorder_queue_pop_dqo() argument 1046 struct gve_tx_ring *tx; gve_tx_dqo() local 1065 gve_xsk_tx_dqo(struct gve_priv * priv,struct gve_tx_ring * tx,int budget) gve_xsk_tx_dqo() argument 1121 add_to_list(struct gve_tx_ring * tx,struct gve_index_list * list,struct gve_tx_pending_packet_dqo * pending_packet) add_to_list() argument 1138 remove_from_list(struct gve_tx_ring * tx,struct gve_index_list * list,struct gve_tx_pending_packet_dqo * pkt) remove_from_list() argument 1184 gve_handle_packet_completion(struct gve_priv * priv,struct gve_tx_ring * tx,bool is_napi,u16 compl_tag,u64 * bytes,u64 * pkts,bool is_reinjection) gve_handle_packet_completion() argument 1267 gve_handle_miss_completion(struct gve_priv * priv,struct gve_tx_ring * tx,u16 compl_tag,u64 * bytes,u64 * pkts) gve_handle_miss_completion() argument 1299 remove_miss_completions(struct gve_priv * priv,struct gve_tx_ring * tx) remove_miss_completions() argument 1345 remove_timed_out_completions(struct gve_priv * priv,struct gve_tx_ring * tx) remove_timed_out_completions() argument 1369 gve_tx_process_xsk_completions(struct gve_tx_ring * tx) gve_tx_process_xsk_completions() argument 1390 gve_clean_tx_done_dqo(struct gve_priv * priv,struct gve_tx_ring * tx,struct napi_struct * napi) gve_clean_tx_done_dqo() argument 1481 struct gve_tx_ring *tx = block->tx; gve_tx_poll_dqo() local 1507 struct gve_tx_ring *tx; gve_xsk_tx_poll_dqo() local 1519 struct gve_tx_ring *tx = block->tx; gve_xdp_poll_dqo() local 1529 gve_xdp_xmit_one_dqo(struct gve_priv * priv,struct gve_tx_ring * tx,struct xdp_frame * xdpf) gve_xdp_xmit_one_dqo() argument 1580 struct gve_tx_ring *tx; gve_xdp_xmit_dqo() local [all...] |
| H A D | gve_tx.c | 26 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush() local 28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_tx_flush() 32 * We copy skb payloads into the registered segment before writing Tx 33 * descriptors and ringing the Tx doorbell. 74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO 135 /* gve_tx_free_fifo - Return space to Tx FIFO 157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_xdp_done() argument 168 idx = tx in gve_clean_xdp_done() 203 struct gve_tx_ring *tx = &priv->tx[idx]; gve_tx_stop_ring_gqi() local 217 gve_tx_free_ring_gqi(struct gve_priv * priv,struct gve_tx_ring * tx,struct gve_tx_alloc_rings_cfg * cfg) gve_tx_free_ring_gqi() argument 253 struct gve_tx_ring *tx = &priv->tx[idx]; gve_tx_start_ring_gqi() local 263 gve_tx_alloc_ring_gqi(struct gve_priv * priv,struct gve_tx_alloc_rings_cfg * cfg,struct gve_tx_ring * tx,int idx) gve_tx_alloc_ring_gqi() argument 336 struct gve_tx_ring *tx = cfg->tx; gve_tx_alloc_rings_gqi() local 376 struct gve_tx_ring *tx = cfg->tx; gve_tx_free_rings_gqi() local 396 gve_tx_avail(struct gve_tx_ring * tx) gve_tx_avail() argument 401 gve_skb_fifo_bytes_required(struct gve_tx_ring * tx,struct sk_buff * skb) gve_skb_fifo_bytes_required() argument 446 gve_can_tx(struct gve_tx_ring * tx,int bytes_required) gve_can_tx() argument 459 gve_maybe_stop_tx(struct gve_priv * priv,struct gve_tx_ring * tx,struct sk_buff * skb) gve_maybe_stop_tx() argument 562 gve_tx_add_skb_copy(struct gve_priv * priv,struct gve_tx_ring * tx,struct sk_buff * skb) gve_tx_add_skb_copy() argument 636 gve_tx_add_skb_no_copy(struct gve_priv * priv,struct gve_tx_ring * tx,struct sk_buff * skb) gve_tx_add_skb_no_copy() argument 740 struct gve_tx_ring *tx; gve_tx() local 779 gve_tx_fill_xdp(struct gve_priv * priv,struct gve_tx_ring * tx,void * data,int len,void * frame_p,bool is_xsk) gve_tx_fill_xdp() argument 830 struct gve_tx_ring *tx; gve_xdp_xmit_gqi() local 865 gve_xdp_xmit_one(struct gve_priv * priv,struct gve_tx_ring * tx,void * data,int len,void * frame_p) gve_xdp_xmit_one() argument 881 gve_clean_tx_done(struct gve_priv * priv,struct gve_tx_ring * tx,u32 to_do,bool try_to_wake) gve_clean_tx_done() argument 938 gve_tx_load_event_counter(struct gve_priv * priv,struct gve_tx_ring * tx) gve_tx_load_event_counter() argument 946 gve_xsk_tx(struct gve_priv * priv,struct gve_tx_ring * tx,int budget) gve_xsk_tx() argument 977 struct gve_tx_ring *tx; gve_xsk_tx_poll() local 997 struct gve_tx_ring *tx = block->tx; gve_xdp_poll() local 1013 struct gve_tx_ring *tx = block->tx; gve_tx_poll() local 1035 gve_tx_clean_pending(struct gve_priv * priv,struct gve_tx_ring * tx) gve_tx_clean_pending() argument [all...] |
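The gve_tx.c comments above describe the bounce-buffer transmit path: copy the skb payload into a pre-registered FIFO segment, write a Tx descriptor that points into that segment, then ring the Tx doorbell. A minimal standalone model of that flow is sketched below; all toy_* names are invented here and completion/reuse of FIFO space is omitted, so this is an illustration of the idea rather than the gve API.

```c
/* Toy model of a copy-based TX path: payload bytes are copied into a
 * pre-registered FIFO, a descriptor pointing at that FIFO offset is
 * written, and finally the producer index is published ("doorbell").
 * Completion processing and descriptor/FIFO reuse are omitted.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define FIFO_SIZE 4096
#define RING_SIZE 8

struct toy_desc { uint32_t fifo_off; uint32_t len; };

struct toy_tx_ring {
	uint8_t fifo[FIFO_SIZE];        /* "registered" bounce buffer */
	uint32_t fifo_head;             /* next free byte in the FIFO */
	struct toy_desc desc[RING_SIZE];
	uint32_t req;                   /* producer (doorbell) index  */
};

static int toy_tx_one(struct toy_tx_ring *tx, const void *payload, uint32_t len)
{
	uint32_t slot = tx->req % RING_SIZE;

	if (tx->fifo_head + len > FIFO_SIZE)
		return -1;               /* no FIFO space: caller would stop the queue */

	/* 1) copy the payload into the pre-registered segment */
	memcpy(&tx->fifo[tx->fifo_head], payload, len);

	/* 2) write the descriptor describing that copy */
	tx->desc[slot].fifo_off = tx->fifo_head;
	tx->desc[slot].len = len;
	tx->fifo_head += len;

	/* 3) publish the new producer index ("ring the doorbell") */
	tx->req++;
	return 0;
}

int main(void)
{
	struct toy_tx_ring tx = { 0 };
	const char pkt[] = "hello";

	toy_tx_one(&tx, pkt, sizeof(pkt));
	printf("req=%u desc[0].len=%u\n", tx.req, tx.desc[0].len);
	return 0;
}
```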
| /linux/drivers/rpmsg/ |
| H A D | qcom_glink_trace.h | 14 TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx), 15 TP_ARGS(remote, version, features, tx), 20 __field(bool, tx) 26 __entry->tx = tx; 29 __entry->tx ? "tx" : "rx", 39 TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx), 40 TP_ARGS(remote, version, features, tx), 45 __field(bool, tx) 51 __entry->tx = tx; 54 __entry->tx ? "tx" : "rx", [all …]
|
| /linux/drivers/media/i2c/adv748x/ |
| H A D | adv748x-csi2.c | 25 int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx, unsigned int vc) in adv748x_csi2_set_virtual_channel() argument 27 return tx_write(tx, ADV748X_CSI_VC_REF, vc << ADV748X_CSI_VC_REF_SHIFT); in adv748x_csi2_set_virtual_channel() 33 * @tx: CSI2 private entity 36 * @src_pad: Pad number of source to link to this @tx 42 static int adv748x_csi2_register_link(struct adv748x_csi2 *tx, in adv748x_csi2_register_link() argument 57 &tx->sd.entity, ADV748X_CSI2_SINK, in adv748x_csi2_register_link() 63 tx->src = src; in adv748x_csi2_register_link() 104 struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); in adv748x_csi2_registered() local 105 struct adv748x_state *state = tx->state; in adv748x_csi2_registered() 108 adv_dbg(state, "Registered %s (%s)", is_txa(tx) ? "TXA":"TXB", in adv748x_csi2_registered() [all …]
|
| /linux/drivers/firmware/tegra/ |
| H A D | ivc.c | 48 * written through the tx.channel pointer, while the second is only written 62 } tx; member 102 u32 tx = tegra_ivc_header_read_field(map, tx.count); in tegra_ivc_empty() local 115 if (tx - rx > ivc->num_frames) in tegra_ivc_empty() 118 return tx == rx; in tegra_ivc_empty() 123 u32 tx = tegra_ivc_header_read_field(map, tx.count); in tegra_ivc_full() local 130 return tx - rx >= ivc->num_frames; in tegra_ivc_full() 135 u32 tx = tegra_ivc_header_read_field(map, tx.count); in tegra_ivc_available() local 144 return tx - rx; in tegra_ivc_available() 149 unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count); in tegra_ivc_advance_tx() [all …]
|
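The ivc.c hits above compute ring occupancy from two free-running counters (`tx.count` and `rx.count`) using plain unsigned subtraction. A self-contained sketch of that idiom follows, assuming a fixed number of frames; it models the pattern and is not the tegra_ivc API itself.

```c
/* Model of an IVC-style ring that tracks occupancy with two free-running
 * 32-bit counters: the writer only increments tx_count, the reader only
 * increments rx_count.  Unsigned subtraction makes the arithmetic immune
 * to wraparound.
 */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define NUM_FRAMES 16u

static uint32_t avail(uint32_t tx_count, uint32_t rx_count)
{
	uint32_t n = tx_count - rx_count;   /* wraps correctly in unsigned math */

	/* A gap larger than the ring means corrupted state; treat as empty. */
	return n <= NUM_FRAMES ? n : 0;
}

static bool ring_empty(uint32_t tx_count, uint32_t rx_count)
{
	return avail(tx_count, rx_count) == 0;
}

static bool ring_full(uint32_t tx_count, uint32_t rx_count)
{
	return tx_count - rx_count >= NUM_FRAMES;
}

int main(void)
{
	/* Works across the 32-bit wrap: three frames are outstanding. */
	uint32_t rx = 0xfffffffeu, tx = rx + 3;

	assert(avail(tx, rx) == 3);
	assert(!ring_empty(tx, rx) && !ring_full(tx, rx));
	return 0;
}
```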
| /linux/tools/testing/selftests/drivers/net/netdevsim/ |
| H A D | ethtool-coalesce.sh | 31 ["tx-frames-low"]="tx-frame-low" 33 ["tx-frames-high"]="tx-frame-high" 38 ["tx-usecs"]="tx-usecs" 39 ["tx-frames"]="tx-frames" 40 ["tx-usecs-irq"]="tx-usecs-irq" 41 ["tx-frames-irq"]="tx-frames-irq" 45 ["tx-usecs-low"]="tx-usecs-low" 48 ["tx-usecs-high"]="tx-usecs-high" 54 ["tx-frames-low"]="" 56 ["tx-frames-high"]="" [all …]
|
| /linux/tools/testing/selftests/drivers/net/hw/ |
| H A D | ethtool_mm.sh | 47 local tx=$1; shift 55 # MAC Merge TX direction is disabled. That being said, at least the 56 # NXP ENETC does not, and requires tx-enabled on in order to respond to 58 ethtool --set-mm $rx tx-enabled on 59 ethtool --set-mm $tx verify-enabled on tx-enabled on 64 ethtool --json --show-mm $tx | jq -r '.[]."verify-status"' | \ 68 ethtool --json --show-mm $tx | jq -r '.[]."tx-active"' | grep -q 'true' 69 check_err "$?" "pMAC TX is not active" 71 traffic_test $tx "pmac" 72 check_err "$?" "Traffic did not get sent through $tx's pMAC" [all …]
|
| /linux/drivers/net/wireless/ti/wl18xx/ |
| H A D | debugfs.c | 45 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u"); 46 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u"); 47 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u"); 48 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u"); 49 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u"); 50 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u"); 51 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u"); 52 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u"); 53 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u"); 54 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_template [all...] |
| /linux/drivers/fpga/ |
| H A D | machxo2-spi.c | 69 struct spi_transfer rx, tx; in get_status() local 74 memset(&tx, 0, sizeof(tx)); in get_status() 75 tx.tx_buf = cmd; in get_status() 76 tx.len = sizeof(cmd); in get_status() 80 spi_message_add_tail(&tx, &msg); in get_status() 139 struct spi_transfer tx[2]; in machxo2_cleanup() local 144 memset(tx, 0, sizeof(tx)); in machxo2_cleanup() 146 tx[0].tx_buf = &erase; in machxo2_cleanup() 147 tx[0].len = sizeof(erase); in machxo2_cleanup() 148 spi_message_add_tail(&tx[0], &msg); in machxo2_cleanup() [all …]
|
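The machxo2-spi.c hits above build an SPI message from separate command and response transfers (a tx_buf carrying the opcode, followed by a read). The same shape can be exercised from user space through spidev; the device node and the 0x3C opcode below are illustrative placeholders rather than values taken from the driver.

```c
/* User-space analogue of the command-then-read pattern shown above:
 * one transfer clocks out a 4-byte opcode, a second clocks in the reply.
 * Device path and opcode are placeholders -- consult the part's
 * programming guide for the real command set.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t cmd[4] = { 0x3c, 0x00, 0x00, 0x00 };   /* placeholder opcode */
	uint8_t status[4] = { 0 };
	struct spi_ioc_transfer xfer[2];
	int fd;

	memset(xfer, 0, sizeof(xfer));
	xfer[0].tx_buf = (unsigned long)cmd;           /* command phase  */
	xfer[0].len = sizeof(cmd);
	xfer[1].rx_buf = (unsigned long)status;        /* response phase */
	xfer[1].len = sizeof(status);

	fd = open("/dev/spidev0.0", O_RDWR);           /* illustrative node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SPI_IOC_MESSAGE(2), xfer) < 0) {
		perror("SPI_IOC_MESSAGE");
		close(fd);
		return 1;
	}
	printf("status: %02x %02x %02x %02x\n",
	       status[0], status[1], status[2], status[3]);
	close(fd);
	return 0;
}
```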
| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | vnic_sdma.c | 41 struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq); in vnic_sdma_complete() local 42 struct hfi1_vnic_sdma *vnic_sdma = tx->sdma; in vnic_sdma_complete() 45 dev_kfree_skb_any(tx->skb); in vnic_sdma_complete() 46 kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx); in vnic_sdma_complete() 50 struct vnic_txreq *tx) in build_vnic_ulp_payload() argument 56 &tx->txreq, in build_vnic_ulp_payload() 57 tx->skb->data, in build_vnic_ulp_payload() 58 skb_headlen(tx->skb)); in build_vnic_ulp_payload() 62 for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) { in build_vnic_ulp_payload() 63 skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i]; in build_vnic_ulp_payload() [all …]
|
| H A D | ipoib_tx.c | 101 * The tx queue len can be adjusted upward while the interface is in hfi1_ipoib_check_queue_stopped() 103 * The tx queue len can be large enough to overflow the txreq_ring. in hfi1_ipoib_check_queue_stopped() 114 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) in hfi1_ipoib_free_tx() argument 116 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; in hfi1_ipoib_free_tx() 118 if (likely(!tx->sdma_status)) { in hfi1_ipoib_free_tx() 119 dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len); in hfi1_ipoib_free_tx() 124 __func__, tx->sdma_status, in hfi1_ipoib_free_tx() 125 le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx() 126 tx->txq->sde->this_idx); in hfi1_ipoib_free_tx() 129 napi_consume_skb(tx->skb, budget); in hfi1_ipoib_free_tx() [all …]
|
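The ipoib_tx.c comment above notes that the tx queue length can be raised at runtime and can grow large enough to overflow the txreq ring, which is why the driver checks whether the queue should be stopped. Below is a generic sketch of the stop/wake-with-hysteresis guard such checks implement; the thresholds and names are illustrative, not the hfi1 internals.

```c
/* Generic stop/wake guard for a TX ring: stop accepting packets when
 * free slots drop below a threshold, wake the queue again only after
 * completions have freed a comfortable margin (hysteresis).
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE   64u   /* power of two */
#define STOP_THRESH  8u   /* stop when fewer slots than this remain */
#define WAKE_THRESH 16u   /* wake only after this many are free again */

struct txq_model {
	uint32_t head;        /* producer: descriptors posted    */
	uint32_t tail;        /* consumer: descriptors completed */
	bool stopped;
};

static uint32_t free_slots(const struct txq_model *q)
{
	return RING_SIZE - (q->head - q->tail);
}

static bool txq_post(struct txq_model *q)
{
	if (q->stopped)
		return false;
	q->head++;
	if (free_slots(q) < STOP_THRESH)
		q->stopped = true;     /* netif_stop_queue() equivalent */
	return true;
}

static void txq_complete(struct txq_model *q, uint32_t n)
{
	q->tail += n;
	if (q->stopped && free_slots(q) >= WAKE_THRESH)
		q->stopped = false;    /* netif_wake_queue() equivalent */
}

int main(void)
{
	struct txq_model q = { 0 };

	while (txq_post(&q))
		;
	printf("stopped with %u free slots\n", free_slots(&q));
	txq_complete(&q, 20);
	printf("stopped=%d after completions\n", q.stopped);
	return 0;
}
```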
| /linux/drivers/net/ethernet/engleder/ |
| H A D | tsnep_main.c | 10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used 100 /* handle TX/RX queue 0 interrupt */ in tsnep_irq() 116 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx() 278 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument 280 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup() 283 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup() 286 if (tx->page[i]) { in tsnep_tx_ring_cleanup() 287 dma_free_coherent(dmadev, PAGE_SIZE, tx in tsnep_tx_ring_cleanup() 295 tsnep_tx_ring_create(struct tsnep_tx * tx) tsnep_tx_ring_create() argument 334 tsnep_tx_init(struct tsnep_tx * tx) tsnep_tx_init() argument 347 tsnep_tx_enable(struct tsnep_tx * tx) tsnep_tx_enable() argument 358 tsnep_tx_disable(struct tsnep_tx * tx,struct napi_struct * napi) tsnep_tx_disable() argument 381 tsnep_tx_activate(struct tsnep_tx * tx,int index,int length,bool last) tsnep_tx_activate() argument 442 tsnep_tx_desc_available(struct tsnep_tx * tx) tsnep_tx_desc_available() argument 483 tsnep_tx_map(struct sk_buff * skb,struct tsnep_tx * tx,int count,bool do_tstamp) tsnep_tx_map() argument 534 tsnep_tx_unmap(struct tsnep_tx * tx,int index,int count) tsnep_tx_unmap() argument 564 tsnep_xmit_frame_ring(struct sk_buff * skb,struct tsnep_tx * tx) tsnep_xmit_frame_ring() argument 626 tsnep_xdp_tx_map(struct xdp_frame * xdpf,struct tsnep_tx * tx,struct skb_shared_info * shinfo,int count,u32 type) tsnep_xdp_tx_map() argument 683 tsnep_xdp_xmit_frame_ring(struct xdp_frame * xdpf,struct tsnep_tx * tx,u32 type) tsnep_xdp_xmit_frame_ring() argument 725 tsnep_xdp_xmit_flush(struct tsnep_tx * tx) tsnep_xdp_xmit_flush() argument 732 tsnep_xdp_xmit_back(struct tsnep_adapter * adapter,struct xdp_buff * xdp,struct netdev_queue * tx_nq,struct tsnep_tx * tx,bool zc) tsnep_xdp_xmit_back() argument 761 tsnep_xdp_tx_map_zc(struct xdp_desc * xdpd,struct tsnep_tx * tx) tsnep_xdp_tx_map_zc() argument 781 tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc * xdpd,struct tsnep_tx * tx) tsnep_xdp_xmit_frame_ring_zc() argument 791 tsnep_xdp_xmit_zc(struct tsnep_tx * tx) tsnep_xdp_xmit_zc() argument 819 tsnep_tx_poll(struct tsnep_tx * tx,int napi_budget) tsnep_tx_poll() argument 911 tsnep_tx_pending(struct tsnep_tx * tx) tsnep_tx_pending() argument 933 tsnep_tx_open(struct tsnep_tx * tx) tsnep_tx_open() argument 946 tsnep_tx_close(struct tsnep_tx * tx) tsnep_tx_close() argument 1288 tsnep_xdp_run_prog(struct tsnep_rx * rx,struct bpf_prog * prog,struct xdp_buff * xdp,int * status,struct netdev_queue * tx_nq,struct tsnep_tx * tx) tsnep_xdp_run_prog() argument 1333 tsnep_xdp_run_prog_zc(struct tsnep_rx * rx,struct bpf_prog * prog,struct xdp_buff * xdp,int * status,struct netdev_queue * tx_nq,struct tsnep_tx * tx) tsnep_xdp_run_prog_zc() argument 1369 tsnep_finalize_xdp(struct tsnep_adapter * adapter,int status,struct netdev_queue * tx_nq,struct tsnep_tx * tx) tsnep_finalize_xdp() argument 1443 struct tsnep_tx *tx; tsnep_rx_poll() local 1546 struct tsnep_tx *tx; tsnep_rx_poll_zc() local 1919 struct tsnep_tx *tx = queue->tx; tsnep_queue_open() local 2333 struct tsnep_tx *tx; tsnep_netdev_xdp_xmit() local [all...] |
| /linux/crypto/async_tx/ |
| H A D | async_tx.c | 62 * @tx: the new operation 66 struct dma_async_tx_descriptor *tx) in async_tx_channel_switch() argument 74 if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { in async_tx_channel_switch() 75 txd_chain(depend_tx, tx); in async_tx_channel_switch() 100 txd_chain(intr_tx, tx); in async_tx_channel_switch() 121 tx->tx_submit(tx); in async_tx_channel_switch() 143 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, in async_tx_submit() argument 148 tx->callback = submit->cb_fn; in async_tx_submit() 149 tx->callback_param = submit->cb_param; in async_tx_submit() 161 txd_parent(tx)); in async_tx_submit() [all …]
|
| H A D | async_raid6_recov.c | 36 struct dma_async_tx_descriptor *tx; in async_sum_product() local 54 tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, in async_sum_product() 56 if (tx) { in async_sum_product() 57 dma_set_unmap(tx, unmap); in async_sum_product() 58 async_tx_submit(chan, tx, submit); in async_sum_product() 60 return tx; in async_sum_product() 104 struct dma_async_tx_descriptor *tx; in async_mult() local 122 tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, in async_mult() 125 if (tx) { in async_mult() 126 dma_set_unmap(tx, unmap); in async_mult() [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlxbf_gige/ |
| H A D | mlxbf_gige_tx.c | 14 * 1) Allocates TX WQE array using coherent DMA mapping 15 * 2) Allocates TX completion counter using coherent DMA mapping 30 /* Write TX WQE base address into MMIO reg */ in mlxbf_gige_tx_init() 33 /* Allocate address for TX completion count */ in mlxbf_gige_tx_init() 42 /* Write TX CC base address into MMIO reg */ in mlxbf_gige_tx_init() 56 * namely the TX WQE array and the TX completion counter 92 /* Function that returns status of TX ring: 93 * 0: TX ring is full, i.e. there are no 94 * available un-used entries in TX ring. 95 * non-null: TX ring is not full, i.e. there are [all …]
|
| /linux/drivers/clk/mediatek/ |
| H A D | clk-apmixed.c | 32 struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw); in mtk_ref2usb_tx_is_prepared() local 34 return (readl(tx->base_addr) & REF2USB_EN_MASK) == REF2USB_EN_MASK; in mtk_ref2usb_tx_is_prepared() 39 struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw); in mtk_ref2usb_tx_prepare() local 42 val = readl(tx->base_addr); in mtk_ref2usb_tx_prepare() 45 writel(val, tx->base_addr); in mtk_ref2usb_tx_prepare() 49 writel(val, tx->base_addr); in mtk_ref2usb_tx_prepare() 52 writel(val, tx->base_addr); in mtk_ref2usb_tx_prepare() 59 struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw); in mtk_ref2usb_tx_unprepare() local 62 val = readl(tx->base_addr); in mtk_ref2usb_tx_unprepare() 64 writel(val, tx->base_addr); in mtk_ref2usb_tx_unprepare() [all …]
|
| /linux/sound/soc/fsl/ |
| H A D | fsl_sai.c | 55 int adir = (dir == TX) ? RX : TX; in fsl_sai_dir_is_synced() 101 /* Tx IRQ */ in fsl_sai_isr() 111 dev_dbg(dev, "isr: Start of Tx word detected\n"); in fsl_sai_isr() 114 dev_dbg(dev, "isr: Tx Frame sync error detected\n"); in fsl_sai_isr() 170 bool tx = true; in fsl_sai_set_dai_tdm_slot_tx() local 172 sai->slots[tx] = slots; in fsl_sai_set_dai_tdm_slot_tx() 173 sai->slot_width[tx] = slot_width; in fsl_sai_set_dai_tdm_slot_tx() 182 bool tx = false; in fsl_sai_set_dai_tdm_slot_rx() local 184 sai->slots[tx] = slots; in fsl_sai_set_dai_tdm_slot_rx() 185 sai->slot_width[tx] = slot_width; in fsl_sai_set_dai_tdm_slot_rx() [all …]
|
| /linux/drivers/net/ethernet/microchip/ |
| H A D | lan743x_main.c | 221 struct lan743x_tx *tx = context; in lan743x_tx_isr() local 222 struct lan743x_adapter *adapter = tx->adapter; in lan743x_tx_isr() 228 INT_BIT_DMA_TX_(tx->channel_number)); in lan743x_tx_isr() 231 if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) { in lan743x_tx_isr() 232 u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); in lan743x_tx_isr() 249 napi_schedule(&tx->napi); in lan743x_tx_isr() 257 INT_BIT_DMA_TX_(tx->channel_number)); in lan743x_tx_isr() 324 lan743x_tx_isr(&adapter->tx[channel], in lan743x_intr_shared_isr() 674 /* map TX interrupt to vector */ in lan743x_intr_open() 678 /* Remove TX interrup in lan743x_intr_open() 1568 lan743x_tx_release_desc(struct lan743x_tx * tx,int descriptor_index,bool cleanup) lan743x_tx_release_desc() argument 1634 lan743x_tx_next_index(struct lan743x_tx * tx,int index) lan743x_tx_next_index() argument 1639 lan743x_tx_release_completed_descriptors(struct lan743x_tx * tx) lan743x_tx_release_completed_descriptors() argument 1647 lan743x_tx_release_all_descriptors(struct lan743x_tx * tx) lan743x_tx_release_all_descriptors() argument 1662 lan743x_tx_get_desc_cnt(struct lan743x_tx * tx,struct sk_buff * skb) lan743x_tx_get_desc_cnt() argument 1675 lan743x_tx_get_avail_desc(struct lan743x_tx * tx) lan743x_tx_get_avail_desc() argument 1731 lan743x_tx_set_timestamping_mode(struct lan743x_tx * tx,bool enable_timestamping,bool enable_onestep_sync) lan743x_tx_set_timestamping_mode() argument 1745 lan743x_tx_frame_start(struct lan743x_tx * tx,unsigned char * first_buffer,unsigned int first_buffer_length,unsigned int frame_length,bool time_stamp,bool check_sum) lan743x_tx_frame_start() argument 1799 lan743x_tx_frame_add_lso(struct lan743x_tx * tx,unsigned int frame_length,int nr_frags) lan743x_tx_frame_add_lso() argument 1841 lan743x_tx_frame_add_fragment(struct lan743x_tx * tx,const skb_frag_t * fragment,unsigned int frame_length) lan743x_tx_frame_add_fragment() argument 1907 lan743x_tx_frame_end(struct lan743x_tx * tx,struct sk_buff * skb,bool time_stamp,bool ignore_sync) lan743x_tx_frame_end() argument 1954 lan743x_tx_xmit_frame(struct lan743x_tx * tx,struct sk_buff * skb) lan743x_tx_xmit_frame() argument 2047 struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi); lan743x_tx_napi_poll() local 2087 lan743x_tx_ring_cleanup(struct lan743x_tx * tx) lan743x_tx_ring_cleanup() argument 2110 lan743x_tx_ring_init(struct lan743x_tx * tx) lan743x_tx_ring_init() argument 2173 lan743x_tx_close(struct lan743x_tx * tx) lan743x_tx_close() argument 2203 lan743x_tx_open(struct lan743x_tx * tx) lan743x_tx_open() argument 3492 struct lan743x_tx *tx; lan743x_hardware_init() local [all...] |
| /linux/sound/soc/mediatek/common/ |
| H A D | mtk-btcvsd.c | 27 /* TX */ 130 spinlock_t tx_lock; /* spinlock for bt tx stream control */ 135 struct mtk_btcvsd_snd_stream *tx; member 209 dev_dbg(bt->dev, "%s(), stream %d, state %d, tx->state %d, rx->state %d, irq_disabled %d\n", in mtk_btcvsd_snd_set_state() 212 bt->tx->state, bt->rx->state, bt->irq_disabled); in mtk_btcvsd_snd_set_state() 216 if (bt->tx->state == BT_SCO_STATE_IDLE && in mtk_btcvsd_snd_set_state() 234 memset(bt->tx, 0, sizeof(*bt->tx)); in mtk_btcvsd_snd_tx_init() 237 bt->tx->packet_size = BTCVSD_TX_PACKET_SIZE; in mtk_btcvsd_snd_tx_init() 238 bt->tx in mtk_btcvsd_snd_tx_init() [all...] |
| /linux/drivers/net/ethernet/intel/ice/ |
| H A D | ice_ptp.c | 93 * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device 96 * Program the device to respond appropriately to the Tx timestamp interrupt 122 /* Configure the Tx timestamp interrupt */ in ice_ptp_cfg_tx_interrupt() 273 * This algorithm works even if the PHC time was updated after a Tx timestamp 274 * was requested, but before the Tx timestamp event was reported from 283 * a second, and (b) discarding any Tx timestamp packet if it has waited for 320 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal 358 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps 359 * @tx: the PTP Tx timestam 367 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx * tx) ice_ptp_is_tx_tracker_up() argument 379 ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx * tx,u8 idx) ice_ptp_req_tx_single_tstamp() argument 426 ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx * tx) ice_ptp_complete_tx_single_tstamp() argument 557 ice_ptp_process_tx_tstamp(struct ice_ptp_tx * tx) ice_ptp_process_tx_tstamp() argument 679 struct ice_ptp_tx *tx = &port->tx; ice_ptp_tx_tstamp_owner() local 710 ice_ptp_tx_tstamp(struct ice_ptp_tx * tx) ice_ptp_tx_tstamp() argument 740 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx * tx) ice_ptp_alloc_tx_tracker() argument 776 ice_ptp_flush_tx_tracker(struct ice_pf * pf,struct ice_ptp_tx * tx) ice_ptp_flush_tx_tracker() argument 830 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx * tx) ice_ptp_mark_tx_tracker_stale() argument 863 ice_ptp_release_tx_tracker(struct ice_pf * pf,struct ice_ptp_tx * tx) ice_ptp_release_tx_tracker() argument 901 ice_ptp_init_tx_e82x(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port) ice_ptp_init_tx_e82x() argument 923 ice_ptp_init_tx(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port) ice_ptp_init_tx() argument 2616 ice_ptp_request_ts(struct ice_ptp_tx * tx,struct sk_buff * skb) ice_ptp_request_ts() argument 2703 struct ice_ptp_tx *tx = &pf->ptp.port.tx; ice_ptp_ts_irq() local [all...] |
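The ice_ptp.c comments above explain that hardware Tx/Rx timestamps are 40 bits wide with 32 bits of nominal nanoseconds, are extended against a cached PHC reading, and are discarded if they have waited for about a second. The sketch below illustrates that extension idea in isolation; the exact window and field layout are assumptions for the example, not the ice driver code.

```c
/* Extend a truncated hardware timestamp (the low 32 bits of a nanosecond
 * counter) to 64 bits using a cached PHC reading taken around the time
 * the timestamp was requested.  Timestamps outside a one-second window
 * are discarded rather than guessed at.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

/* Returns 0 and writes *full_ns on success, -1 if the timestamp cannot
 * be extended reliably (stale, or it predates the cached reading). */
static int extend_ts32(uint64_t cached_phc_ns, uint32_t hw_ts_low,
		       uint64_t *full_ns)
{
	uint32_t delta = hw_ts_low - (uint32_t)cached_phc_ns;

	if (delta < NSEC_PER_SEC) {
		/* Captured within a second after the cached PHC reading. */
		*full_ns = cached_phc_ns + delta;
		return 0;
	}
	return -1;
}

int main(void)
{
	uint64_t cached = 0x12345678ffffff00ull;   /* cached PHC time (ns)              */
	uint32_t hw_low = 0x00000010;              /* captured just after a 32-bit wrap */
	uint64_t full;

	if (!extend_ts32(cached, hw_low, &full))
		printf("extended timestamp: 0x%llx\n", (unsigned long long)full);
	return 0;
}
```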
| /linux/drivers/net/wireless/purelifi/plfxlc/ |
| H A D | usb.c | 38 struct plfxlc_usb_tx *tx = &usb->tx; in plfxlc_send_packet_from_data_queue() local 43 spin_lock_irqsave(&tx->lock, flags); in plfxlc_send_packet_from_data_queue() 47 if (!(tx->station[usb->sidx].flag & STATION_CONNECTED_FLAG)) in plfxlc_send_packet_from_data_queue() 49 if (!(tx->station[usb->sidx].flag & STATION_FIFO_FULL_FLAG)) in plfxlc_send_packet_from_data_queue() 50 skb = skb_peek(&tx->station[usb->sidx].data_list); in plfxlc_send_packet_from_data_queue() 54 skb = skb_dequeue(&tx->station[usb->sidx].data_list); in plfxlc_send_packet_from_data_queue() 57 if (skb_queue_len(&tx->station[usb->sidx].data_list) <= 60) in plfxlc_send_packet_from_data_queue() 60 spin_unlock_irqrestore(&tx->lock, flags); in plfxlc_send_packet_from_data_queue() 71 struct plfxlc_usb_tx *tx; in rx_urb_complete() local 94 tx = &usb->tx; in rx_urb_complete() [all …]
|
| /linux/drivers/net/phy/ |
| H A D | linkmode.c | 19 * 0 1 1 1 TX 21 * 1 X 1 X TX+RX 49 * @tx: boolean from ethtool struct ethtool_pauseparam tx_pause member 53 * capabilities of provided in @tx and @rx. 56 * tx rx Pause AsymDir 62 * Note: this translation from ethtool tx/rx notation to the advertisement 65 * For tx=0 rx=1, meaning transmit is unsupported, receive is supported: 69 * 1 1 1 0 TX + RX - but we have no TX support. 72 * For tx=1 rx=1, meaning we have the capability to transmit and receive 77 * 1 0 0 1 Disabled - but since we do support tx and rx, [all …]
|
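The linkmode.c comments above document the translation from the ethtool tx/rx pause booleans to the Pause/Asym_Pause advertisement bits. A standalone model of that mapping (Pause follows rx, Asym_Pause is set when tx and rx differ), printed for all four combinations; the kernel stores these as bits in a linkmode bitmap rather than in a small struct as shown here.

```c
/* Translate ethtool-style tx/rx pause capability booleans into the
 * advertised Pause / Asym_Pause bits, per the truth table quoted above:
 * Pause = rx, AsymDir = tx XOR rx.
 */
#include <stdbool.h>
#include <stdio.h>

struct pause_adv {
	bool pause;       /* ETHTOOL_LINK_MODE_Pause_BIT      */
	bool asym_pause;  /* ETHTOOL_LINK_MODE_Asym_Pause_BIT */
};

static struct pause_adv set_pause(bool tx, bool rx)
{
	struct pause_adv adv = {
		.pause      = rx,
		.asym_pause = tx != rx,
	};
	return adv;
}

int main(void)
{
	for (int tx = 0; tx <= 1; tx++)
		for (int rx = 0; rx <= 1; rx++) {
			struct pause_adv adv = set_pause(tx, rx);
			printf("tx=%d rx=%d -> Pause=%d AsymDir=%d\n",
			       tx, rx, adv.pause, adv.asym_pause);
		}
	return 0;
}
```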
| /linux/drivers/net/wireless/broadcom/b43/ |
| H A D | radio_2055.h | 21 #define B2055_C1_SP_TXGC1 0x0D /* SP TX GC1 Core 1 */ 22 #define B2055_C1_SP_TXGC2 0x0E /* SP TX GC2 Core 1 */ 23 #define B2055_C2_SP_TXGC1 0x0F /* SP TX GC1 Core 2 */ 24 #define B2055_C2_SP_TXGC2 0x10 /* SP TX GC2 Core 2 */ 30 #define B2055_C1_PD_TX 0x16 /* PD Core 1 TX */ 34 #define B2055_C2_PD_TX 0x1A /* PD Core 2 TX */ 129 #define B2055_C1_RX_TXBBRCAL 0x79 /* Core 1 RX TX BB RCAL */ 130 #define B2055_C1_TX_RF_SPGA 0x7A /* Core 1 TX RF SGM PGA */ 131 #define B2055_C1_TX_RF_SPAD 0x7B /* Core 1 TX RF SGM PAD */ 132 #define B2055_C1_TX_RF_CNTPGA1 0x7C /* Core 1 TX RF counter PGA 1 */ [all …]
|
| /linux/drivers/net/wireless/intel/iwlwifi/fw/api/ |
| H A D | tx.h | 11 * enum iwl_tx_flags - bitmasks for tx_flags in TX command 13 * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame 15 * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. 16 * Otherwise, use rate_n_flags from the TX command 32 * @TX_CMD_FLG_CALIB: activate PA TX power calibrations 73 * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000 74 * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command 93 * enum iwl_tx_pm_timeouts - pm timeout values in TX command 109 * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command 117 * from the table instead of from the TX comman 756 struct iwl_tx_cmd_v6_params tx; global() member 775 struct iwl_tx_cmd_v6_params tx; global() member [all...] |