/linux/drivers/net/ethernet/sfc/siena/
tx_common.c
  in efx_tx_cb_page_count():
     17  static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
     19          return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
  in efx_siena_probe_tx_queue():
     23  int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
     25          struct efx_nic *efx = tx_queue->efx;
     32          tx_queue->ptr_mask = entries - 1;
     36                    tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
     39          tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
     41          if (!tx_queue->buffer)
     44          tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
     45                                      sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
     [all …]
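
Worth noting for readers unfamiliar with sfc's ring layout: the probe path rounds the requested queue size up to a power of two and stores `entries - 1` as `ptr_mask`, so the free-running insert/read counters are reduced to ring slots with a single AND instead of a modulo. A minimal userspace sketch of that idiom (names here are illustrative, not the driver's):

#include <assert.h>
#include <stdio.h>

/* Round up to the next power of two, as the driver does for txq_entries. */
static unsigned int roundup_pow_of_two(unsigned int v)
{
        unsigned int n = 1;

        while (n < v)
                n <<= 1;
        return n;
}

int main(void)
{
        unsigned int entries = roundup_pow_of_two(1000); /* -> 1024 */
        unsigned int ptr_mask = entries - 1;             /* -> 0x3ff */
        unsigned int insert_count = 1030;                /* free-running counter */

        /* Counters increment forever; masking yields the ring slot. */
        printf("slot = %u\n", insert_count & ptr_mask);  /* 1030 & 0x3ff = 6 */
        assert((insert_count & ptr_mask) < entries);
        return 0;
}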
|
tx.c
  in efx_tx_get_copy_buffer():
     26  static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
     29          unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
     31                  &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
     36                  efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
  in efx_enqueue_skb_copy():
     83  static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
     93          buffer = efx_tx_queue_get_insert_buffer(tx_queue);
     95          copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
    106          ++tx_queue->insert_count;
  in __efx_siena_enqueue_skb():
    139  netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
    142          unsigned int old_insert_count = tx_queue->insert_count;
    [all …]
|
tx_common.h
     14  int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
     15  void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
     16  void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
     17  void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
     24  void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
     25  void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
     27  void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
     30  struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
     32  int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
     36  int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
|
farch.c
  in efx_farch_notify_tx_desc():
    283  static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
    288          write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
    290          efx_writed_page(tx_queue->efx, &reg,
    291                          FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
  in efx_farch_push_tx_desc():
    295  static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
    304          write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
    308          efx_writeo_page(tx_queue->efx, &reg,
    309                          FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
  in efx_farch_tx_write():
    317  void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
    322          unsigned old_write_count = tx_queue->write_count;
    [all …]
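
Both notify paths above mask the free-running software `write_count` down to a ring offset before writing it to the TX_DESC_UPD doorbell register, so the NIC learns how far the producer has advanced. A hedged, self-contained sketch of that producer/doorbell split, with the MMIO register reduced to a volatile pointer (struct and field names are stand-ins, not the driver's):

#include <stdint.h>

/* Illustrative stand-ins; the real driver writes FR_AZ_TX_DESC_UPD via
 * efx_writed_page(). */
struct tx_ring {
        unsigned int write_count;       /* software producer, never wraps */
        unsigned int ptr_mask;          /* entries - 1 */
        volatile uint32_t *doorbell;    /* mapped NIC register */
};

static void notify_tx_desc(struct tx_ring *ring)
{
        unsigned int write_ptr = ring->write_count & ring->ptr_mask;

        /* Hardware only needs the masked ring offset, not the raw counter. */
        *ring->doorbell = write_ptr;
}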
|
net_driver.h
    546          struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];   (member)
   1346          int (*tx_probe)(struct efx_tx_queue *tx_queue);
   1347          void (*tx_init)(struct efx_tx_queue *tx_queue);
   1348          void (*tx_remove)(struct efx_tx_queue *tx_queue);
   1349          void (*tx_write)(struct efx_tx_queue *tx_queue);
   1350          netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
   1351          unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
   1542          for (_tx_queue = (_channel)->tx_queue;                          \
   1543               _tx_queue < (_channel)->tx_queue +                         \
   1631          struct efx_tx_queue *tx_queue;   (local in efx_channel_tx_fill_level())
    [all …]
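
The `tx_probe`/`tx_init`/`tx_write`/`tx_enqueue` members above form a per-NIC-generation ops table, so generic queue code can dispatch without knowing the hardware flavour. A compilable miniature of that pattern (struct and function names are invented for illustration):

#include <stdio.h>

struct txq;     /* opaque queue handle for this sketch */

/* Minimal stand-in for the tx_* members of the NIC-type ops struct. */
struct nic_type_ops {
        int  (*tx_probe)(struct txq *q);
        void (*tx_write)(struct txq *q);
};

static int siena_tx_probe(struct txq *q)  { (void)q; puts("siena probe"); return 0; }
static void siena_tx_write(struct txq *q) { (void)q; puts("siena write"); }

static const struct nic_type_ops siena_ops = {
        .tx_probe = siena_tx_probe,
        .tx_write = siena_tx_write,
};

int main(void)
{
        const struct nic_type_ops *type = &siena_ops;   /* chosen at probe time */

        if (type->tx_probe(NULL) == 0)
                type->tx_write(NULL);
        return 0;
}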
|
selftest.c
  in efx_begin_loopback():
    416  static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
    418          struct efx_nic *efx = tx_queue->efx;
    450                  rc = efx_enqueue_skb(tx_queue, skb);
    456                             "%d in %s loopback test\n", tx_queue->label,
  in efx_end_loopback():
    476  static int efx_end_loopback(struct efx_tx_queue *tx_queue,
    479          struct efx_nic *efx = tx_queue->efx;
    508                    tx_queue->label, tx_done, state->packet_count,
    519                    tx_queue->label, rx_good, state->packet_count,
    526          lb_tests->tx_sent[tx_queue->label] += state->packet_count;
    527          lb_tests->tx_done[tx_queue->label] += tx_done;
    [all …]
|
nic.h
    124  int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
    125  void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
    126  void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
    127  void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
    128  void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
    129  unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
|
efx.h
     16  void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
     19  netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
  in efx_enqueue_skb():
     21  static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
     23          return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
     24                                 __efx_siena_enqueue_skb, tx_queue, skb);
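
`efx_enqueue_skb()` wraps the indirect `tx_enqueue` call in `INDIRECT_CALL_1()`, which compares the function pointer against the one likely target and turns a match into a direct call — cheaper than an indirect branch under retpoline mitigations. A userspace approximation (the kernel's macro lives in include/linux/indirect_call_wrapper.h and has essentially this shape):

#include <stdio.h>

/* Userspace approximation of the kernel's indirect-call wrapper. */
#define likely(x) __builtin_expect(!!(x), 1)
#define INDIRECT_CALL_1(f, f1, ...) \
        (likely((f) == (f1)) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

static int fast_path(int x) { return x + 1; }

int main(void)
{
        int (*fn)(int) = fast_path;

        /* Compiles to a direct call to fast_path() when the pointers match. */
        printf("%d\n", INDIRECT_CALL_1(fn, fast_path, 41));    /* 42 */
        return 0;
}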
|
/linux/drivers/net/ethernet/sfc/
nic_common.h
  in efx_tx_desc():
     60  efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
     62          return ((efx_qword_t *)(tx_queue->txd.addr)) + index;
  in efx_nic_tx_is_empty():
     68  static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
     70          unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
     78  int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
  in efx_nic_may_push_tx_desc():
     87  static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
     90          bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
     92          tx_queue->empty_read_count = 0;
     93          return was_empty && tx_queue->write_count - write_count == 1;
  in efx_nic_probe_tx():
    119  static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
    [all …]
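
`efx_nic_may_push_tx_desc()` gates descriptor "push" mode: the descriptor may be written directly along with the doorbell only if the queue was empty and exactly one descriptor was just added. A simplified sketch of that decision (the real test tracks emptiness via `empty_read_count` and a validity flag; this version assumes a plain read/write counter pair):

#include <stdbool.h>

/* Illustrative ring state, not the driver's struct. */
struct txq_state {
        unsigned int write_count;       /* after adding new descriptors */
        unsigned int read_count;        /* completions seen so far */
};

/* Pushing (descriptor + doorbell in one MMIO write) only pays off when
 * the queue was idle and exactly one descriptor was just queued. */
static bool may_push_tx_desc(const struct txq_state *q,
                             unsigned int old_write_count)
{
        bool was_empty = (old_write_count == q->read_count);

        return was_empty && (q->write_count - old_write_count == 1);
}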
|
tx_tso.c
  in prefetch_ptr():
     79  static inline void prefetch_ptr(struct efx_tx_queue *tx_queue)
     81          unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue);
     84          ptr = (char *) (tx_queue->buffer + insert_ptr);
     88          ptr = (char *)(((efx_qword_t *)tx_queue->txd.addr) + insert_ptr);
  in efx_tx_queue_insert():
    102  static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
    112          buffer = efx_tx_queue_get_insert_buffer(tx_queue);
    113          ++tx_queue->insert_count;
    115          EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
    116                                    tx_queue->read_count >=
    117                                    tx_queue->efx->txq_entries);
    [all …]
|
mcdi_functions.c
  in efx_mcdi_tx_init():
    163  int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
    167          bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
    168          bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
    169          size_t entries = tx_queue->txd.len / EFX_BUF_SIZE;
    170          struct efx_channel *channel = tx_queue->channel;
    171          struct efx_nic *efx = tx_queue->efx;
    178          MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
    180          MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
    181          MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
    185          dma_addr = tx_queue->txd.dma_addr;
    [all …]
|
ef100_tx.h
     18  int ef100_tx_probe(struct efx_tx_queue *tx_queue);
     19  void ef100_tx_init(struct efx_tx_queue *tx_queue);
     20  void ef100_tx_write(struct efx_tx_queue *tx_queue);
     25  netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
     26  int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
|
selftest.c
  in efx_begin_loopback():
    415  static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
    417          struct efx_nic *efx = tx_queue->efx;
    449                  rc = efx_enqueue_skb(tx_queue, skb);
    455                             "%d in %s loopback test\n", tx_queue->label,
  in efx_end_loopback():
    475  static int efx_end_loopback(struct efx_tx_queue *tx_queue,
    478          struct efx_nic *efx = tx_queue->efx;
    507                    tx_queue->label, tx_done, state->packet_count,
    518                    tx_queue->label, rx_good, state->packet_count,
    525          lb_tests->tx_sent[tx_queue->label] += state->packet_count;
    526          lb_tests->tx_done[tx_queue->label] += tx_done;
    [all …]
|
net_driver.h
    578          struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];   (member)
   1447          int (*tx_probe)(struct efx_tx_queue *tx_queue);
   1448          void (*tx_init)(struct efx_tx_queue *tx_queue);
   1449          void (*tx_remove)(struct efx_tx_queue *tx_queue);
   1450          void (*tx_write)(struct efx_tx_queue *tx_queue);
   1451          netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
   1452          unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
   1649          for (_tx_queue = (_channel)->tx_queue;                          \
   1650               _tx_queue < (_channel)->tx_queue +                         \
   1738          struct efx_tx_queue *tx_queue;   (local in efx_channel_tx_fill_level())
    [all …]
|
ef10.c
  in efx_ef10_link_piobufs():
    779          struct efx_tx_queue *tx_queue;
    816                  efx_for_each_channel_tx_queue(tx_queue, channel) {
    822                                     tx_queue->channel->channel - 1) *
    832                          if (tx_queue->queue == nic_data->pio_write_vi_base) {
    841                                            tx_queue->queue);
    853                                            tx_queue->queue, index, rc);
    854                                  tx_queue->piobuf = NULL;
    856                                  tx_queue->piobuf =
    859                                  tx_queue->piobuf_offset = offset;
    862                                            tx_queue->queue, index,
    [all …]
|
efx.c
  in efx_get_queue_stats_tx():
    621          struct efx_tx_queue *tx_queue;
    629          efx_for_each_channel_tx_queue(tx_queue, channel) {
    630                  stats->packets += tx_queue->complete_packets -
    631                                    tx_queue->old_complete_packets;
    632                  stats->bytes += tx_queue->complete_bytes -
    633                                  tx_queue->old_complete_bytes;
    638                  stats->hw_gso_packets += tx_queue->tso_bursts -
    639                                           tx_queue->old_tso_bursts;
    640                  stats->hw_gso_wire_packets += tx_queue->tso_packets -
    641                                                tx_queue->old_tso_packets;
    [all …]
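
The stats callback above reports per-queue deltas: each counter is read alongside an `old_*` snapshot taken at the last base-stats read, and only the difference is accumulated. A small sketch of that snapshot-delta idiom (field names mirror the excerpt; the struct itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counters mirroring complete_packets/old_complete_packets. */
struct txq_counters {
        uint64_t complete_packets;
        uint64_t old_complete_packets;  /* snapshot at last base read */
};

static uint64_t txq_packets_delta(const struct txq_counters *q)
{
        /* Unsigned subtraction stays correct even across wraparound. */
        return q->complete_packets - q->old_complete_packets;
}

int main(void)
{
        struct txq_counters q = { .complete_packets = 1500,
                                  .old_complete_packets = 1200 };

        printf("delta = %llu\n",
               (unsigned long long)txq_packets_delta(&q));     /* 300 */
        return 0;
}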
|
mcdi_functions.h
     22  int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue);
     23  void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
     24  void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
|
/linux/drivers/net/ethernet/freescale/
gianfar.c
  in gfar_init_tx_rx_base():
    134                  gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
  in gfar_configure_coalescing():
    249                          if (likely(priv->tx_queue[i]->txcoalescing))
    250                                  gfar_write(baddr + i, priv->tx_queue[i]->txic);
    264                  if (likely(priv->tx_queue[0]->txcoalescing))
    265                          gfar_write(&regs->txic, priv->tx_queue[0]->txic);
  in gfar_get_stats64():
    290                  stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
    291                  stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
  in gfar_alloc_tx_queues():
    416                  priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
    418                  if (!priv->tx_queue[i])
    421                  priv->tx_queue[i]->tx_skbuff = NULL;
    [all …]
|
/linux/drivers/net/wireless/rsi/
rsi_91x_core.c
  in rsi_determine_min_weight_queue():
     36                  q_len = skb_queue_len(&common->tx_queue[ii]);
  in rsi_recalculate_weights():
     60                  q_len = skb_queue_len(&common->tx_queue[ii]);
  in rsi_get_num_pkts_dequeue():
    106          if (skb_queue_len(&common->tx_queue[q_num]))
    107                  skb = skb_peek(&common->tx_queue[q_num]);
    121                  if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
  in rsi_core_determine_hal_queue():
    145          if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {
    149          if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
    172                          q_len = skb_queue_len(&common->tx_queue[ii]);
    187                  q_len = skb_queue_len(&common->tx_queue[q_num]);
    200                  q_len = skb_queue_len(&common->tx_queue[q_num]);
    [all …]
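
The rsi scheduler repeatedly polls `skb_queue_len()` on each access-category queue to decide which one the HAL should serve next. A compilable toy version of a "lowest-weight backlogged queue" scan in the same spirit (the queue layout and weight rule here are assumptions for illustration, not the driver's exact algorithm):

#include <stdio.h>

#define NUM_EDCA_QUEUES 4

/* Illustrative per-queue state; the driver also recalculates weights. */
struct ac_queue {
        unsigned int len;       /* stand-in for skb_queue_len() */
        unsigned int weight;
};

/* Pick the backlogged queue with the smallest weight. */
static int min_weight_queue(const struct ac_queue *q, int n)
{
        int best = -1;

        for (int i = 0; i < n; i++) {
                if (!q[i].len)
                        continue;       /* empty queues don't compete */
                if (best < 0 || q[i].weight < q[best].weight)
                        best = i;
        }
        return best;
}

int main(void)
{
        struct ac_queue q[NUM_EDCA_QUEUES] = {
                { 0, 1 }, { 3, 8 }, { 5, 2 }, { 1, 4 },
        };

        printf("serve queue %d\n", min_weight_queue(q, NUM_EDCA_QUEUES)); /* 2 */
        return 0;
}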
|
/linux/drivers/net/wireless/silabs/wfx/
queue.c
  in wfx_tx_queues_init():
     69                  skb_queue_head_init(&wvif->tx_queue[i].normal);
     70                  skb_queue_head_init(&wvif->tx_queue[i].cab);
     71                  skb_queue_head_init(&wvif->tx_queue[i].offchan);
     72                  wvif->tx_queue[i].priority = priorities[i];
  in wfx_tx_queues_check_empty():
     88                  WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
     89                  WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
  in wfx_tx_queues_put():
    117          struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
  in wfx_pending_drop():
    138                  queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
  in wfx_pending_get():
    164                  queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
  in wfx_tx_queues_has_cab():
    221                  if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
    [all …]
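
Each wfx traffic class carries three skb queues — normal, CAB (content after beacon), and off-channel — all initialised with `skb_queue_head_init()`. A kernel-style sketch of that layout (sketch only, not a buildable module; struct names are invented):

/* Kernel-style sketch: one normal/CAB/off-channel triplet per traffic
 * class, initialised the way wfx_tx_queues_init() does it. */
#include <linux/skbuff.h>

#define NUM_TCS 4

struct tc_queues {
        struct sk_buff_head normal;
        struct sk_buff_head cab;        /* content-after-beacon (multicast) */
        struct sk_buff_head offchan;
};

static void tc_queues_init(struct tc_queues q[NUM_TCS])
{
        int i;

        for (i = 0; i < NUM_TCS; i++) {
                skb_queue_head_init(&q[i].normal);
                skb_queue_head_init(&q[i].cab);
                skb_queue_head_init(&q[i].offchan);
        }
}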
|
/linux/drivers/net/ethernet/sfc/falcon/
selftest.c
  in ef4_begin_loopback():
    418  static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
    420          struct ef4_nic *efx = tx_queue->efx;
    452                  rc = ef4_enqueue_skb(tx_queue, skb);
    458                             "%d in %s loopback test\n", tx_queue->queue,
  in ef4_end_loopback():
    478  static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,
    481          struct ef4_nic *efx = tx_queue->efx;
    510                    tx_queue->queue, tx_done, state->packet_count,
    521                    tx_queue->queue, rx_good, state->packet_count,
    528          lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
    529          lb_tests->tx_done[tx_queue->queue] += tx_done;
    [all …]
|
efx.c
  in ef4_process_channel():
    234          struct ef4_tx_queue *tx_queue;
    240          ef4_for_each_channel_tx_queue(tx_queue, channel) {
    241                  tx_queue->pkts_compl = 0;
    242                  tx_queue->bytes_compl = 0;
    255          ef4_for_each_channel_tx_queue(tx_queue, channel) {
    256                  if (tx_queue->bytes_compl) {
    257                          netdev_tx_completed_queue(tx_queue->core_txq,
    258                                  tx_queue->pkts_compl, tx_queue->bytes_compl);
  in ef4_alloc_channel():
    423          struct ef4_tx_queue *tx_queue;
    435                  tx_queue = &channel->tx_queue[j];
    [all …]
|
/linux/drivers/net/wireless/ath/ath5k/
dma.c
  in ath5k_hw_start_tx_dma():
    132          u32 tx_queue;
    141          tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
    148                  tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
    151                  tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
    156                  tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
    164          ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
  in ath5k_hw_stop_tx_dma():
    191          u32 tx_queue, pending;
    200          tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
    207                  tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
    212                  tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
    [all …]
|
/linux/drivers/net/mctp/
mctp-i2c.c
     71          struct sk_buff_head tx_queue;   (member)
  in mctp_i2c_tx_thread():
    624                  spin_lock_irqsave(&midev->tx_queue.lock, flags);
    625                  skb = __skb_dequeue(&midev->tx_queue);
    628                  spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
    639                                  !skb_queue_empty(&midev->tx_queue) ||
  in mctp_i2c_start_xmit():
    653          spin_lock_irqsave(&midev->tx_queue.lock, flags);
    654          if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) {
    656                  spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
    661          __skb_queue_tail(&midev->tx_queue, skb);
    662          if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN)
    [all …]
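
`mctp_i2c_tx_thread()` shows the standard pairing of a queue's built-in lock with the unlocked `__skb_dequeue()` variant: take `tx_queue.lock` once, then use the `__`-prefixed helpers inside the critical section. A kernel-style fragment of that pattern (sketch only, not a buildable module):

/* Take the queue's own lock and use the unlocked __skb_dequeue variant,
 * as the mctp-i2c TX thread does. */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static struct sk_buff *pop_one(struct sk_buff_head *txq)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&txq->lock, flags);
        skb = __skb_dequeue(txq);       /* NULL when the queue is empty */
        spin_unlock_irqrestore(&txq->lock, flags);

        return skb;
}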
|
/linux/net/nfc/
llcp_commands.c
  in nfc_llcp_send_disconnect():
    352          skb_queue_tail(&local->tx_queue, skb);
  in nfc_llcp_send_connect():
    453          skb_queue_tail(&local->tx_queue, skb);
  in nfc_llcp_send_cc():
    512          skb_queue_tail(&local->tx_queue, skb);
  in nfc_llcp_send_snl_sdres():
    574          skb_queue_tail(&local->tx_queue, skb);
  in nfc_llcp_send_snl_sdreq():
    608          skb_queue_tail(&local->tx_queue, skb);
  in nfc_llcp_send_dm():
    641          skb_queue_head(&local->tx_queue, skb);
  in nfc_llcp_send_i_frame():
    665                      skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
    673                      skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
    675                            skb_queue_len(&sock->tx_queue));
    712          skb_queue_tail(&sock->tx_queue, pdu);
    [all …]
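
`nfc_llcp_send_i_frame()` throttles senders once the socket backlog reaches twice the peer's advertised receive window (`remote_rw`), visible in the repeated `>= 2 * sock->remote_rw` tests above. A trivial standalone illustration of that window check:

#include <stdbool.h>
#include <stdio.h>

/* Block new I-frames once the local backlog reaches twice the peer's
 * advertised receive window, mirroring the 2 * remote_rw test. */
static bool tx_window_full(unsigned int queued, unsigned int remote_rw)
{
        return queued >= 2 * remote_rw;
}

int main(void)
{
        printf("%d\n", tx_window_full(8, 4));   /* 1: back off */
        printf("%d\n", tx_window_full(3, 4));   /* 0: ok to queue */
        return 0;
}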
|