/linux/drivers/net/ethernet/sfc/falcon/
rx.c
     76  ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)  in ef4_rx_buf_next() argument
     78          if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))  in ef4_rx_buf_next()
     79                  return ef4_rx_buffer(rx_queue, 0);  in ef4_rx_buf_next()
    106  static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)  in ef4_reuse_page() argument
    108          struct ef4_nic *efx = rx_queue->efx;  in ef4_reuse_page()
    113          if (unlikely(!rx_queue->page_ring))  in ef4_reuse_page()
    115          index = rx_queue->page_remove & rx_queue->page_ptr_mask;  in ef4_reuse_page()
    116          page = rx_queue->page_ring[index];  in ef4_reuse_page()
    120          rx_queue->page_ring[index] = NULL;  in ef4_reuse_page()
    122          if (rx_queue->page_remove != rx_queue->page_add)  in ef4_reuse_page()
    [all …]
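Both functions above rely on the ring size being a power of two, so an index wraps with a mask rather than a modulo. A minimal, runnable userspace sketch of that wrap pattern (RING_SIZE and the function name are illustrative, not the driver's):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8                   /* must be a power of two */
#define PTR_MASK  (RING_SIZE - 1)     /* plays the role of rx_queue->ptr_mask */

/* Advance a ring index, wrapping to 0 past the last slot. */
static unsigned int ring_next(unsigned int index)
{
        return (index + 1) & PTR_MASK;
}

int main(void)
{
        unsigned int idx;

        assert(ring_next(PTR_MASK) == 0);  /* last slot wraps to 0, as ef4_rx_buf_next() does */
        for (idx = 0; idx < RING_SIZE; idx++)
                printf("%u -> %u\n", idx, ring_next(idx));
        return 0;
}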
/linux/drivers/net/ethernet/sfc/siena/
rx_common.c
     38  static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)  in efx_reuse_page() argument
     40          struct efx_nic *efx = rx_queue->efx;  in efx_reuse_page()
     45          if (unlikely(!rx_queue->page_ring))  in efx_reuse_page()
     47          index = rx_queue->page_remove & rx_queue->page_ptr_mask;  in efx_reuse_page()
     48          page = rx_queue->page_ring[index];  in efx_reuse_page()
     52          rx_queue->page_ring[index] = NULL;  in efx_reuse_page()
     54          if (rx_queue->page_remove != rx_queue->page_add)  in efx_reuse_page()
     55                  ++rx_queue->page_remove;  in efx_reuse_page()
     59          ++rx_queue->page_recycle_count;  in efx_reuse_page()
     67          ++rx_queue->page_recycle_failed;  in efx_reuse_page()
    [all …]
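The recycle ring above is driven by two free-running counters, page_add and page_remove, each masked into the ring on access. A hedged userspace model of that pattern, with plain heap pointers standing in for DMA-mapped pages and invented function names:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_RING_SIZE 4                      /* power of two */
#define PAGE_PTR_MASK  (PAGE_RING_SIZE - 1)

struct page_ring {
        void *slots[PAGE_RING_SIZE];
        unsigned int add;      /* producer counter, free-running */
        unsigned int remove;   /* consumer counter, free-running */
};

/* Take a recycled page if one is available, as efx_reuse_page() does. */
static void *ring_reuse(struct page_ring *r)
{
        unsigned int index = r->remove & PAGE_PTR_MASK;
        void *page = r->slots[index];

        if (!page)
                return NULL;                  /* nothing recycled yet */
        r->slots[index] = NULL;
        if (r->remove != r->add)
                ++r->remove;
        return page;
}

/* Offer a page back to the ring; report failure if the slot is still occupied. */
static int ring_recycle(struct page_ring *r, void *page)
{
        unsigned int index = r->add & PAGE_PTR_MASK;

        if (r->slots[index])
                return 0;                     /* ring full: recycle failed */
        r->slots[index] = page;
        ++r->add;
        return 1;
}

int main(void)
{
        struct page_ring r = { { NULL }, 0, 0 };
        void *p = malloc(64);

        ring_recycle(&r, p);
        printf("reused: %p\n", ring_reuse(&r));  /* prints p's address */
        free(p);
        return 0;
}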
rx.c
     43  static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,  in efx_rx_packet__check_len() argument
     47          struct efx_nic *efx = rx_queue->efx;  in efx_rx_packet__check_len()
     61                  efx_rx_queue_index(rx_queue), len, max_len);  in efx_rx_packet__check_len()
     63          efx_rx_queue_channel(rx_queue)->n_rx_overlength++;  in efx_rx_packet__check_len()
    105                  rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);  in efx_rx_mk_skb()
    121  void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,  in efx_siena_rx_packet() argument
    124          struct efx_nic *efx = rx_queue->efx;  in efx_siena_rx_packet()
    125          struct efx_channel *channel = efx_rx_queue_channel(rx_queue);  in efx_siena_rx_packet()
    128          rx_queue->rx_packets++;  in efx_siena_rx_packet()
    130          rx_buf = efx_rx_buffer(rx_queue, index);  in efx_siena_rx_packet()
    [all …]
farch.c
    465  efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)  in efx_farch_build_rx_desc() argument
    470          rxd = efx_rx_desc(rx_queue, index);  in efx_farch_build_rx_desc()
    471          rx_buf = efx_rx_buffer(rx_queue, index);  in efx_farch_build_rx_desc()
    475                  rx_queue->efx->type->rx_buffer_padding,  in efx_farch_build_rx_desc()
    483  void efx_farch_rx_write(struct efx_rx_queue *rx_queue)  in efx_farch_rx_write() argument
    485          struct efx_nic *efx = rx_queue->efx;  in efx_farch_rx_write()
    489          while (rx_queue->notified_count != rx_queue->added_count) {  in efx_farch_rx_write()
    491                          rx_queue,  in efx_farch_rx_write()
    492                          rx_queue->notified_count & rx_queue->ptr_mask);  in efx_farch_rx_write()
    493                  ++rx_queue->notified_count;  in efx_farch_rx_write()
    [all …]
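efx_farch_rx_write() replays every ring slot between notified_count and added_count before ringing the doorbell. A compact sketch of that catch-up loop; the descriptor build and doorbell write are stubbed out, and the names only loosely mirror the driver's counters:

#include <stdio.h>

#define RING_SIZE 8
#define PTR_MASK  (RING_SIZE - 1)

struct rxq {
        unsigned int added_count;     /* buffers the driver has filled */
        unsigned int notified_count;  /* buffers the NIC has been told about */
};

static void build_rx_desc(struct rxq *q, unsigned int index)
{
        (void)q;                      /* the real driver fills a hardware descriptor */
        printf("building descriptor at slot %u\n", index);
}

/* Tell the hardware about every buffer added since the last notify. */
static void rx_write(struct rxq *q)
{
        while (q->notified_count != q->added_count) {
                build_rx_desc(q, q->notified_count & PTR_MASK);
                ++q->notified_count;
        }
        /* real driver: write the new producer index to the doorbell here */
}

int main(void)
{
        struct rxq q = { 5, 2 };
        rx_write(&q);                 /* builds slots 2, 3, 4 */
        return 0;
}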
efx_channels.c
    534          struct efx_rx_queue *rx_queue;  in efx_alloc_channel() local
    559          rx_queue = &channel->rx_queue;  in efx_alloc_channel()
    560          rx_queue->efx = efx;  in efx_alloc_channel()
    561          timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);  in efx_alloc_channel()
    605          struct efx_rx_queue *rx_queue;  in efx_copy_channel() local
    631          rx_queue = &channel->rx_queue;  in efx_copy_channel()
    632          rx_queue->buffer = NULL;  in efx_copy_channel()
    633          memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));  in efx_copy_channel()
    634          timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);  in efx_copy_channel()
    645          struct efx_rx_queue *rx_queue;  in efx_probe_channel() local
    [all …]
rx_common.h
     55  int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
     56  void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
     57  void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
     58  void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
     68  void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
     73  void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
net_driver.h
    547          struct efx_rx_queue rx_queue;  member
   1356          int (*rx_probe)(struct efx_rx_queue *rx_queue);
   1357          void (*rx_init)(struct efx_rx_queue *rx_queue);
   1358          void (*rx_remove)(struct efx_rx_queue *rx_queue);
   1359          void (*rx_write)(struct efx_rx_queue *rx_queue);
   1360          void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
   1549          return channel->rx_queue.core_index >= 0;  in efx_channel_has_rx_queue()
   1556          return &channel->rx_queue;  in efx_channel_get_rx_queue()
   1564          for (_rx_queue = &(_channel)->rx_queue; \
   1569  efx_rx_queue_channel(struct efx_rx_queue *rx_queue)  in efx_rx_queue_channel() argument
    [all …]
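These rx_* members are per-NIC-type function pointers, letting the farch and MCDI back ends sit behind one dispatch interface (see efx_nic_probe_rx() and friends in nic_common.h below). A self-contained sketch of that ops-table pattern, with made-up types and stub implementations:

#include <stdio.h>

struct rx_queue;                      /* opaque here */

/* Per-NIC-type operations, mirroring the rx_* hooks in the driver's ops struct. */
struct nic_ops {
        int  (*rx_probe)(struct rx_queue *q);
        void (*rx_init)(struct rx_queue *q);
        void (*rx_write)(struct rx_queue *q);
};

struct rx_queue {
        const struct nic_ops *ops;
};

static int  fake_rx_probe(struct rx_queue *q) { (void)q; puts("probe"); return 0; }
static void fake_rx_init(struct rx_queue *q)  { (void)q; puts("init"); }
static void fake_rx_write(struct rx_queue *q) { (void)q; puts("write"); }

static const struct nic_ops fake_ops = {
        .rx_probe = fake_rx_probe,
        .rx_init  = fake_rx_init,
        .rx_write = fake_rx_write,
};

int main(void)
{
        struct rx_queue q = { .ops = &fake_ops };

        if (q.ops->rx_probe(&q) == 0) {   /* dispatch, as efx_nic_probe_rx() does */
                q.ops->rx_init(&q);
                q.ops->rx_write(&q);
        }
        return 0;
}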
nic.h
    131  int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
    132  void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
    133  void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
    134  void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
    135  void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
    136  void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
/linux/drivers/net/ethernet/sfc/
nic_common.h
     98  efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)  in efx_rx_desc() argument
    100          return ((efx_qword_t *)(rx_queue->rxd.addr)) + index;  in efx_rx_desc()
    138  static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)  in efx_nic_probe_rx() argument
    140          return rx_queue->efx->type->rx_probe(rx_queue);  in efx_nic_probe_rx()
    142  static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)  in efx_nic_init_rx() argument
    144          rx_queue->efx->type->rx_init(rx_queue);  in efx_nic_init_rx()
    146  static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)  in efx_nic_remove_rx() argument
    148          rx_queue->efx->type->rx_remove(rx_queue);  in efx_nic_remove_rx()
    150  static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)  in efx_nic_notify_rx_desc() argument
    152          rx_queue->efx->type->rx_write(rx_queue);  in efx_nic_notify_rx_desc()
    [all …]
mcdi_functions.c
    270  int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)  in efx_mcdi_rx_probe() argument
    272          return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd,  in efx_mcdi_rx_probe()
    273                                      (rx_queue->ptr_mask + 1) *  in efx_mcdi_rx_probe()
    278  void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)  in efx_mcdi_rx_init() argument
    280          struct efx_channel *channel = efx_rx_queue_channel(rx_queue);  in efx_mcdi_rx_init()
    281          size_t entries = rx_queue->rxd.len / EFX_BUF_SIZE;  in efx_mcdi_rx_init()
    283          struct efx_nic *efx = rx_queue->efx;  in efx_mcdi_rx_init()
    290          rx_queue->scatter_n = 0;  in efx_mcdi_rx_init()
    291          rx_queue->scatter_len = 0;  in efx_mcdi_rx_init()
    297          MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);  in efx_mcdi_rx_init()
    [all …]
rx_common.h
     55  int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
     56  void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
     57  void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
     58  void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
     59  void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
     61  void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
     75  void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
     79  void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
     81  void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
efx_channels.c
    532          struct efx_rx_queue *rx_queue;  in efx_alloc_channel() local
    557          rx_queue = &channel->rx_queue;  in efx_alloc_channel()
    558          rx_queue->efx = efx;  in efx_alloc_channel()
    559          timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);  in efx_alloc_channel()
    602          struct efx_rx_queue *rx_queue;  in efx_copy_channel() local
    628          rx_queue = &channel->rx_queue;  in efx_copy_channel()
    629          rx_queue->buffer = NULL;  in efx_copy_channel()
    630          memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));  in efx_copy_channel()
    631          timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);  in efx_copy_channel()
    642          struct efx_rx_queue *rx_queue;  in efx_probe_channel() local
    [all …]
tc_counters.c
    280          struct efx_rx_queue *rx_queue = &channel->rx_queue;  in efx_tc_probe_channel() local
    283          rx_queue->core_index = 0;  in efx_tc_probe_channel()
    285          INIT_WORK(&rx_queue->grant_work, efx_mae_counters_grant_credits);  in efx_tc_probe_channel()
    292          struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);  in efx_tc_start_channel() local
    295          return efx_mae_start_counters(efx, rx_queue);  in efx_tc_start_channel()
    300          struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);  in efx_tc_stop_channel() local
    304          rc = efx_mae_stop_counters(efx, rx_queue);  in efx_tc_stop_channel()
    309          rx_queue->grant_credits = false;  in efx_tc_stop_channel()
    310          flush_work(&rx_queue->grant_work);  in efx_tc_stop_channel()
    510  static bool efx_tc_rx(struct efx_rx_queue *rx_queue, u32 mark)  in efx_tc_rx() argument
    [all …]
mcdi_functions.h
     25  int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
     26  void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
     27  void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
     28  void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
ef10.c
   2588  efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)  in efx_ef10_build_rx_desc() argument
   2593          rxd = efx_rx_desc(rx_queue, index);  in efx_ef10_build_rx_desc()
   2594          rx_buf = efx_rx_buffer(rx_queue, index);  in efx_ef10_build_rx_desc()
   2600  static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)  in efx_ef10_rx_write() argument
   2602          struct efx_nic *efx = rx_queue->efx;  in efx_ef10_rx_write()
   2607          write_count = rx_queue->added_count & ~7;  in efx_ef10_rx_write()
   2608          if (rx_queue->notified_count == write_count)  in efx_ef10_rx_write()
   2613                  rx_queue,  in efx_ef10_rx_write()
   2614                  rx_queue->notified_count & rx_queue->ptr_mask);  in efx_ef10_rx_write()
   2615          while (++rx_queue->notified_count != write_count);  in efx_ef10_rx_write()
    [all …]
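Here write_count = added_count & ~7 rounds the producer count down to a whole batch of eight descriptors, so the doorbell is only rung for complete groups. The rounding in isolation, as a trivially runnable sketch:

#include <stdio.h>

/* Round a free-running count down to a multiple of 8, as ef10's
 * rx_write does with write_count = rx_queue->added_count & ~7. */
static unsigned int batch_floor(unsigned int count)
{
        return count & ~7u;
}

int main(void)
{
        unsigned int added;

        for (added = 5; added <= 21; added += 8)
                printf("added=%u -> write up to %u\n", added, batch_floor(added));
        /* 5 -> 0 (no doorbell yet), 13 -> 8, 21 -> 16 */
        return 0;
}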
/linux/drivers/net/ethernet/freescale/
gianfar.c
    109  static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,  in gfar_init_rxbdp() argument
    117          if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)  in gfar_init_rxbdp()
    139                  gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);  in gfar_init_tx_rx_base()
    152                  gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |  in gfar_init_rqprm()
    255                  if (likely(priv->rx_queue[i]->rxcoalescing))  in gfar_configure_coalescing()
    256                          gfar_write(baddr + i, priv->rx_queue[i]->rxic);  in gfar_configure_coalescing()
    267          if (unlikely(priv->rx_queue[0]->rxcoalescing))  in gfar_configure_coalescing()
    268                  gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);  in gfar_configure_coalescing()
    283          stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;  in gfar_get_stats64()
    284          stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;  in gfar_get_stats64()
    [all …]
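gfar_init_rxbdp() tags the last buffer descriptor in the ring so the controller wraps back to the base. A toy version of that init; the flag values and descriptor layout are invented for illustration and are not gianfar's real register format:

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE  4
#define BD_FLAG_EMPTY 0x8000u  /* hypothetical: descriptor owned by hardware */
#define BD_FLAG_WRAP  0x2000u  /* hypothetical: last descriptor, wrap to base */

struct rxbd {
        uint16_t flags;
        uint32_t buf_addr;
};

/* Initialize one descriptor; tag the last one so the ring wraps,
 * mirroring the base + ring_size - 1 comparison above. */
static void init_rxbd(struct rxbd *base, struct rxbd *bdp, uint32_t addr)
{
        bdp->buf_addr = addr;
        bdp->flags = BD_FLAG_EMPTY;
        if (bdp == base + RX_RING_SIZE - 1)
                bdp->flags |= BD_FLAG_WRAP;
}

int main(void)
{
        struct rxbd ring[RX_RING_SIZE];
        unsigned int i;

        for (i = 0; i < RX_RING_SIZE; i++)
                init_rxbd(ring, &ring[i], 0x1000 * i);
        for (i = 0; i < RX_RING_SIZE; i++)
                printf("bd %u flags 0x%04x\n", i, ring[i].flags);
        return 0;
}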
gianfar_ethtool.c
    255          struct gfar_priv_rx_q *rx_queue = NULL;  in gfar_gcoalesce() local
    268          rx_queue = priv->rx_queue[0];  in gfar_gcoalesce()
    271          rxtime = get_ictt_value(rx_queue->rxic);  in gfar_gcoalesce()
    272          rxcount = get_icft_value(rx_queue->rxic);  in gfar_gcoalesce()
    335                  priv->rx_queue[i]->rxcoalescing = 0;  in gfar_scoalesce()
    338                  priv->rx_queue[i]->rxcoalescing = 1;  in gfar_scoalesce()
    342                  priv->rx_queue[i]->rxic = mk_ic_value(  in gfar_scoalesce()
    385          struct gfar_priv_rx_q *rx_queue = NULL;  in gfar_gringparam() local
    388          rx_queue = priv->rx_queue[0];  in gfar_gringparam()
    398          rvals->rx_pending = rx_queue->rx_ring_size;  in gfar_gringparam()
    [all …]
/linux/net/rxrpc/
io_thread.c
     28          struct sk_buff_head *rx_queue;  in rxrpc_encap_rcv() local
     46          rx_queue = &local->rx_queue;  in rxrpc_encap_rcv()
     51                  rx_queue = &local->rx_delay_queue;  in rxrpc_encap_rcv()
     55          skb_queue_tail(rx_queue, skb);  in rxrpc_encap_rcv()
     78          skb_queue_tail(&local->rx_queue, skb);  in rxrpc_error_report()
    433          struct sk_buff_head rx_queue;  in rxrpc_io_thread() local
    444          skb_queue_head_init(&rx_queue);  in rxrpc_io_thread()
    487                  if ((skb = __skb_dequeue(&rx_queue))) {  in rxrpc_io_thread()
    521                          skb_queue_tail(&local->rx_queue, skb);  in rxrpc_io_thread()
    525                  if (!skb_queue_empty(&local->rx_queue)) {  in rxrpc_io_thread()
    [all …]
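The rxrpc I/O thread is a classic single-consumer FIFO: producers append with skb_queue_tail() and the thread drains with __skb_dequeue(). A hedged stand-in using a plain singly linked FIFO, without the locking the real sk_buff_head queues provide:

#include <stdio.h>

struct item {
        int payload;
        struct item *next;
};

struct fifo {
        struct item *head, *tail;
};

/* Counterpart of skb_queue_tail(): append to the queue. */
static void fifo_tail(struct fifo *q, struct item *it)
{
        it->next = NULL;
        if (q->tail)
                q->tail->next = it;
        else
                q->head = it;
        q->tail = it;
}

/* Counterpart of __skb_dequeue(): pop from the head, or NULL if empty. */
static struct item *fifo_dequeue(struct fifo *q)
{
        struct item *it = q->head;

        if (it) {
                q->head = it->next;
                if (!q->head)
                        q->tail = NULL;
        }
        return it;
}

int main(void)
{
        struct fifo q = { NULL, NULL };
        struct item a = { 1, NULL }, b = { 2, NULL };
        struct item *it;

        fifo_tail(&q, &a);
        fifo_tail(&q, &b);
        while ((it = fifo_dequeue(&q)))
                printf("%d\n", it->payload);
        return 0;
}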
/linux/drivers/net/xen-netback/
rx.c
     90          spin_lock_irqsave(&queue->rx_queue.lock, flags);  in xenvif_rx_queue_tail()
     98          if (skb_queue_empty(&queue->rx_queue))  in xenvif_rx_queue_tail()
    101          __skb_queue_tail(&queue->rx_queue, skb);  in xenvif_rx_queue_tail()
    106          spin_unlock_irqrestore(&queue->rx_queue.lock, flags);  in xenvif_rx_queue_tail()
    115          spin_lock_irq(&queue->rx_queue.lock);  in xenvif_rx_dequeue()
    117          skb = __skb_dequeue(&queue->rx_queue);  in xenvif_rx_dequeue()
    119                  xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));  in xenvif_rx_dequeue()
    130          spin_unlock_irq(&queue->rx_queue.lock);  in xenvif_rx_dequeue()
    148          skb = skb_peek(&queue->rx_queue);  in xenvif_rx_queue_drop_expired()
    500                 !skb_queue_empty(&queue->rx_queue) &&  in xenvif_rx_action()
    [all …]
/linux/tools/testing/selftests/bpf/progs/
bpf_iter_tcp4.c
     85          int rx_queue;  in dump_tcp_sock() local
    116                  rx_queue = sp->sk_ack_backlog;  in dump_tcp_sock()
    118                  rx_queue = tp->rcv_nxt - tp->copied_seq;  in dump_tcp_sock()
    119                  if (rx_queue < 0)  in dump_tcp_sock()
    120                          rx_queue = 0;  in dump_tcp_sock()
    127                     tp->write_seq - tp->snd_una, rx_queue,  in dump_tcp_sock()
    214                     "st tx_queue rx_queue tr tm->when retrnsmt"  in dump_tcp4()
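The iterator reports the receive-queue depth the same way /proc/net/tcp does: a listening socket reports its accept backlog, and an established one reports rcv_nxt - copied_seq, clamped at zero because the two sequence counters are read without locking and can momentarily disagree. The computation on its own:

#include <stdio.h>

/* Bytes received but not yet read, clamped at zero as the BPF dumper
 * does; arguments model tp->rcv_nxt and tp->copied_seq. */
static int rx_queue_depth(unsigned int rcv_nxt, unsigned int copied_seq)
{
        int rx_queue = (int)(rcv_nxt - copied_seq);

        return rx_queue < 0 ? 0 : rx_queue;
}

int main(void)
{
        printf("%d\n", rx_queue_depth(1000, 900));  /* 100 */
        printf("%d\n", rx_queue_depth(900, 1000));  /* clamped to 0 */
        return 0;
}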
bpf_iter_tcp6.c
     85          int rx_queue;  in dump_tcp6_sock() local
    116                  rx_queue = sp->sk_ack_backlog;  in dump_tcp6_sock()
    118                  rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq;  in dump_tcp6_sock()
    119                  if (rx_queue < 0)  in dump_tcp6_sock()
    120                          rx_queue = 0;  in dump_tcp6_sock()
    131                     tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,  in dump_tcp6_sock()
    230                     "st tx_queue rx_queue tr tm->when retrnsmt"  in dump_tcp6()
/linux/drivers/net/ethernet/ibm/
ibmveth.c
    106          return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);  in ibmveth_rxq_flags()
    117          return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;  in ibmveth_rxq_pending_buffer()
    137          return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);  in ibmveth_rxq_frame_length()
    419          u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;  in ibmveth_rxq_get_buffer()
    434          cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;  in ibmveth_rxq_harvest_buffer()
    437          if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {  in ibmveth_rxq_harvest_buffer()
    438                  adapter->rx_queue.index = 0;  in ibmveth_rxq_harvest_buffer()
    439                  adapter->rx_queue.toggle = !adapter->rx_queue.toggle;  in ibmveth_rxq_harvest_buffer()
    535          adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *  in ibmveth_open()
    537          adapter->rx_queue.queue_addr =  in ibmveth_open()
    [all …]
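ibmveth marks ring laps with a toggle bit: the consumer index wraps to 0 at num_slots and flips toggle, and an entry counts as pending only while its flag matches the current lap. A compact model with hypothetical field names and a pre-filled ring:

#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 4

struct rxq {
        bool entry_toggle[NUM_SLOTS];  /* written by the producer, per lap */
        unsigned int index;            /* consumer position */
        bool toggle;                   /* consumer's current lap parity */
};

/* An entry is pending only if its toggle matches ours, as
 * ibmveth_rxq_pending_buffer() checks. */
static bool rxq_pending(const struct rxq *q)
{
        return q->entry_toggle[q->index] == q->toggle;
}

/* Consume one entry; flip the lap toggle on wrap-around. */
static void rxq_advance(struct rxq *q)
{
        if (++q->index == NUM_SLOTS) {
                q->index = 0;
                q->toggle = !q->toggle;
        }
}

int main(void)
{
        struct rxq q = { .entry_toggle = { true, true, false, false },
                         .toggle = true };

        while (rxq_pending(&q)) {      /* consumes slots 0 and 1, then stops */
                printf("slot %u ready\n", q.index);
                rxq_advance(&q);
        }
        return 0;
}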
/linux/drivers/net/wan/
hdlc_x25.c
     28          struct sk_buff_head rx_queue;  member
     42          struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);  in x25_rx_queue_kick()
     46                  skb = skb_dequeue(&x25st->rx_queue);  in x25_rx_queue_kick()
     67          skb_queue_tail(&x25st->rx_queue, skb);  in x25_connect_disconnect()
     98          skb_queue_tail(&x25st->rx_queue, skb);  in x25_data_indication()
    348          skb_queue_head_init(&state(hdlc)->rx_queue);  in x25_ioctl()
lapbether.c
     57          struct sk_buff_head rx_queue;  member
     96          skb = skb_dequeue(&lapbeth->rx_queue);  in lapbeth_napi_poll()
    177          skb_queue_tail(&lapbeth->rx_queue, skb);  in lapbeth_data_indication()
    276          skb_queue_tail(&lapbeth->rx_queue, skb);  in lapbeth_connected()
    294          skb_queue_tail(&lapbeth->rx_queue, skb);  in lapbeth_disconnected()
    414          skb_queue_head_init(&lapbeth->rx_queue);  in lapbeth_new_device()
/linux/include/net/tc_act/
tc_skbedit.h
    100          u16 rx_queue;  in tcf_skbedit_rx_queue_mapping() local
    103          rx_queue = rcu_dereference(to_skbedit(a)->params)->queue_mapping;  in tcf_skbedit_rx_queue_mapping()
    106          return rx_queue;  in tcf_skbedit_rx_queue_mapping()
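tcf_skbedit_rx_queue_mapping() snapshots an RCU-managed params block to read queue_mapping locklessly. A loose userspace analogue that substitutes a C11 acquire load for rcu_dereference(); it models only the pointer-publication side, not RCU's grace periods or reclamation:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct skbedit_params {
        uint16_t queue_mapping;
};

/* A writer publishes a new params block with a release store; readers
 * snapshot it with an acquire load (rcu_dereference's role here). */
static _Atomic(struct skbedit_params *) params;

static uint16_t rx_queue_mapping(void)
{
        struct skbedit_params *p =
                atomic_load_explicit(&params, memory_order_acquire);

        return p ? p->queue_mapping : 0;
}

int main(void)
{
        static struct skbedit_params initial = { .queue_mapping = 3 };

        atomic_store_explicit(&params, &initial, memory_order_release);
        printf("rx queue: %u\n", rx_queue_mapping());
        return 0;
}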