1 // SPDX-License-Identifier: GPL-2.0-only
29 * This must be at least 1 to prevent overflow, plus one packet-worth
40 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
45 if (unlikely(!rx_queue->page_ring)) in efx_reuse_page()
47 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
48 page = rx_queue->page_ring[index]; in efx_reuse_page()
52 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
54 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
55 ++rx_queue->page_remove; in efx_reuse_page()
59 ++rx_queue->page_recycle_count; in efx_reuse_page()
63 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_reuse_page()
64 PAGE_SIZE << efx->rx_buffer_order, in efx_reuse_page()
67 ++rx_queue->page_recycle_failed; in efx_reuse_page()
81 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
82 struct page *page = rx_buf->page; in efx_recycle_rx_page()
86 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) in efx_recycle_rx_page()
89 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
90 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
91 unsigned int read_index = rx_queue->page_remove & in efx_recycle_rx_page()
92 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
99 ++rx_queue->page_remove; in efx_recycle_rx_page()
100 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
101 ++rx_queue->page_add; in efx_recycle_rx_page()
104 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
106 put_page(rx_buf->page); in efx_recycle_rx_page()
116 if (unlikely(!rx_queue->page_ring)) in efx_siena_recycle_rx_pages()
122 } while (--n_frags); in efx_siena_recycle_rx_pages()
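
The page recycle ring above is a power-of-two circular buffer indexed by free-running page_add/page_remove counters masked with page_ptr_mask. A minimal userspace sketch of that get/put scheme follows, with invented names and sizes; it omits the driver's extra nudge of page_remove when the two pointers land on the same slot.

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define PTR_MASK  (RING_SIZE - 1)

static void *ring[RING_SIZE];
static unsigned int page_add, page_remove;	/* free-running counters */

/* Producer: store only if the slot is empty, as in efx_recycle_rx_page(). */
static int ring_put(void *page)
{
	unsigned int index = page_add & PTR_MASK;

	if (ring[index])
		return -1;		/* ring full: caller drops the page */
	ring[index] = page;
	++page_add;
	return 0;
}

/* Consumer: take the oldest entry, as in efx_reuse_page(). */
static void *ring_get(void)
{
	unsigned int index = page_remove & PTR_MASK;
	void *page = ring[index];

	if (page) {
		ring[index] = NULL;
		if (page_remove != page_add)
			++page_remove;
	}
	return page;
}

int main(void)
{
	int a = 1, b = 2;

	ring_put(&a);
	ring_put(&b);
	printf("%d\n", *(int *)ring_get());	/* 1 */
	printf("%d\n", *(int *)ring_get());	/* 2 */
	return 0;
}

Because the counters are only ever masked, they can wrap freely without any modulo arithmetic.
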
139 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_recycle_ring()
143 efx->rx_bufs_per_page); in efx_init_rx_recycle_ring()
144 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
145 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
146 if (!rx_queue->page_ring) in efx_init_rx_recycle_ring()
147 rx_queue->page_ptr_mask = 0; in efx_init_rx_recycle_ring()
149 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
154 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_recycle_ring()
157 if (unlikely(!rx_queue->page_ring)) in efx_fini_rx_recycle_ring()
161 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_recycle_ring()
162 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_recycle_ring()
169 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_fini_rx_recycle_ring()
170 PAGE_SIZE << efx->rx_buffer_order, in efx_fini_rx_recycle_ring()
174 kfree(rx_queue->page_ring); in efx_fini_rx_recycle_ring()
175 rx_queue->page_ring = NULL; in efx_fini_rx_recycle_ring()
182 if (rx_buf->page) in efx_fini_rx_buffer()
183 put_page(rx_buf->page); in efx_fini_rx_buffer()
186 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { in efx_fini_rx_buffer()
187 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
190 rx_buf->page = NULL; in efx_fini_rx_buffer()
195 struct efx_nic *efx = rx_queue->efx; in efx_siena_probe_rx_queue()
199 /* Create the smallest power-of-two aligned ring */ in efx_siena_probe_rx_queue()
200 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); in efx_siena_probe_rx_queue()
202 rx_queue->ptr_mask = entries - 1; in efx_siena_probe_rx_queue()
204 netif_dbg(efx, probe, efx->net_dev, in efx_siena_probe_rx_queue()
206 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_siena_probe_rx_queue()
207 rx_queue->ptr_mask); in efx_siena_probe_rx_queue()
210 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_siena_probe_rx_queue()
212 if (!rx_queue->buffer) in efx_siena_probe_rx_queue()
213 return -ENOMEM; in efx_siena_probe_rx_queue()
217 kfree(rx_queue->buffer); in efx_siena_probe_rx_queue()
218 rx_queue->buffer = NULL; in efx_siena_probe_rx_queue()
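
efx_siena_probe_rx_queue() sizes the descriptor ring to the smallest power of two that can hold efx->rxq_entries, clamped to a minimum, so ptr_mask is simply entries - 1. A hedged userspace illustration of that arithmetic; the minimum of 512 and the requested sizes below are assumptions, not values from the listing.

#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	const unsigned long min_dmaq_size = 512;	/* assumed minimum */
	unsigned long requested[] = { 100, 512, 1000, 4096 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned long entries = roundup_pow_of_two(requested[i]);

		if (entries < min_dmaq_size)
			entries = min_dmaq_size;
		printf("requested %4lu -> entries %4lu, ptr_mask 0x%lx\n",
		       requested[i], entries, entries - 1);
	}
	return 0;
}
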
227 struct efx_nic *efx = rx_queue->efx; in efx_siena_init_rx_queue()
230 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_init_rx_queue()
234 rx_queue->added_count = 0; in efx_siena_init_rx_queue()
235 rx_queue->notified_count = 0; in efx_siena_init_rx_queue()
236 rx_queue->removed_count = 0; in efx_siena_init_rx_queue()
237 rx_queue->min_fill = -1U; in efx_siena_init_rx_queue()
240 rx_queue->page_remove = 0; in efx_siena_init_rx_queue()
241 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_siena_init_rx_queue()
242 rx_queue->page_recycle_count = 0; in efx_siena_init_rx_queue()
243 rx_queue->page_recycle_failed = 0; in efx_siena_init_rx_queue()
244 rx_queue->page_recycle_full = 0; in efx_siena_init_rx_queue()
247 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; in efx_siena_init_rx_queue()
249 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_siena_init_rx_queue()
258 rx_queue->max_fill = max_fill; in efx_siena_init_rx_queue()
259 rx_queue->fast_fill_trigger = trigger; in efx_siena_init_rx_queue()
260 rx_queue->refill_enabled = true; in efx_siena_init_rx_queue()
263 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, in efx_siena_init_rx_queue()
264 rx_queue->core_index, 0); in efx_siena_init_rx_queue()
267 netif_err(efx, rx_err, efx->net_dev, in efx_siena_init_rx_queue()
270 efx->xdp_rxq_info_failed = true; in efx_siena_init_rx_queue()
272 rx_queue->xdp_rxq_info_valid = true; in efx_siena_init_rx_queue()
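
The fill limits set up here are plain arithmetic: max_fill leaves EFX_RXD_HEAD_ROOM descriptors unused, and fast_fill_trigger sits one page-batch of buffers below max_fill, so a refill is only attempted once at least one whole batch is missing. A worked example with assumed numbers:

#include <stdio.h>

int main(void)
{
	/* All values below are assumptions for illustration only. */
	unsigned int rxq_entries = 1024;
	unsigned int rxd_head_room = 10;	/* stand-in for EFX_RXD_HEAD_ROOM */
	unsigned int rx_pages_per_batch = 4;
	unsigned int rx_bufs_per_page = 2;

	unsigned int max_fill = rxq_entries - rxd_head_room;
	unsigned int trigger = max_fill -
			       rx_pages_per_batch * rx_bufs_per_page;

	/* Refilling kicks in once fill_level drops below 'trigger' and
	 * tops the ring back up towards 'max_fill'. */
	printf("max_fill = %u, fast_fill_trigger = %u\n", max_fill, trigger);
	return 0;
}
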
284 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_fini_rx_queue()
287 del_timer_sync(&rx_queue->slow_fill); in efx_siena_fini_rx_queue()
290 if (rx_queue->buffer) { in efx_siena_fini_rx_queue()
291 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_siena_fini_rx_queue()
293 unsigned int index = i & rx_queue->ptr_mask; in efx_siena_fini_rx_queue()
302 if (rx_queue->xdp_rxq_info_valid) in efx_siena_fini_rx_queue()
303 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); in efx_siena_fini_rx_queue()
305 rx_queue->xdp_rxq_info_valid = false; in efx_siena_fini_rx_queue()
310 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_remove_rx_queue()
315 kfree(rx_queue->buffer); in efx_siena_remove_rx_queue()
316 rx_queue->buffer = NULL; in efx_siena_remove_rx_queue()
319 /* Unmap a DMA-mapped page. This function is only called for the final RX
325 struct page *page = rx_buf->page; in efx_unmap_rx_buffer()
330 dma_unmap_page(&efx->pci_dev->dev, in efx_unmap_rx_buffer()
331 state->dma_addr, in efx_unmap_rx_buffer()
332 PAGE_SIZE << efx->rx_buffer_order, in efx_unmap_rx_buffer()
342 if (rx_buf->page) { in efx_siena_free_rx_buffers()
343 put_page(rx_buf->page); in efx_siena_free_rx_buffers()
344 rx_buf->page = NULL; in efx_siena_free_rx_buffers()
347 } while (--num_bufs); in efx_siena_free_rx_buffers()
356 ++rx_queue->slow_fill_count; in efx_siena_rx_slow_fill()
361 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); in efx_schedule_slow_fill()
364 /* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
376 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
388 efx->rx_buffer_order); in efx_init_rx_buffers()
390 return -ENOMEM; in efx_init_rx_buffers()
392 dma_map_page(&efx->pci_dev->dev, page, 0, in efx_init_rx_buffers()
393 PAGE_SIZE << efx->rx_buffer_order, in efx_init_rx_buffers()
395 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, in efx_init_rx_buffers()
397 __free_pages(page, efx->rx_buffer_order); in efx_init_rx_buffers()
398 return -EIO; in efx_init_rx_buffers()
401 state->dma_addr = dma_addr; in efx_init_rx_buffers()
404 dma_addr = state->dma_addr; in efx_init_rx_buffers()
411 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
413 rx_buf->dma_addr = dma_addr + efx->rx_ip_align + in efx_init_rx_buffers()
415 rx_buf->page = page; in efx_init_rx_buffers()
416 rx_buf->page_offset = page_offset + efx->rx_ip_align + in efx_init_rx_buffers()
418 rx_buf->len = efx->rx_dma_len; in efx_init_rx_buffers()
419 rx_buf->flags = 0; in efx_init_rx_buffers()
420 ++rx_queue->added_count; in efx_init_rx_buffers()
422 dma_addr += efx->rx_page_buf_step; in efx_init_rx_buffers()
423 page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
424 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
426 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; in efx_init_rx_buffers()
427 } while (++count < efx->rx_pages_per_batch); in efx_init_rx_buffers()
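
efx_init_rx_buffers() maps each (possibly compound) page once and then carves it into rx_page_buf_step-sized receive buffers; the first buffer starts past the efx_rx_page_state header, the IP alignment pad and a further headroom term that is truncated in the listing, and the last buffer in each page is flagged EFX_RX_BUF_LAST_IN_PAGE. A simplified userspace sketch of the carving loop, with assumed struct layout and sizes:

#include <stdio.h>

#define PAGE_SIZE	4096		/* assumed for illustration */

struct rx_page_state {			/* stand-in for efx_rx_page_state */
	unsigned long dma_addr;
};

int main(void)
{
	/* Assumed split parameters, roughly mirroring rx_page_buf_step etc. */
	unsigned int ip_align = 2;
	unsigned int state_size = sizeof(struct rx_page_state);
	unsigned int buf_step = 1920;	/* aligned buffer stride */

	unsigned int page_offset = state_size;
	unsigned int count = 0;

	do {
		unsigned int buf_offset = page_offset + ip_align;

		printf("buffer %u at page offset %u\n", count, buf_offset);
		page_offset += buf_step;
		count++;
	} while (page_offset + buf_step <= PAGE_SIZE);

	printf("last buffer (%u) would carry the LAST_IN_PAGE flag\n",
	       count - 1);
	return 0;
}
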
434 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + in efx_siena_rx_config_page_split()
437 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : in efx_siena_rx_config_page_split()
438 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / in efx_siena_rx_config_page_split()
439 efx->rx_page_buf_step); in efx_siena_rx_config_page_split()
440 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / in efx_siena_rx_config_page_split()
441 efx->rx_bufs_per_page; in efx_siena_rx_config_page_split()
442 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, in efx_siena_rx_config_page_split()
443 efx->rx_bufs_per_page); in efx_siena_rx_config_page_split()
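
efx_siena_rx_config_page_split() derives the page layout: the buffer stride, how many buffers fit in a page after the page-state header, the truesize charged per buffer, and how many pages make up one refill batch. A worked example under assumed inputs (the real values depend on MTU, cache-line size and rx_buffer_order):

#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed inputs for illustration only. */
	unsigned int page_size = 4096;
	unsigned int rx_dma_len = 1792;
	unsigned int rx_ip_align = 2;
	unsigned int cache_line = 64;
	unsigned int state_size = 8;		/* sizeof page-state header */
	unsigned int preferred_batch = 8;	/* assumed preferred batch */

	unsigned int buf_step = ALIGN_UP(rx_dma_len + rx_ip_align, cache_line);
	unsigned int bufs_per_page = (page_size - state_size) / buf_step;
	unsigned int truesize = page_size / bufs_per_page;
	unsigned int pages_per_batch = DIV_ROUND_UP(preferred_batch,
						    bufs_per_page);

	printf("buf_step=%u bufs_per_page=%u truesize=%u pages_per_batch=%u\n",
	       buf_step, bufs_per_page, truesize, pages_per_batch);
	return 0;
}

With these numbers the split comes out as two 1856-byte strides per 4 KiB page, a truesize of 2048, and four pages per refill batch.
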
446 /* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
450 * @rx_queue->max_fill. If there is insufficient atomic
460 struct efx_nic *efx = rx_queue->efx; in efx_siena_fast_push_rx_descriptors()
464 if (!rx_queue->refill_enabled) in efx_siena_fast_push_rx_descriptors()
468 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_siena_fast_push_rx_descriptors()
469 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_siena_fast_push_rx_descriptors()
470 if (fill_level >= rx_queue->fast_fill_trigger) in efx_siena_fast_push_rx_descriptors()
474 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_siena_fast_push_rx_descriptors()
476 rx_queue->min_fill = fill_level; in efx_siena_fast_push_rx_descriptors()
479 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_siena_fast_push_rx_descriptors()
480 space = rx_queue->max_fill - fill_level; in efx_siena_fast_push_rx_descriptors()
483 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_siena_fast_push_rx_descriptors()
484 "RX queue %d fast-filling descriptor ring from" in efx_siena_fast_push_rx_descriptors()
487 rx_queue->max_fill); in efx_siena_fast_push_rx_descriptors()
496 } while ((space -= batch_size) >= batch_size); in efx_siena_fast_push_rx_descriptors()
498 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_siena_fast_push_rx_descriptors()
499 "RX queue %d fast-filled descriptor ring " in efx_siena_fast_push_rx_descriptors()
501 rx_queue->added_count - rx_queue->removed_count); in efx_siena_fast_push_rx_descriptors()
504 if (rx_queue->notified_count != rx_queue->added_count) in efx_siena_fast_push_rx_descriptors()
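
The fast-fill loop adds buffers one page-batch at a time and keeps going while at least one further whole batch still fits into space = max_fill - fill_level. A small illustration of how many efx_init_rx_buffers() calls that yields for assumed numbers:

#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration. */
	unsigned int max_fill = 1014;
	unsigned int fill_level = 900;
	unsigned int batch_size = 8;	/* rx_pages_per_batch * rx_bufs_per_page */

	unsigned int space = max_fill - fill_level;
	unsigned int batches = 0;

	if (space >= batch_size) {
		do {
			batches++;	/* one efx_init_rx_buffers() call */
		} while ((space -= batch_size) >= batch_size);
	}
	printf("space=%u -> %u batches, %u descriptors added\n",
	       max_fill - fill_level, batches, batches * batch_size);
	return 0;
}
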
516 struct napi_struct *napi = &channel->napi_str; in efx_siena_rx_packet_gro()
517 struct efx_nic *efx = channel->efx; in efx_siena_rx_packet_gro()
529 if (efx->net_dev->features & NETIF_F_RXHASH) in efx_siena_rx_packet_gro()
533 skb->csum = csum; in efx_siena_rx_packet_gro()
534 skb->ip_summed = CHECKSUM_COMPLETE; in efx_siena_rx_packet_gro()
536 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? in efx_siena_rx_packet_gro()
539 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); in efx_siena_rx_packet_gro()
542 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in efx_siena_rx_packet_gro()
543 rx_buf->page, rx_buf->page_offset, in efx_siena_rx_packet_gro()
544 rx_buf->len); in efx_siena_rx_packet_gro()
545 rx_buf->page = NULL; in efx_siena_rx_packet_gro()
546 skb->len += rx_buf->len; in efx_siena_rx_packet_gro()
547 if (skb_shinfo(skb)->nr_frags == n_frags) in efx_siena_rx_packet_gro()
550 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_siena_rx_packet_gro()
553 skb->data_len = skb->len; in efx_siena_rx_packet_gro()
554 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_siena_rx_packet_gro()
556 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_siena_rx_packet_gro()
566 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) in efx_siena_set_default_rx_indir_table()
567 ctx->rx_indir_table[i] = in efx_siena_set_default_rx_indir_table()
568 ethtool_rxfh_indir_default(i, efx->rss_spread); in efx_siena_set_default_rx_indir_table()
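
The default RSS indirection table is filled round-robin: in mainline kernels ethtool_rxfh_indir_default(index, n_rx_rings) is simply index modulo n_rx_rings. A quick illustration with an assumed rss_spread of 4:

#include <stdio.h>

/* Userspace stand-in for the kernel's ethtool_rxfh_indir_default(),
 * which distributes table entries round-robin over n RX rings. */
static unsigned int rxfh_indir_default(unsigned int index,
				       unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int rss_spread = 4;	/* assumed number of RSS channels */

	for (unsigned int i = 0; i < 16; i++)
		printf("%u ", rxfh_indir_default(i, rss_spread));
	printf("\n");	/* 0 1 2 3 0 1 2 3 ... */
	return 0;
}
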
572 * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
573 * @spec: Specification to test
575 * Return: %true if the specification is a non-drop RX filter that
580 bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec) in efx_siena_filter_is_mc_recipient() argument
582 if (!(spec->flags & EFX_FILTER_FLAG_RX) || in efx_siena_filter_is_mc_recipient()
583 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) in efx_siena_filter_is_mc_recipient()
586 if (spec->match_flags & in efx_siena_filter_is_mc_recipient()
588 is_multicast_ether_addr(spec->loc_mac)) in efx_siena_filter_is_mc_recipient()
591 if ((spec->match_flags & in efx_siena_filter_is_mc_recipient()
594 if (spec->ether_type == htons(ETH_P_IP) && in efx_siena_filter_is_mc_recipient()
595 ipv4_is_multicast(spec->loc_host[0])) in efx_siena_filter_is_mc_recipient()
597 if (spec->ether_type == htons(ETH_P_IPV6) && in efx_siena_filter_is_mc_recipient()
598 ((const u8 *)spec->loc_host)[0] == 0xff) in efx_siena_filter_is_mc_recipient()
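
The multicast test above reduces to three checks: the Ethernet group bit in the destination MAC, an IPv4 destination in 224.0.0.0/4, or an IPv6 destination whose first byte is 0xff. A self-contained userspace version of those checks; the helper names are local stand-ins for the kernel's is_multicast_ether_addr() and ipv4_is_multicast():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Group bit of the first octet, as is_multicast_ether_addr() tests. */
static int mac_is_multicast(const uint8_t *mac)
{
	return mac[0] & 0x01;
}

/* 224.0.0.0/4, i.e. the top nibble is 0xe, as ipv4_is_multicast() tests. */
static int ipv4_is_mcast(uint32_t addr_be)
{
	return (ntohl(addr_be) & 0xf0000000) == 0xe0000000;
}

/* IPv6 multicast: first byte of the address is 0xff. */
static int ipv6_is_mcast(const uint8_t *addr)
{
	return addr[0] == 0xff;
}

int main(void)
{
	uint8_t mcast_mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t ucast_mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t v6_mcast[16] = { 0xff, 0x02, };

	printf("mac mcast:  %d %d\n", mac_is_multicast(mcast_mac),
	       mac_is_multicast(ucast_mac));		/* 1 0 */
	printf("ipv4 mcast: %d %d\n", ipv4_is_mcast(inet_addr("239.1.1.1")),
	       ipv4_is_mcast(inet_addr("10.0.0.1")));	/* 1 0 */
	printf("ipv6 mcast: %d\n", ipv6_is_mcast(v6_mcast));	/* 1 */
	return 0;
}
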
608 if ((left->match_flags ^ right->match_flags) | in efx_siena_filter_spec_equal()
609 ((left->flags ^ right->flags) & in efx_siena_filter_spec_equal()
613 return memcmp(&left->outer_vid, &right->outer_vid, in efx_siena_filter_spec_equal()
614 sizeof(struct efx_filter_spec) - in efx_siena_filter_spec_equal()
618 u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec) in efx_siena_filter_spec_hash() argument
621 return jhash2((const u32 *)&spec->outer_vid, in efx_siena_filter_spec_hash()
622 (sizeof(struct efx_filter_spec) - in efx_siena_filter_spec_hash()
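
Both helpers treat everything from outer_vid to the end of struct efx_filter_spec as one opaque region: equality is a memcmp over that tail (after comparing match_flags and the relevant flags separately), and the hash is jhash2 over the same bytes, hence the sizeof-minus-offset arithmetic in the listing. A toy userspace version of the pattern, using a made-up struct and a trivial FNV hash in place of jhash2:

#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>

struct toy_spec {
	uint32_t match_flags;	/* compared separately, like the driver */
	uint32_t flags;
	uint32_t outer_vid;	/* hash/compare starts here */
	uint32_t rem_host;
	uint16_t rem_port;
	uint16_t loc_port;
};

#define SPEC_TAIL_LEN	(sizeof(struct toy_spec) - \
			 offsetof(struct toy_spec, outer_vid))

static int spec_equal(const struct toy_spec *l, const struct toy_spec *r)
{
	if (l->match_flags != r->match_flags)
		return 0;
	return !memcmp(&l->outer_vid, &r->outer_vid, SPEC_TAIL_LEN);
}

/* Trivial FNV-1a byte hash standing in for jhash2() over the same region. */
static uint32_t spec_hash(const struct toy_spec *s)
{
	const uint8_t *p = (const uint8_t *)&s->outer_vid;
	uint32_t h = 2166136261u;

	for (size_t i = 0; i < SPEC_TAIL_LEN; i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

int main(void)
{
	struct toy_spec a = { .outer_vid = 10, .rem_port = 80 };
	struct toy_spec b = a;

	printf("equal=%d hash=%08x\n", spec_equal(&a, &b),
	       (unsigned int)spec_hash(&a));
	b.rem_port = 443;
	printf("equal=%d hash=%08x\n", spec_equal(&a, &b),
	       (unsigned int)spec_hash(&b));
	return 0;
}
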
631 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { in efx_siena_rps_check_rule()
635 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { in efx_siena_rps_check_rule()
639 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; in efx_siena_rps_check_rule()
642 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ in efx_siena_rps_check_rule()
656 const struct efx_filter_spec *spec) in efx_rps_hash_bucket() argument
658 u32 hash = efx_siena_filter_spec_hash(spec); in efx_rps_hash_bucket()
660 lockdep_assert_held(&efx->rps_hash_lock); in efx_rps_hash_bucket()
661 if (!efx->rps_hash_table) in efx_rps_hash_bucket()
663 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; in efx_rps_hash_bucket()
667 const struct efx_filter_spec *spec) in efx_siena_rps_hash_find() argument
673 head = efx_rps_hash_bucket(efx, spec); in efx_siena_rps_hash_find()
678 if (efx_siena_filter_spec_equal(spec, &rule->spec)) in efx_siena_rps_hash_find()
685 const struct efx_filter_spec *spec, in efx_rps_hash_add() argument
692 head = efx_rps_hash_bucket(efx, spec); in efx_rps_hash_add()
697 if (efx_siena_filter_spec_equal(spec, &rule->spec)) { in efx_rps_hash_add()
705 memcpy(&rule->spec, spec, sizeof(rule->spec)); in efx_rps_hash_add()
706 hlist_add_head(&rule->node, head); in efx_rps_hash_add()
712 const struct efx_filter_spec *spec) in efx_siena_rps_hash_del() argument
718 head = efx_rps_hash_bucket(efx, spec); in efx_siena_rps_hash_del()
723 if (efx_siena_filter_spec_equal(spec, &rule->spec)) { in efx_siena_rps_hash_del()
730 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) in efx_siena_rps_hash_del()
746 mutex_lock(&efx->mac_lock); in efx_siena_probe_filters()
747 down_write(&efx->filter_sem); in efx_siena_probe_filters()
748 rc = efx->type->filter_table_probe(efx); in efx_siena_probe_filters()
753 if (efx->type->offload_features & NETIF_F_NTUPLE) { in efx_siena_probe_filters()
758 channel->rps_flow_id = in efx_siena_probe_filters()
759 kcalloc(efx->type->max_rx_ip_filters, in efx_siena_probe_filters()
760 sizeof(*channel->rps_flow_id), in efx_siena_probe_filters()
762 if (!channel->rps_flow_id) in efx_siena_probe_filters()
766 i < efx->type->max_rx_ip_filters; in efx_siena_probe_filters()
768 channel->rps_flow_id[i] = in efx_siena_probe_filters()
770 channel->rfs_expire_index = 0; in efx_siena_probe_filters()
771 channel->rfs_filter_count = 0; in efx_siena_probe_filters()
776 kfree(channel->rps_flow_id); in efx_siena_probe_filters()
777 efx->type->filter_table_remove(efx); in efx_siena_probe_filters()
778 rc = -ENOMEM; in efx_siena_probe_filters()
784 up_write(&efx->filter_sem); in efx_siena_probe_filters()
785 mutex_unlock(&efx->mac_lock); in efx_siena_probe_filters()
795 cancel_delayed_work_sync(&channel->filter_work); in efx_siena_remove_filters()
796 kfree(channel->rps_flow_id); in efx_siena_remove_filters()
797 channel->rps_flow_id = NULL; in efx_siena_remove_filters()
800 down_write(&efx->filter_sem); in efx_siena_remove_filters()
801 efx->type->filter_table_remove(efx); in efx_siena_remove_filters()
802 up_write(&efx->filter_sem); in efx_siena_remove_filters()
811 struct efx_nic *efx = netdev_priv(req->net_dev); in efx_filter_rfs_work()
812 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); in efx_filter_rfs_work()
813 int slot_idx = req - efx->rps_slot; in efx_filter_rfs_work()
818 rc = efx->type->filter_insert(efx, &req->spec, true); in efx_filter_rfs_work()
821 rc %= efx->type->max_rx_ip_filters; in efx_filter_rfs_work()
822 if (efx->rps_hash_table) { in efx_filter_rfs_work()
823 spin_lock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
824 rule = efx_siena_rps_hash_find(efx, &req->spec); in efx_filter_rfs_work()
826 * for the same spec was already worked and then expired before in efx_filter_rfs_work()
833 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; in efx_filter_rfs_work()
835 rule->filter_id = rc; in efx_filter_rfs_work()
836 arfs_id = rule->arfs_id; in efx_filter_rfs_work()
838 spin_unlock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
844 mutex_lock(&efx->rps_mutex); in efx_filter_rfs_work()
845 if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) in efx_filter_rfs_work()
846 channel->rfs_filter_count++; in efx_filter_rfs_work()
847 channel->rps_flow_id[rc] = req->flow_id; in efx_filter_rfs_work()
848 mutex_unlock(&efx->rps_mutex); in efx_filter_rfs_work()
850 if (req->spec.ether_type == htons(ETH_P_IP)) in efx_filter_rfs_work()
851 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
853 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
854 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
855 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
856 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
858 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
860 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
861 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
862 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
863 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
864 channel->n_rfs_succeeded++; in efx_filter_rfs_work()
866 if (req->spec.ether_type == htons(ETH_P_IP)) in efx_filter_rfs_work()
867 netif_dbg(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
869 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
870 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
871 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
872 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
874 netif_dbg(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
876 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
877 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
878 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
879 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
880 channel->n_rfs_failed++; in efx_filter_rfs_work()
885 min(channel->rfs_filter_count, in efx_filter_rfs_work()
890 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs_work()
891 dev_put(req->net_dev); in efx_filter_rfs_work()
907 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) in efx_siena_filter_rfs()
910 return -EBUSY; in efx_siena_filter_rfs()
913 rc = -EINVAL; in efx_siena_filter_rfs()
918 rc = -EPROTONOSUPPORT; in efx_siena_filter_rfs()
923 rc = -EPROTONOSUPPORT; in efx_siena_filter_rfs()
927 rc = -EPROTONOSUPPORT; in efx_siena_filter_rfs()
931 req = efx->rps_slot + slot_idx; in efx_siena_filter_rfs()
932 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, in efx_siena_filter_rfs()
933 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, in efx_siena_filter_rfs()
935 req->spec.match_flags = in efx_siena_filter_rfs()
939 req->spec.ether_type = fk.basic.n_proto; in efx_siena_filter_rfs()
940 req->spec.ip_proto = fk.basic.ip_proto; in efx_siena_filter_rfs()
943 req->spec.rem_host[0] = fk.addrs.v4addrs.src; in efx_siena_filter_rfs()
944 req->spec.loc_host[0] = fk.addrs.v4addrs.dst; in efx_siena_filter_rfs()
946 memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, in efx_siena_filter_rfs()
948 memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, in efx_siena_filter_rfs()
952 req->spec.rem_port = fk.ports.src; in efx_siena_filter_rfs()
953 req->spec.loc_port = fk.ports.dst; in efx_siena_filter_rfs()
955 if (efx->rps_hash_table) { in efx_siena_filter_rfs()
957 spin_lock(&efx->rps_hash_lock); in efx_siena_filter_rfs()
958 rule = efx_rps_hash_add(efx, &req->spec, &new); in efx_siena_filter_rfs()
960 rc = -ENOMEM; in efx_siena_filter_rfs()
964 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; in efx_siena_filter_rfs()
965 rc = rule->arfs_id; in efx_siena_filter_rfs()
967 if (!new && rule->rxq_index == rxq_index && in efx_siena_filter_rfs()
968 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) in efx_siena_filter_rfs()
970 rule->rxq_index = rxq_index; in efx_siena_filter_rfs()
971 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; in efx_siena_filter_rfs()
972 spin_unlock(&efx->rps_hash_lock); in efx_siena_filter_rfs()
983 dev_hold(req->net_dev = net_dev); in efx_siena_filter_rfs()
984 INIT_WORK(&req->work, efx_filter_rfs_work); in efx_siena_filter_rfs()
985 req->rxq_index = rxq_index; in efx_siena_filter_rfs()
986 req->flow_id = flow_id; in efx_siena_filter_rfs()
987 schedule_work(&req->work); in efx_siena_filter_rfs()
990 spin_unlock(&efx->rps_hash_lock); in efx_siena_filter_rfs()
992 clear_bit(slot_idx, &efx->rps_slot_map); in efx_siena_filter_rfs()
1000 struct efx_nic *efx = channel->efx; in __efx_siena_filter_rfs_expire()
1004 if (!mutex_trylock(&efx->rps_mutex)) in __efx_siena_filter_rfs_expire()
1006 expire_one = efx->type->filter_rfs_expire_one; in __efx_siena_filter_rfs_expire()
1007 index = channel->rfs_expire_index; in __efx_siena_filter_rfs_expire()
1009 size = efx->type->max_rx_ip_filters; in __efx_siena_filter_rfs_expire()
1011 flow_id = channel->rps_flow_id[index]; in __efx_siena_filter_rfs_expire()
1014 quota--; in __efx_siena_filter_rfs_expire()
1016 netif_info(efx, rx_status, efx->net_dev, in __efx_siena_filter_rfs_expire()
1018 index, channel->channel, flow_id); in __efx_siena_filter_rfs_expire()
1019 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; in __efx_siena_filter_rfs_expire()
1020 channel->rfs_filter_count--; in __efx_siena_filter_rfs_expire()
1027 * if two callers race), ensure that we don't loop forever - in __efx_siena_filter_rfs_expire()
1034 channel->rfs_expire_index = index; in __efx_siena_filter_rfs_expire()
1035 mutex_unlock(&efx->rps_mutex); in __efx_siena_filter_rfs_expire()
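
__efx_siena_filter_rfs_expire() scans the per-channel flow-id table as a ring: it resumes at rfs_expire_index, spends its quota only on slots that actually hold a filter, wraps at max_rx_ip_filters, stops early after a full lap, and records where it stopped for the next pass. A standalone sketch of that bounded circular scan, with an invented table and an expire hook that always succeeds:

#include <stdio.h>
#include <stdbool.h>

#define FLOW_ID_INVALID	0xFFFFFFFFu
#define TABLE_SIZE	8		/* stand-in for max_rx_ip_filters */

/* Pretend filter table: entries other than FLOW_ID_INVALID are installed. */
static unsigned int flow_id[TABLE_SIZE] = { 0x11, FLOW_ID_INVALID, 0x33,
					    FLOW_ID_INVALID, 0x55, 0x66,
					    FLOW_ID_INVALID, 0x88 };

static unsigned int expire_index;	/* persists across scan passes */

/* Stand-in for the NIC-type filter_rfs_expire_one() hook: pretend every
 * flow we look at has gone idle and can be removed. */
static bool expire_one(unsigned int index, unsigned int id)
{
	printf("expired filter %u (flow id %#x)\n", index, id);
	return true;
}

int main(void)
{
	unsigned int quota = 4;
	unsigned int index = expire_index;

	while (quota) {
		unsigned int id = flow_id[index];

		if (id != FLOW_ID_INVALID) {
			quota--;
			if (expire_one(index, id))
				flow_id[index] = FLOW_ID_INVALID;
		}
		if (++index == TABLE_SIZE)
			index = 0;
		if (index == expire_index)	/* full lap: stop early */
			break;
	}
	expire_index = index;	/* resume here on the next pass */
	return 0;
}
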