Lines Matching +full:atomic +full:-threshold +full:-us

1 // SPDX-License-Identifier: GPL-2.0-only
25 "RX descriptor ring refill threshold (%)");
29 * This must be at least 1 to prevent overflow, plus one packet-worth
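The two comment fragments above cover the rx_refill_threshold module parameter and the RX descriptor head room. A minimal sketch of how such a head room is typically sized (one spare slot plus one packet's worth of fragments); the constants and names below are illustrative, not the driver's real values:

#include <linux/kernel.h>

/* Hedged sketch: head room = 1 slot (so the ring can never be completely
 * filled) plus the largest number of fragments one packet can occupy.
 * RX_BUF_SIZE_GUESS and MAX_FRAME_LEN_GUESS are made-up numbers.
 */
#define RX_BUF_SIZE_GUESS	2048
#define MAX_FRAME_LEN_GUESS	9216
#define RX_MAX_FRAGS_GUESS	DIV_ROUND_UP(MAX_FRAME_LEN_GUESS, RX_BUF_SIZE_GUESS)
#define RXD_HEAD_ROOM_GUESS	(1 + RX_MAX_FRAGS_GUESS)	/* = 6 here */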
37 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
42 if (unlikely(!rx_queue->page_ring)) in efx_reuse_page()
44 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
45 page = rx_queue->page_ring[index]; in efx_reuse_page()
49 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
51 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
52 ++rx_queue->page_remove; in efx_reuse_page()
56 ++rx_queue->page_recycle_count; in efx_reuse_page()
60 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_reuse_page()
61 PAGE_SIZE << efx->rx_buffer_order, in efx_reuse_page()
64 ++rx_queue->page_recycle_failed; in efx_reuse_page()
78 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
79 struct page *page = rx_buf->page; in efx_recycle_rx_page()
83 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) in efx_recycle_rx_page()
86 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
87 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
88 unsigned int read_index = rx_queue->page_remove & in efx_recycle_rx_page()
89 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
96 ++rx_queue->page_remove; in efx_recycle_rx_page()
97 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
98 ++rx_queue->page_add; in efx_recycle_rx_page()
101 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
103 put_page(rx_buf->page); in efx_recycle_rx_page()
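The efx_reuse_page() and efx_recycle_rx_page() fragments above implement a small producer/consumer ring of DMA-mapped pages, indexed by free-running counters and a power-of-two mask. A hedged sketch of the two index operations, assuming the efx_rx_queue fields shown in the listing (declared in the driver's net_driver.h); it simplifies the corner cases and is not the driver's exact code:

/* Hedged sketch of the recycle-ring indexing: free-running add/remove
 * counters reduced to a slot with "counter & page_ptr_mask".
 */
static struct page *recycle_ring_get(struct efx_rx_queue *rx_queue)
{
	unsigned int index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	struct page *page = rx_queue->page_ring[index];

	if (!page)
		return NULL;			/* slot empty, nothing to reuse */
	rx_queue->page_ring[index] = NULL;
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;	/* consume the slot */
	return page;
}

static bool recycle_ring_put(struct efx_rx_queue *rx_queue, struct page *page)
{
	unsigned int index = rx_queue->page_add & rx_queue->page_ptr_mask;

	if (rx_queue->page_ring[index])
		return false;			/* ring full, caller drops the page */
	rx_queue->page_ring[index] = page;
	++rx_queue->page_add;
	return true;
}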
113 if (unlikely(!rx_queue->page_ring)) in efx_recycle_rx_pages()
119 } while (--n_frags); in efx_recycle_rx_pages()
136 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_recycle_ring()
140 efx->rx_bufs_per_page); in efx_init_rx_recycle_ring()
141 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
142 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
143 if (!rx_queue->page_ring) in efx_init_rx_recycle_ring()
144 rx_queue->page_ptr_mask = 0; in efx_init_rx_recycle_ring()
146 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
151 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_recycle_ring()
154 if (unlikely(!rx_queue->page_ring)) in efx_fini_rx_recycle_ring()
158 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_recycle_ring()
159 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_recycle_ring()
166 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_fini_rx_recycle_ring()
167 PAGE_SIZE << efx->rx_buffer_order, in efx_fini_rx_recycle_ring()
171 kfree(rx_queue->page_ring); in efx_fini_rx_recycle_ring()
172 rx_queue->page_ring = NULL; in efx_fini_rx_recycle_ring()
179 if (rx_buf->page) in efx_fini_rx_buffer()
180 put_page(rx_buf->page); in efx_fini_rx_buffer()
183 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { in efx_fini_rx_buffer()
184 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
187 rx_buf->page = NULL; in efx_fini_rx_buffer()
192 struct efx_nic *efx = rx_queue->efx; in efx_probe_rx_queue()
196 /* Create the smallest power-of-two aligned ring */ in efx_probe_rx_queue()
197 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); in efx_probe_rx_queue()
199 rx_queue->ptr_mask = entries - 1; in efx_probe_rx_queue()
201 netif_dbg(efx, probe, efx->net_dev, in efx_probe_rx_queue()
203 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_probe_rx_queue()
204 rx_queue->ptr_mask); in efx_probe_rx_queue()
207 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_probe_rx_queue()
209 if (!rx_queue->buffer) in efx_probe_rx_queue()
210 return -ENOMEM; in efx_probe_rx_queue()
214 kfree(rx_queue->buffer); in efx_probe_rx_queue()
215 rx_queue->buffer = NULL; in efx_probe_rx_queue()
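efx_probe_rx_queue() rounds the requested entry count up to a power of two so that "counter & ptr_mask" can replace a modulo on the free-running counters. A minimal self-contained sketch of the mask arithmetic using the kernel's roundup_pow_of_two() helper; ring_mask_for() is an illustrative name:

#include <linux/log2.h>

/* Hedged sketch: with a power-of-two ring, the mask is simply size - 1.
 * e.g. a request for 1000 entries becomes 1024 entries and mask 0x3ff.
 */
static unsigned int ring_mask_for(unsigned int requested_entries)
{
	unsigned int entries = roundup_pow_of_two(requested_entries);

	return entries - 1;
}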
224 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_queue()
227 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_init_rx_queue()
231 rx_queue->added_count = 0; in efx_init_rx_queue()
232 rx_queue->notified_count = 0; in efx_init_rx_queue()
233 rx_queue->granted_count = 0; in efx_init_rx_queue()
234 rx_queue->removed_count = 0; in efx_init_rx_queue()
235 rx_queue->min_fill = -1U; in efx_init_rx_queue()
238 rx_queue->page_remove = 0; in efx_init_rx_queue()
239 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_init_rx_queue()
240 rx_queue->page_recycle_count = 0; in efx_init_rx_queue()
241 rx_queue->page_recycle_failed = 0; in efx_init_rx_queue()
242 rx_queue->page_recycle_full = 0; in efx_init_rx_queue()
244 rx_queue->old_rx_packets = rx_queue->rx_packets; in efx_init_rx_queue()
245 rx_queue->old_rx_bytes = rx_queue->rx_bytes; in efx_init_rx_queue()
248 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; in efx_init_rx_queue()
250 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_init_rx_queue()
259 rx_queue->max_fill = max_fill; in efx_init_rx_queue()
260 rx_queue->fast_fill_trigger = trigger; in efx_init_rx_queue()
261 rx_queue->refill_enabled = true; in efx_init_rx_queue()
264 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, in efx_init_rx_queue()
265 rx_queue->core_index, 0); in efx_init_rx_queue()
268 netif_err(efx, rx_err, efx->net_dev, in efx_init_rx_queue()
271 efx->xdp_rxq_info_failed = true; in efx_init_rx_queue()
273 rx_queue->xdp_rxq_info_valid = true; in efx_init_rx_queue()
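The efx_init_rx_queue() fragment sets max_fill from the ring size minus the head room and then derives fast_fill_trigger. A hedged sketch of how the percentage rx_refill_threshold parameter could be clamped into that range; pick_fill_trigger() is an illustrative name and the clamping may not match the driver's exact policy:

#include <linux/minmax.h>

/* Hedged sketch: convert a 0-100% refill threshold into a descriptor count,
 * never exceeding the largest trigger that still leaves room for one full
 * allocation batch.
 */
static unsigned int pick_fill_trigger(unsigned int max_fill,
				      unsigned int batch_bufs,
				      unsigned int threshold_pct)
{
	unsigned int max_trigger = max_fill - batch_bufs;
	unsigned int trigger;

	if (!threshold_pct)
		return max_trigger;
	trigger = max_fill * min(threshold_pct, 100U) / 100U;
	return min(trigger, max_trigger);
}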
285 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_fini_rx_queue()
288 del_timer_sync(&rx_queue->slow_fill); in efx_fini_rx_queue()
289 if (rx_queue->grant_credits) in efx_fini_rx_queue()
290 flush_work(&rx_queue->grant_work); in efx_fini_rx_queue()
293 if (rx_queue->buffer) { in efx_fini_rx_queue()
294 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_fini_rx_queue()
296 unsigned int index = i & rx_queue->ptr_mask; in efx_fini_rx_queue()
305 if (rx_queue->xdp_rxq_info_valid) in efx_fini_rx_queue()
306 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); in efx_fini_rx_queue()
308 rx_queue->xdp_rxq_info_valid = false; in efx_fini_rx_queue()
313 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_remove_rx_queue()
318 kfree(rx_queue->buffer); in efx_remove_rx_queue()
319 rx_queue->buffer = NULL; in efx_remove_rx_queue()
322 /* Unmap a DMA-mapped page. This function is only called for the final RX
328 struct page *page = rx_buf->page; in efx_unmap_rx_buffer()
333 dma_unmap_page(&efx->pci_dev->dev, in efx_unmap_rx_buffer()
334 state->dma_addr, in efx_unmap_rx_buffer()
335 PAGE_SIZE << efx->rx_buffer_order, in efx_unmap_rx_buffer()
345 if (rx_buf->page) { in efx_free_rx_buffers()
346 put_page(rx_buf->page); in efx_free_rx_buffers()
347 rx_buf->page = NULL; in efx_free_rx_buffers()
350 } while (--num_bufs); in efx_free_rx_buffers()
359 ++rx_queue->slow_fill_count; in efx_rx_slow_fill()
364 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); in efx_schedule_slow_fill()
367 /* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
376 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) in efx_init_rx_buffers() argument
379 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
390 (atomic ? GFP_ATOMIC : GFP_KERNEL), in efx_init_rx_buffers()
391 efx->rx_buffer_order); in efx_init_rx_buffers()
393 return -ENOMEM; in efx_init_rx_buffers()
395 dma_map_page(&efx->pci_dev->dev, page, 0, in efx_init_rx_buffers()
396 PAGE_SIZE << efx->rx_buffer_order, in efx_init_rx_buffers()
398 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, in efx_init_rx_buffers()
400 __free_pages(page, efx->rx_buffer_order); in efx_init_rx_buffers()
401 return -EIO; in efx_init_rx_buffers()
404 state->dma_addr = dma_addr; in efx_init_rx_buffers()
407 dma_addr = state->dma_addr; in efx_init_rx_buffers()
414 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
416 rx_buf->dma_addr = dma_addr + efx->rx_ip_align + in efx_init_rx_buffers()
418 rx_buf->page = page; in efx_init_rx_buffers()
419 rx_buf->page_offset = page_offset + efx->rx_ip_align + in efx_init_rx_buffers()
421 rx_buf->len = efx->rx_dma_len; in efx_init_rx_buffers()
422 rx_buf->flags = 0; in efx_init_rx_buffers()
423 ++rx_queue->added_count; in efx_init_rx_buffers()
425 dma_addr += efx->rx_page_buf_step; in efx_init_rx_buffers()
426 page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
427 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
429 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; in efx_init_rx_buffers()
430 } while (++count < efx->rx_pages_per_batch); in efx_init_rx_buffers()
437 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + in efx_rx_config_page_split()
440 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : in efx_rx_config_page_split()
441 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / in efx_rx_config_page_split()
442 efx->rx_page_buf_step); in efx_rx_config_page_split()
443 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / in efx_rx_config_page_split()
444 efx->rx_bufs_per_page; in efx_rx_config_page_split()
445 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, in efx_rx_config_page_split()
446 efx->rx_bufs_per_page); in efx_rx_config_page_split()
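efx_rx_config_page_split() divides each page between a small efx_rx_page_state header and as many rx_page_buf_step-sized buffers as fit. A hedged worked example with assumed numbers (they are not taken from the driver):

/* Hedged example of the split, all numbers assumed:
 *   PAGE_SIZE           = 4096
 *   sizeof(page state)  =   64
 *   rx_page_buf_step    = 1792   (dma_len + ip_align + headroom, aligned)
 *   rx_bufs_per_page    = (4096 - 64) / 1792 = 2
 *   rx_buffer_truesize  = 4096 / 2            = 2048
 *   rx_pages_per_batch  = DIV_ROUND_UP(preferred batch, 2)
 * With rx_buffer_order > 0 the whole compound page holds a single buffer,
 * as the ternary in the fragment above shows.
 */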
449 /* efx_fast_push_rx_descriptors - push new RX descriptors quickly
453 * @rx_queue->@max_fill. If there is insufficient atomic
460 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) in efx_fast_push_rx_descriptors() argument
462 struct efx_nic *efx = rx_queue->efx; in efx_fast_push_rx_descriptors()
466 if (!rx_queue->refill_enabled) in efx_fast_push_rx_descriptors()
470 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
471 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_fast_push_rx_descriptors()
472 if (fill_level >= rx_queue->fast_fill_trigger) in efx_fast_push_rx_descriptors()
476 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_fast_push_rx_descriptors()
478 rx_queue->min_fill = fill_level; in efx_fast_push_rx_descriptors()
481 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_fast_push_rx_descriptors()
482 space = rx_queue->max_fill - fill_level; in efx_fast_push_rx_descriptors()
485 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
486 "RX queue %d fast-filling descriptor ring from" in efx_fast_push_rx_descriptors()
489 rx_queue->max_fill); in efx_fast_push_rx_descriptors()
492 rc = efx_init_rx_buffers(rx_queue, atomic); in efx_fast_push_rx_descriptors()
498 } while ((space -= batch_size) >= batch_size); in efx_fast_push_rx_descriptors()
500 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
501 "RX queue %d fast-filled descriptor ring " in efx_fast_push_rx_descriptors()
503 rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
506 if (rx_queue->notified_count != rx_queue->added_count) in efx_fast_push_rx_descriptors()
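The kernel-doc above says efx_fast_push_rx_descriptors() tops the ring up towards max_fill and falls back to a timer-driven slow fill if atomic allocation fails. A hedged sketch of that control flow using the counters and limits visible in the fragment; alloc_rx_batch(), schedule_slow_fill() and notify_hw() are illustrative stand-ins, not the driver's helpers:

/* Illustrative helpers, assumed to exist for this sketch only. */
static int alloc_rx_batch(struct efx_rx_queue *rx_queue, bool atomic);
static void schedule_slow_fill(struct efx_rx_queue *rx_queue);
static void notify_hw(struct efx_rx_queue *rx_queue);

/* Hedged sketch of the fast-refill flow; not the driver's exact code. */
static void fast_refill(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int fill_level = rx_queue->added_count - rx_queue->removed_count;
	unsigned int batch = rx_queue->efx->rx_pages_per_batch *
			     rx_queue->efx->rx_bufs_per_page;
	unsigned int space = rx_queue->max_fill - fill_level;

	if (fill_level >= rx_queue->fast_fill_trigger || space < batch)
		return;				/* above the trigger, or no room for a batch */
	do {
		if (alloc_rx_batch(rx_queue, atomic)) {
			/* Out of memory: retry later from the slow-fill timer. */
			schedule_slow_fill(rx_queue);
			break;
		}
	} while ((space -= batch) >= batch);

	if (rx_queue->notified_count != rx_queue->added_count)
		notify_hw(rx_queue);		/* push the new descriptors to hardware */
}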
517 struct napi_struct *napi = &channel->napi_str; in efx_rx_packet_gro()
518 struct efx_nic *efx = channel->efx; in efx_rx_packet_gro()
530 if (efx->net_dev->features & NETIF_F_RXHASH && in efx_rx_packet_gro()
535 skb->csum = csum; in efx_rx_packet_gro()
536 skb->ip_summed = CHECKSUM_COMPLETE; in efx_rx_packet_gro()
538 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? in efx_rx_packet_gro()
541 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); in efx_rx_packet_gro()
544 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in efx_rx_packet_gro()
545 rx_buf->page, rx_buf->page_offset, in efx_rx_packet_gro()
546 rx_buf->len); in efx_rx_packet_gro()
547 rx_buf->page = NULL; in efx_rx_packet_gro()
548 skb->len += rx_buf->len; in efx_rx_packet_gro()
549 if (skb_shinfo(skb)->nr_frags == n_frags) in efx_rx_packet_gro()
552 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_packet_gro()
555 skb->data_len = skb->len; in efx_rx_packet_gro()
556 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_rx_packet_gro()
558 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_rx_packet_gro()
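efx_rx_packet_gro() builds a fragment-only skb around the RX pages and hands it to GRO. A hedged sketch of that pattern with the core networking APIs the fragment uses (napi_get_frags(), skb_fill_page_desc(), napi_gro_frags()); gro_one_page() is an illustrative helper, and the checksum, hash and multi-fragment handling shown above are omitted:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hedged sketch: attach one page fragment to a GRO skb and submit it. */
static void gro_one_page(struct napi_struct *napi, struct page *page,
			 unsigned int offset, unsigned int len,
			 unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);			/* no skb available, drop the buffer */
		return;
	}
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += truesize;
	napi_gro_frags(napi);
}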
568 WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); in efx_find_rss_context_entry()
570 ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id); in efx_find_rss_context_entry()
580 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++) in efx_set_default_rx_indir_table()
581 indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); in efx_set_default_rx_indir_table()
585 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
588 * Return: %true if the specification is a non-drop RX filter that
595 if (!(spec->flags & EFX_FILTER_FLAG_RX) || in efx_filter_is_mc_recipient()
596 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) in efx_filter_is_mc_recipient()
599 if (spec->match_flags & in efx_filter_is_mc_recipient()
601 is_multicast_ether_addr(spec->loc_mac)) in efx_filter_is_mc_recipient()
604 if ((spec->match_flags & in efx_filter_is_mc_recipient()
607 if (spec->ether_type == htons(ETH_P_IP) && in efx_filter_is_mc_recipient()
608 ipv4_is_multicast(spec->loc_host[0])) in efx_filter_is_mc_recipient()
610 if (spec->ether_type == htons(ETH_P_IPV6) && in efx_filter_is_mc_recipient()
611 ((const u8 *)spec->loc_host)[0] == 0xff) in efx_filter_is_mc_recipient()
621 if ((left->match_flags ^ right->match_flags) | in efx_filter_spec_equal()
622 ((left->flags ^ right->flags) & in efx_filter_spec_equal()
626 return memcmp(&left->vport_id, &right->vport_id, in efx_filter_spec_equal()
627 sizeof(struct efx_filter_spec) - in efx_filter_spec_equal()
634 return jhash2((const u32 *)&spec->vport_id, in efx_filter_spec_hash()
635 (sizeof(struct efx_filter_spec) - in efx_filter_spec_hash()
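efx_filter_spec_equal() and efx_filter_spec_hash() compare and hash everything from the vport_id member to the end of the spec, skipping the per-insertion fields in front of it. A self-contained sketch of that "tail of the struct" idiom with offsetof() and jhash2(); struct demo_spec is made up for illustration:

#include <linux/build_bug.h>
#include <linux/jhash.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

/* Made-up struct: only the members from key_start onwards identify the flow. */
struct demo_spec {
	u32 priority;		/* per-insertion bookkeeping, excluded from the key */
	u32 key_start;
	u32 more_key[4];
};

static u32 demo_spec_hash(const struct demo_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct demo_spec, key_start) & 3);
	return jhash2((const u32 *)&spec->key_start,
		      (sizeof(*spec) - offsetof(struct demo_spec, key_start)) / 4,
		      0);
}

static bool demo_spec_equal(const struct demo_spec *a, const struct demo_spec *b)
{
	return memcmp(&a->key_start, &b->key_start,
		      sizeof(*a) - offsetof(struct demo_spec, key_start)) == 0;
}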
644 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { in efx_rps_check_rule()
648 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { in efx_rps_check_rule()
652 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; in efx_rps_check_rule()
655 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ in efx_rps_check_rule()
673 lockdep_assert_held(&efx->rps_hash_lock); in efx_rps_hash_bucket()
674 if (!efx->rps_hash_table) in efx_rps_hash_bucket()
676 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; in efx_rps_hash_bucket()
691 if (efx_filter_spec_equal(spec, &rule->spec)) in efx_rps_hash_find()
710 if (efx_filter_spec_equal(spec, &rule->spec)) { in efx_rps_hash_add()
718 memcpy(&rule->spec, spec, sizeof(rule->spec)); in efx_rps_hash_add()
719 hlist_add_head(&rule->node, head); in efx_rps_hash_add()
735 if (efx_filter_spec_equal(spec, &rule->spec)) { in efx_rps_hash_del()
742 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) in efx_rps_hash_del()
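The efx_rps_hash_bucket()/find/add/del fragments above implement a small open-hashing table of hlist buckets keyed by the filter-spec hash. A minimal self-contained sketch of the bucket-then-walk pattern; DEMO_HASH_TABLE_SIZE and struct demo_rule are illustrative:

#include <linux/list.h>
#include <linux/types.h>

#define DEMO_HASH_TABLE_SIZE	256	/* assumed size */

struct demo_rule {
	struct hlist_node node;
	u32 key;
};

/* Hedged sketch: pick a bucket with "hash % table size", then walk its hlist. */
static struct demo_rule *demo_hash_find(struct hlist_head *table, u32 hash, u32 key)
{
	struct hlist_head *head = &table[hash % DEMO_HASH_TABLE_SIZE];
	struct demo_rule *rule;

	hlist_for_each_entry(rule, head, node)
		if (rule->key == key)
			return rule;
	return NULL;
}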
758 mutex_lock(&efx->mac_lock); in efx_probe_filters()
759 rc = efx->type->filter_table_probe(efx); in efx_probe_filters()
764 if (efx->type->offload_features & NETIF_F_NTUPLE) { in efx_probe_filters()
769 channel->rps_flow_id = in efx_probe_filters()
770 kcalloc(efx->type->max_rx_ip_filters, in efx_probe_filters()
771 sizeof(*channel->rps_flow_id), in efx_probe_filters()
773 if (!channel->rps_flow_id) in efx_probe_filters()
777 i < efx->type->max_rx_ip_filters; in efx_probe_filters()
779 channel->rps_flow_id[i] = in efx_probe_filters()
781 channel->rfs_expire_index = 0; in efx_probe_filters()
782 channel->rfs_filter_count = 0; in efx_probe_filters()
787 kfree(channel->rps_flow_id); in efx_probe_filters()
788 channel->rps_flow_id = NULL; in efx_probe_filters()
790 efx->type->filter_table_remove(efx); in efx_probe_filters()
791 rc = -ENOMEM; in efx_probe_filters()
797 mutex_unlock(&efx->mac_lock); in efx_probe_filters()
807 cancel_delayed_work_sync(&channel->filter_work); in efx_remove_filters()
808 kfree(channel->rps_flow_id); in efx_remove_filters()
809 channel->rps_flow_id = NULL; in efx_remove_filters()
812 efx->type->filter_table_remove(efx); in efx_remove_filters()
821 struct efx_nic *efx = efx_netdev_priv(req->net_dev); in efx_filter_rfs_work()
822 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); in efx_filter_rfs_work()
823 int slot_idx = req - efx->rps_slot; in efx_filter_rfs_work()
828 rc = efx->type->filter_insert(efx, &req->spec, true); in efx_filter_rfs_work()
831 rc %= efx->type->max_rx_ip_filters; in efx_filter_rfs_work()
832 if (efx->rps_hash_table) { in efx_filter_rfs_work()
833 spin_lock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
834 rule = efx_rps_hash_find(efx, &req->spec); in efx_filter_rfs_work()
838 * tying us to an arfs_id, meaning that as soon as the filter in efx_filter_rfs_work()
843 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; in efx_filter_rfs_work()
845 rule->filter_id = rc; in efx_filter_rfs_work()
846 arfs_id = rule->arfs_id; in efx_filter_rfs_work()
848 spin_unlock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
854 mutex_lock(&efx->rps_mutex); in efx_filter_rfs_work()
855 if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) in efx_filter_rfs_work()
856 channel->rfs_filter_count++; in efx_filter_rfs_work()
857 channel->rps_flow_id[rc] = req->flow_id; in efx_filter_rfs_work()
858 mutex_unlock(&efx->rps_mutex); in efx_filter_rfs_work()
860 if (req->spec.ether_type == htons(ETH_P_IP)) in efx_filter_rfs_work()
861 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
863 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
864 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
865 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
866 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
868 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
870 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
871 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
872 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
873 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
874 channel->n_rfs_succeeded++; in efx_filter_rfs_work()
876 if (req->spec.ether_type == htons(ETH_P_IP)) in efx_filter_rfs_work()
877 netif_dbg(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
879 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
880 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
881 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
882 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
884 netif_dbg(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
886 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", in efx_filter_rfs_work()
887 req->spec.rem_host, ntohs(req->spec.rem_port), in efx_filter_rfs_work()
888 req->spec.loc_host, ntohs(req->spec.loc_port), in efx_filter_rfs_work()
889 req->rxq_index, req->flow_id, rc, arfs_id); in efx_filter_rfs_work()
890 channel->n_rfs_failed++; in efx_filter_rfs_work()
894 __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, in efx_filter_rfs_work()
899 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs_work()
900 netdev_put(req->net_dev, &req->net_dev_tracker); in efx_filter_rfs_work()
916 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) in efx_filter_rfs()
919 return -EBUSY; in efx_filter_rfs()
922 rc = -EINVAL; in efx_filter_rfs()
927 rc = -EPROTONOSUPPORT; in efx_filter_rfs()
932 rc = -EPROTONOSUPPORT; in efx_filter_rfs()
936 rc = -EPROTONOSUPPORT; in efx_filter_rfs()
940 req = efx->rps_slot + slot_idx; in efx_filter_rfs()
941 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, in efx_filter_rfs()
942 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, in efx_filter_rfs()
944 req->spec.match_flags = in efx_filter_rfs()
948 req->spec.ether_type = fk.basic.n_proto; in efx_filter_rfs()
949 req->spec.ip_proto = fk.basic.ip_proto; in efx_filter_rfs()
952 req->spec.rem_host[0] = fk.addrs.v4addrs.src; in efx_filter_rfs()
953 req->spec.loc_host[0] = fk.addrs.v4addrs.dst; in efx_filter_rfs()
955 memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, in efx_filter_rfs()
957 memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, in efx_filter_rfs()
961 req->spec.rem_port = fk.ports.src; in efx_filter_rfs()
962 req->spec.loc_port = fk.ports.dst; in efx_filter_rfs()
964 if (efx->rps_hash_table) { in efx_filter_rfs()
966 spin_lock(&efx->rps_hash_lock); in efx_filter_rfs()
967 rule = efx_rps_hash_add(efx, &req->spec, &new); in efx_filter_rfs()
969 rc = -ENOMEM; in efx_filter_rfs()
973 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; in efx_filter_rfs()
974 rc = rule->arfs_id; in efx_filter_rfs()
976 if (!new && rule->rxq_index == rxq_index && in efx_filter_rfs()
977 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) in efx_filter_rfs()
979 rule->rxq_index = rxq_index; in efx_filter_rfs()
980 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; in efx_filter_rfs()
981 spin_unlock(&efx->rps_hash_lock); in efx_filter_rfs()
992 req->net_dev = net_dev; in efx_filter_rfs()
993 netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC); in efx_filter_rfs()
994 INIT_WORK(&req->work, efx_filter_rfs_work); in efx_filter_rfs()
995 req->rxq_index = rxq_index; in efx_filter_rfs()
996 req->flow_id = flow_id; in efx_filter_rfs()
997 schedule_work(&req->work); in efx_filter_rfs()
1000 spin_unlock(&efx->rps_hash_lock); in efx_filter_rfs()
1002 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs()
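efx_filter_rfs() dissects the packet with skb_flow_dissect_flow_keys() and copies the 5-tuple into a filter spec. A hedged sketch of just the IPv4 dissection step; demo_extract_v4_tuple() is an illustrative helper, and the IPv6, fragment and rps_hash bookkeeping shown above are omitted:

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

/* Hedged sketch: pull the IPv4 5-tuple out of an skb, or bail out. */
static bool demo_extract_v4_tuple(const struct sk_buff *skb,
				  __be32 *saddr, __be32 *daddr,
				  __be16 *sport, __be16 *dport, u8 *proto)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return false;
	if (fk.basic.n_proto != htons(ETH_P_IP) ||
	    (fk.control.flags & FLOW_DIS_IS_FRAGMENT))
		return false;
	*proto = fk.basic.ip_proto;
	*saddr = fk.addrs.v4addrs.src;
	*daddr = fk.addrs.v4addrs.dst;
	*sport = fk.ports.src;
	*dport = fk.ports.dst;
	return true;
}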
1009 struct efx_nic *efx = channel->efx; in __efx_filter_rfs_expire()
1013 if (!mutex_trylock(&efx->rps_mutex)) in __efx_filter_rfs_expire()
1015 expire_one = efx->type->filter_rfs_expire_one; in __efx_filter_rfs_expire()
1016 index = channel->rfs_expire_index; in __efx_filter_rfs_expire()
1018 size = efx->type->max_rx_ip_filters; in __efx_filter_rfs_expire()
1020 flow_id = channel->rps_flow_id[index]; in __efx_filter_rfs_expire()
1023 quota--; in __efx_filter_rfs_expire()
1025 netif_info(efx, rx_status, efx->net_dev, in __efx_filter_rfs_expire()
1027 index, channel->channel, flow_id); in __efx_filter_rfs_expire()
1028 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; in __efx_filter_rfs_expire()
1029 channel->rfs_filter_count--; in __efx_filter_rfs_expire()
1036 * if two callers race), ensure that we don't loop forever - in __efx_filter_rfs_expire()
1043 channel->rfs_expire_index = index; in __efx_filter_rfs_expire()
1044 mutex_unlock(&efx->rps_mutex); in __efx_filter_rfs_expire()
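__efx_filter_rfs_expire() walks the per-channel flow-id table in a circle, charging its quota only for live entries, remembering where it stopped, and never looping past its starting point. A self-contained sketch of that pattern; demo_expire_scan() and DEMO_FLOW_ID_INVALID are illustrative:

#include <linux/types.h>

#define DEMO_FLOW_ID_INVALID	0

/* Hedged sketch of a quota-limited circular expiry scan. */
static void demo_expire_scan(u32 *flow_ids, unsigned int size,
			     unsigned int *resume_index, unsigned int quota,
			     bool (*expire_one)(u32 flow_id, unsigned int index))
{
	unsigned int index = *resume_index;
	unsigned int start = index;

	while (quota) {
		u32 flow_id = flow_ids[index];

		if (flow_id != DEMO_FLOW_ID_INVALID) {
			quota--;			/* only live entries count */
			if (expire_one(flow_id, index))
				flow_ids[index] = DEMO_FLOW_ID_INVALID;
		}
		if (++index == size)
			index = 0;			/* wrap around */
		if (index == start)
			break;				/* one full lap, stop */
	}
	*resume_index = index;
}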