Lines Matching full:napi
136 #include <trace/events/napi.h>
463 * (e.g. NAPI context).
778 struct napi_struct *napi; in napi_by_id() local
780 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) in napi_by_id()
781 if (napi->napi_id == napi_id) in napi_by_id()
782 return napi; in napi_by_id()
791 struct napi_struct *napi; in netdev_napi_by_id() local
793 napi = napi_by_id(napi_id); in netdev_napi_by_id()
794 if (!napi) in netdev_napi_by_id()
797 if (WARN_ON_ONCE(!napi->dev)) in netdev_napi_by_id()
799 if (!net_eq(net, dev_net(napi->dev))) in netdev_napi_by_id()
802 return napi; in netdev_napi_by_id()
806 * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
808 * @napi_id: ID of a NAPI of a target device
810 * Find a NAPI instance with @napi_id. Lock its device.
814 * Return: pointer to NAPI, its device with lock held, NULL if not found.
819 struct napi_struct *napi; in netdev_napi_by_id_lock() local
823 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
824 if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) { in netdev_napi_by_id_lock()
829 dev = napi->dev; in netdev_napi_by_id_lock()
838 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
839 if (napi && napi->dev != dev) in netdev_napi_by_id_lock()
840 napi = NULL; in netdev_napi_by_id_lock()
843 if (!napi) in netdev_napi_by_id_lock()
845 return napi; in netdev_napi_by_id_lock()
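
A hedged reading of the kernel-doc above: netdev_napi_by_id_lock() is a core-internal lookup that returns the NAPI with its device's instance lock already held, so the caller is responsible for dropping that lock. A minimal sketch of the calling pattern, assuming the netdev_unlock() pairing used elsewhere in the core (the error code and the work done under the lock are placeholders):

	struct napi_struct *napi;

	napi = netdev_napi_by_id_lock(net, napi_id);
	if (!napi)
		return -ENOENT;	/* no such NAPI, or its device is not registered */

	/* ... inspect or reconfigure the NAPI while napi->dev is locked ... */

	netdev_unlock(napi->dev);
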
1015 * @napi_id: ID of the NAPI struct
1017 * Search for an interface by NAPI ID. Returns %NULL if the device
1024 struct napi_struct *napi; in dev_get_by_napi_id() local
1031 napi = napi_by_id(napi_id); in dev_get_by_napi_id()
1033 return napi ? napi->dev : NULL; in dev_get_by_napi_id()
1636 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", in napi_kthread_create()
4843 struct napi_struct *napi) in ____napi_schedule() argument
4849 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { in ____napi_schedule()
4853 * read on napi->thread. Only call in ____napi_schedule()
4856 thread = READ_ONCE(napi->thread); in ____napi_schedule()
4861 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in ____napi_schedule()
4868 DEBUG_NET_WARN_ON_ONCE(!list_empty(&napi->poll_list)); in ____napi_schedule()
4869 list_add_tail(&napi->poll_list, &sd->poll_list); in ____napi_schedule()
4870 WRITE_ONCE(napi->list_owner, smp_processor_id()); in ____napi_schedule()
5155 * - If this is our own queue, NAPI schedule our backlog.
5271 /* Schedule NAPI for backlog device. We can use in enqueue_to_backlog()
5622 * the upper (protocol) levels to process via the backlog NAPI device. It
5625 * The network buffer is passed via the backlog NAPI device. Modern NIC
5626 * 		drivers should use NAPI and GRO.
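
The comment above is the core's recommendation: new drivers should not feed packets through the netif_rx() backlog but run their own NAPI instance and hand frames to GRO. A hedged sketch of that recommended poll-callback shape; struct my_priv and my_rx_ring_poll() are hypothetical driver details, not kernel APIs:

	#include <linux/netdevice.h>

	struct my_priv {
		struct napi_struct napi;
		/* ... RX ring state ... */
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int work = 0;

		while (work < budget) {
			struct sk_buff *skb = my_rx_ring_poll(priv);	/* hypothetical */

			if (!skb)
				break;
			napi_gro_receive(napi, skb);	/* GRO instead of netif_rx() */
			work++;
		}

		/* Ring drained before the budget ran out: complete the NAPI so the
		 * core can re-arm interrupts (or defer them, see gro_flush_timeout).
		 */
		if (work < budget)
			napi_complete_done(napi, work);

		return work;
	}
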
6522 static int process_backlog(struct napi_struct *napi, int quota) in process_backlog() argument
6524 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
6536 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight); in process_backlog()
6559 * only current cpu owns and manipulates this napi, in process_backlog()
6565 napi->state &= NAPIF_STATE_THREADED; in process_backlog()
6599 * napi_schedule_prep - check if napi can be scheduled
6600 * @n: napi context
6602 * Test if NAPI routine is already running, and if not mark
6604 * ensure only one NAPI poll instance runs. We also make
6605 * sure there is no pending NAPI disable.
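
napi_schedule_prep() is the first half of the canonical hard-IRQ pattern the kernel-doc above describes: atomically claim the NAPI, and only if that succeeds mask the device interrupt and queue the poll with __napi_schedule(). A sketch, reusing the hypothetical struct my_priv from the poll sketch above:

	#include <linux/interrupt.h>

	static irqreturn_t my_interrupt(int irq, void *dev_id)
	{
		struct my_priv *priv = dev_id;

		if (napi_schedule_prep(&priv->napi)) {
			/* ... mask/ack the device's RX interrupt here ... */
			__napi_schedule(&priv->napi);
		}
		return IRQ_HANDLED;
	}

Drivers that have no device-specific work to do between the two steps can simply call napi_schedule(), which wraps this pair.
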
6655 * 1) Don't let napi dequeue from the cpu poll list in napi_complete_done()
6677 * When the NAPI instance uses a timeout and keeps postponing in napi_complete_done()
6700 * because we will call napi->poll() one more time. in napi_complete_done()
6742 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) in __busy_poll_stop() argument
6745 gro_normal_list(&napi->gro); in __busy_poll_stop()
6746 __napi_schedule(napi); in __busy_poll_stop()
6751 gro_flush_normal(&napi->gro, HZ >= 1000); in __busy_poll_stop()
6753 clear_bit(NAPI_STATE_SCHED, &napi->state); in __busy_poll_stop()
6761 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, in busy_poll_stop() argument
6772 * Since we are about to call napi->poll() once more, we can safely in busy_poll_stop()
6778 clear_bit(NAPI_STATE_MISSED, &napi->state); in busy_poll_stop()
6779 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); in busy_poll_stop()
6785 napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); in busy_poll_stop()
6786 timeout = napi_get_gro_flush_timeout(napi); in busy_poll_stop()
6787 if (napi->defer_hard_irqs_count && timeout) { in busy_poll_stop()
6788 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); in busy_poll_stop()
6796 rc = napi->poll(napi, budget); in busy_poll_stop()
6797 /* We can't gro_normal_list() here, because napi->poll() might have in busy_poll_stop()
6798 * rearmed the napi (napi_complete_done()) in which case it could in busy_poll_stop()
6801 trace_napi_poll(napi, rc, budget); in busy_poll_stop()
6804 __busy_poll_stop(napi, skip_schedule); in busy_poll_stop()
6814 int (*napi_poll)(struct napi_struct *napi, int budget); in __napi_busy_loop()
6817 struct napi_struct *napi; in __napi_busy_loop() local
6824 napi = napi_by_id(napi_id); in __napi_busy_loop()
6825 if (!napi) in __napi_busy_loop()
6836 unsigned long val = READ_ONCE(napi->state); in __napi_busy_loop()
6838 /* If multiple threads are competing for this napi, in __napi_busy_loop()
6839 * we avoid dirtying napi->state as much as we can. in __napi_busy_loop()
6844 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6847 if (cmpxchg(&napi->state, val, in __napi_busy_loop()
6851 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6854 have_poll_lock = netpoll_poll_lock(napi); in __napi_busy_loop()
6855 napi_poll = napi->poll; in __napi_busy_loop()
6857 work = napi_poll(napi, budget); in __napi_busy_loop()
6858 trace_napi_poll(napi, work, budget); in __napi_busy_loop()
6859 gro_normal_list(&napi->gro); in __napi_busy_loop()
6862 __NET_ADD_STATS(dev_net(napi->dev), in __napi_busy_loop()
6875 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
6888 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
6919 struct napi_struct *napi; in napi_suspend_irqs() local
6922 napi = napi_by_id(napi_id); in napi_suspend_irqs()
6923 if (napi) { in napi_suspend_irqs()
6924 unsigned long timeout = napi_get_irq_suspend_timeout(napi); in napi_suspend_irqs()
6927 hrtimer_start(&napi->timer, ns_to_ktime(timeout), in napi_suspend_irqs()
6935 struct napi_struct *napi; in napi_resume_irqs() local
6938 napi = napi_by_id(napi_id); in napi_resume_irqs()
6939 if (napi) { in napi_resume_irqs()
6945 if (napi_get_irq_suspend_timeout(napi)) { in napi_resume_irqs()
6947 napi_schedule(napi); in napi_resume_irqs()
6956 static void __napi_hash_add_with_id(struct napi_struct *napi, in __napi_hash_add_with_id() argument
6959 napi->gro.cached_napi_id = napi_id; in __napi_hash_add_with_id()
6961 WRITE_ONCE(napi->napi_id, napi_id); in __napi_hash_add_with_id()
6962 hlist_add_head_rcu(&napi->napi_hash_node, in __napi_hash_add_with_id()
6963 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); in __napi_hash_add_with_id()
6966 static void napi_hash_add_with_id(struct napi_struct *napi, in napi_hash_add_with_id() argument
6973 __napi_hash_add_with_id(napi, napi_id); in napi_hash_add_with_id()
6977 static void napi_hash_add(struct napi_struct *napi) in napi_hash_add() argument
6981 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) in napi_hash_add()
6992 __napi_hash_add_with_id(napi, napi_gen_id); in napi_hash_add()
6998 * is respected before freeing memory containing @napi
7000 static void napi_hash_del(struct napi_struct *napi) in napi_hash_del() argument
7006 hlist_del_init_rcu(&napi->napi_hash_node); in napi_hash_del()
7013 struct napi_struct *napi; in napi_watchdog() local
7015 napi = container_of(timer, struct napi_struct, timer); in napi_watchdog()
7020 if (!napi_disable_pending(napi) && in napi_watchdog()
7021 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { in napi_watchdog()
7022 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in napi_watchdog()
7023 __napi_schedule_irqoff(napi); in napi_watchdog()
7029 static void napi_stop_kthread(struct napi_struct *napi) in napi_stop_kthread() argument
7033 /* Wait until the napi STATE_THREADED is unset. */ in napi_stop_kthread()
7035 val = READ_ONCE(napi->state); in napi_stop_kthread()
7037 		/* If napi kthread owns this napi or the napi is idle, in napi_stop_kthread()
7048 if (try_cmpxchg(&napi->state, &val, new)) in napi_stop_kthread()
7056 if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) in napi_stop_kthread()
7062 kthread_stop(napi->thread); in napi_stop_kthread()
7063 napi->thread = NULL; in napi_stop_kthread()
7066 int napi_set_threaded(struct napi_struct *napi, in napi_set_threaded() argument
7070 if (!napi->thread) { in napi_set_threaded()
7071 int err = napi_kthread_create(napi); in napi_set_threaded()
7078 if (napi->config) in napi_set_threaded()
7079 napi->config->threaded = threaded; in napi_set_threaded()
7081 /* Setting/unsetting threaded mode on a napi might not immediately in napi_set_threaded()
7082 * take effect, if the current napi instance is actively being in napi_set_threaded()
7087 if (!threaded && napi->thread) { in napi_set_threaded()
7088 napi_stop_kthread(napi); in napi_set_threaded()
7092 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); in napi_set_threaded()
7101 struct napi_struct *napi; in netif_set_threaded() local
7107 list_for_each_entry(napi, &dev->napi_list, dev_list) { in netif_set_threaded()
7108 if (!napi->thread) { in netif_set_threaded()
7109 err = napi_kthread_create(napi); in netif_set_threaded()
7121 list_for_each_entry(napi, &dev->napi_list, dev_list) in netif_set_threaded()
7122 WARN_ON_ONCE(napi_set_threaded(napi, threaded)); in netif_set_threaded()
7135 * Enable threaded mode for the NAPI instances of the device. This may be useful
7136 * for devices where multiple NAPI instances get scheduled by a single
7137 * interrupt. Threaded NAPI allows moving the NAPI processing to cores other
7149 * netif_queue_set_napi - Associate queue with the napi
7150 * @dev: device to which NAPI and queue belong
7153 * @napi: NAPI context, pass NULL to clear previously set NAPI
7155 * Set queue with its corresponding napi context. This should be done after
7156 * registering the NAPI handler for the queue-vector and the queues have been
7160 enum netdev_queue_type type, struct napi_struct *napi) in netif_queue_set_napi() argument
7165 if (WARN_ON_ONCE(napi && !napi->dev)) in netif_queue_set_napi()
7172 rxq->napi = napi; in netif_queue_set_napi()
7176 txq->napi = napi; in netif_queue_set_napi()
7188 struct napi_struct *napi = in netif_napi_irq_notify() local
7191 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_irq_notify()
7195 if (napi->config && napi->dev->irq_affinity_auto) in netif_napi_irq_notify()
7196 cpumask_copy(&napi->config->affinity_mask, mask); in netif_napi_irq_notify()
7199 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_irq_notify()
7200 err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); in netif_napi_irq_notify()
7202 netdev_warn(napi->dev, "RMAP update failed (%d)\n", in netif_napi_irq_notify()
7211 struct napi_struct *napi = in netif_napi_affinity_release() local
7213 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_affinity_release()
7215 netdev_assert_locked(napi->dev); in netif_napi_affinity_release()
7217 &napi->state)); in netif_napi_affinity_release()
7219 if (!napi->dev->rx_cpu_rmap_auto) in netif_napi_affinity_release()
7221 rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_affinity_release()
7222 napi->napi_rmap_idx = -1; in netif_napi_affinity_release()
7284 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) in netif_napi_set_irq_locked() argument
7288 netdev_assert_locked_or_invisible(napi->dev); in netif_napi_set_irq_locked()
7290 if (napi->irq == irq) in netif_napi_set_irq_locked()
7294 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in netif_napi_set_irq_locked()
7295 irq_set_affinity_notifier(napi->irq, NULL); in netif_napi_set_irq_locked()
7297 napi->irq = irq; in netif_napi_set_irq_locked()
7299 (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) in netif_napi_set_irq_locked()
7303 if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) in netif_napi_set_irq_locked()
7307 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7308 rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); in netif_napi_set_irq_locked()
7312 cpu_rmap_get(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7313 napi->napi_rmap_idx = rc; in netif_napi_set_irq_locked()
7318 napi->notify.notify = netif_napi_irq_notify; in netif_napi_set_irq_locked()
7319 napi->notify.release = netif_napi_affinity_release; in netif_napi_set_irq_locked()
7320 rc = irq_set_affinity_notifier(irq, &napi->notify); in netif_napi_set_irq_locked()
7322 netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", in netif_napi_set_irq_locked()
7327 set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); in netif_napi_set_irq_locked()
7332 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7333 napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_set_irq_locked()
7334 cpu_rmap_put(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7335 napi->napi_rmap_idx = -1; in netif_napi_set_irq_locked()
7338 napi->notify.notify = NULL; in netif_napi_set_irq_locked()
7339 napi->notify.release = NULL; in netif_napi_set_irq_locked()
7353 /* a NAPI ID might be stored in the config, if so use it. if not, use in napi_restore_config()
7374 /* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will
7378 netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) in netif_napi_dev_list_add() argument
7385 if (napi->config && napi->config->napi_id) in netif_napi_dev_list_add()
7386 new_id = napi->config->napi_id; in netif_napi_dev_list_add()
7401 list_add_rcu(&napi->dev_list, higher); /* adds after higher */ in netif_napi_dev_list_add()
7410 static void napi_get_frags_check(struct napi_struct *napi) in napi_get_frags_check() argument
7415 skb = napi_get_frags(napi); in napi_get_frags_check()
7417 napi_free_frags(napi); in napi_get_frags_check()
7422 struct napi_struct *napi, in netif_napi_add_weight_locked() argument
7427 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) in netif_napi_add_weight_locked()
7430 INIT_LIST_HEAD(&napi->poll_list); in netif_napi_add_weight_locked()
7431 INIT_HLIST_NODE(&napi->napi_hash_node); in netif_napi_add_weight_locked()
7432 hrtimer_setup(&napi->timer, napi_watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); in netif_napi_add_weight_locked()
7433 gro_init(&napi->gro); in netif_napi_add_weight_locked()
7434 napi->skb = NULL; in netif_napi_add_weight_locked()
7435 napi->poll = poll; in netif_napi_add_weight_locked()
7439 napi->weight = weight; in netif_napi_add_weight_locked()
7440 napi->dev = dev; in netif_napi_add_weight_locked()
7442 napi->poll_owner = -1; in netif_napi_add_weight_locked()
7444 napi->list_owner = -1; in netif_napi_add_weight_locked()
7445 set_bit(NAPI_STATE_SCHED, &napi->state); in netif_napi_add_weight_locked()
7446 set_bit(NAPI_STATE_NPSVC, &napi->state); in netif_napi_add_weight_locked()
7447 netif_napi_dev_list_add(dev, napi); in netif_napi_add_weight_locked()
7449 /* default settings from sysfs are applied to all NAPIs. any per-NAPI in netif_napi_add_weight_locked()
7452 napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); in netif_napi_add_weight_locked()
7453 napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); in netif_napi_add_weight_locked()
7455 napi_get_frags_check(napi); in netif_napi_add_weight_locked()
7456 /* Create kthread for this napi if dev->threaded is set. in netif_napi_add_weight_locked()
7460 if (napi_get_threaded_config(dev, napi)) in netif_napi_add_weight_locked()
7461 if (napi_kthread_create(napi)) in netif_napi_add_weight_locked()
7463 netif_napi_set_irq_locked(napi, -1); in netif_napi_add_weight_locked()
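
netif_napi_add_weight_locked() is the locked core helper that the driver-facing registration wrappers funnel into; most drivers reach it through netif_napi_add() at probe/setup time and, if they want the IRQ-affinity handling seen above, also declare the interrupt. A probe-time sketch using the hypothetical my_poll() and priv from the earlier sketches:

	netif_napi_add(dev, &priv->napi, my_poll);	/* default weight of 64 */
	netif_napi_set_irq(&priv->napi, priv->rx_irq);	/* priv->rx_irq is hypothetical */
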
7499 * napi_disable() - prevent NAPI from scheduling
7500 * @n: NAPI context
7502 * Stop NAPI from being scheduled on this context.
7534 * napi_enable() - enable NAPI scheduling
7535 * @n: NAPI context
7537 * Enable scheduling of a NAPI instance.
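
napi_disable()/napi_enable() bracket the window in which the poll callback is allowed to run, so they typically live in the device's open/stop callbacks. A sketch of that pairing (interrupt masking details are hypothetical, and struct my_priv is assumed to be the netdev_priv() area):

	static int my_open(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		napi_enable(&priv->napi);
		/* ... unmask device interrupts ... */
		return 0;
	}

	static int my_stop(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		/* ... mask device interrupts first ... */
		napi_disable(&priv->napi);	/* waits for any in-flight poll */
		return 0;
	}
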
7550 void __netif_napi_del_locked(struct napi_struct *napi) in __netif_napi_del_locked() argument
7552 netdev_assert_locked(napi->dev); in __netif_napi_del_locked()
7554 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) in __netif_napi_del_locked()
7557 /* Make sure NAPI is disabled (or was never enabled). */ in __netif_napi_del_locked()
7558 WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); in __netif_napi_del_locked()
7560 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in __netif_napi_del_locked()
7561 irq_set_affinity_notifier(napi->irq, NULL); in __netif_napi_del_locked()
7563 if (napi->config) { in __netif_napi_del_locked()
7564 napi->index = -1; in __netif_napi_del_locked()
7565 napi->config = NULL; in __netif_napi_del_locked()
7568 list_del_rcu(&napi->dev_list); in __netif_napi_del_locked()
7569 napi_free_frags(napi); in __netif_napi_del_locked()
7571 gro_cleanup(&napi->gro); in __netif_napi_del_locked()
7573 if (napi->thread) { in __netif_napi_del_locked()
7574 kthread_stop(napi->thread); in __netif_napi_del_locked()
7575 napi->thread = NULL; in __netif_napi_del_locked()
7590 * accidentally calling ->poll() when NAPI is not scheduled. in __napi_poll()
7601 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", in __napi_poll()
7607 /* Drivers must not modify the NAPI state if they in __napi_poll()
7609 * still "owns" the NAPI instance and therefore can in __napi_poll()
7617 /* The NAPI context has more processing work, but busy-polling in __napi_poll()
7623 * that the NAPI is re-scheduled. in __napi_poll()
7637 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", in __napi_poll()
7662 pr_crit("repoll requested for device %s %ps but napi is not scheduled.\n", in napi_poll()
7672 static int napi_thread_wait(struct napi_struct *napi) in napi_thread_wait() argument
7678 * kthread owns this napi and could poll on this napi. in napi_thread_wait()
7682 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) { in napi_thread_wait()
7683 WARN_ON(!list_empty(&napi->poll_list)); in napi_thread_wait()
7696 static void napi_threaded_poll_loop(struct napi_struct *napi) in napi_threaded_poll_loop() argument
7712 have = netpoll_poll_lock(napi); in napi_threaded_poll_loop()
7713 __napi_poll(napi, &repoll); in napi_threaded_poll_loop()
7737 struct napi_struct *napi = data; in napi_threaded_poll() local
7739 while (!napi_thread_wait(napi)) in napi_threaded_poll()
7740 napi_threaded_poll_loop(napi); in napi_threaded_poll()
12580 /* Append NAPI poll list from offline CPU, with one exception : in dev_cpu_dead()
12585 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, in dev_cpu_dead() local
12589 list_del_init(&napi->poll_list); in dev_cpu_dead()
12590 if (napi->poll == process_backlog) in dev_cpu_dead()
12591 napi->state &= NAPIF_STATE_THREADED; in dev_cpu_dead()
12593 ____napi_schedule(sd, napi); in dev_cpu_dead()
12953 struct napi_struct *napi = &sd->backlog; in backlog_napi_should_run() local
12955 return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in backlog_napi_should_run()
12968 struct napi_struct *napi = &sd->backlog; in backlog_napi_setup() local
12970 napi->thread = this_cpu_read(backlog_napi); in backlog_napi_setup()
12971 set_bit(NAPI_STATE_THREADED, &napi->state); in backlog_napi_setup()