Lines matching refs: softnet_data
231 static inline void backlog_lock_irq_save(struct softnet_data *sd, in backlog_lock_irq_save()
240 static inline void backlog_lock_irq_disable(struct softnet_data *sd) in backlog_lock_irq_disable()
248 static inline void backlog_unlock_irq_restore(struct softnet_data *sd, in backlog_unlock_irq_restore()
257 static inline void backlog_unlock_irq_enable(struct softnet_data *sd) in backlog_unlock_irq_enable()
456 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
459 EXPORT_PER_CPU_SYMBOL(softnet_data);
3362 struct softnet_data *sd; in __netif_reschedule()
3366 sd = this_cpu_ptr(&softnet_data); in __netif_reschedule()
3430 skb->next = __this_cpu_read(softnet_data.completion_queue); in dev_kfree_skb_irq_reason()
3431 __this_cpu_write(softnet_data.completion_queue, skb); in dev_kfree_skb_irq_reason()
4253 return __this_cpu_read(softnet_data.xmit.skip_txqueue); in netdev_xmit_txqueue_skipped()
4258 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); in netdev_xmit_skip_txqueue()
4804 static inline void ____napi_schedule(struct softnet_data *sd, in ____napi_schedule()
4890 head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head); in set_rps_cpu()
4973 ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) - in get_rps_cpu()
5028 ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) - in rps_may_expire_flow()
5043 struct softnet_data *sd = data; in rps_trigger_softirq()
5055 struct softnet_data *sd = data; in trigger_rx_softirq()
5071 static void napi_schedule_rps(struct softnet_data *sd) in napi_schedule_rps()
5073 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); in napi_schedule_rps()
5096 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) in kick_defer_list_purge()
5121 struct softnet_data *sd; in skb_flow_limit()
5127 sd = this_cpu_ptr(&softnet_data); in skb_flow_limit()
5162 struct softnet_data *sd; in enqueue_to_backlog()
5173 sd = &per_cpu(softnet_data, cpu); in enqueue_to_backlog()
5566 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_tx_action()
5795 __this_cpu_inc(softnet_data.processed); in __netif_receive_skb_core()
6286 struct softnet_data *sd; in flush_backlog()
6290 sd = this_cpu_ptr(&softnet_data); in flush_backlog()
6302 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in flush_backlog()
6310 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in flush_backlog()
6319 struct softnet_data *sd = &per_cpu(softnet_data, cpu); in flush_required()
6391 static void net_rps_send_ipi(struct softnet_data *remsd) in net_rps_send_ipi()
6395 struct softnet_data *next = remsd->rps_ipi_next; in net_rps_send_ipi()
6408 static void net_rps_action_and_irq_enable(struct softnet_data *sd) in net_rps_action_and_irq_enable()
6411 struct softnet_data *remsd = sd->rps_ipi_list; in net_rps_action_and_irq_enable()
6425 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) in sd_has_rps_ipi_waiting()
6436 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
6452 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6454 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6463 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6465 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6480 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6483 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6505 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule()
6555 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule_irqoff()
6631 static void skb_defer_free_flush(struct softnet_data *sd) in skb_defer_free_flush()
6776 skb_defer_free_flush(this_cpu_ptr(&softnet_data)); in __napi_busy_loop()
7611 struct softnet_data *sd; in napi_threaded_poll_loop()
7621 sd = this_cpu_ptr(&softnet_data); in napi_threaded_poll_loop()
7659 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_rx_action()
12440 struct softnet_data *sd, *oldsd, *remsd = NULL; in dev_cpu_dead()
12444 sd = &per_cpu(softnet_data, cpu); in dev_cpu_dead()
12445 oldsd = &per_cpu(softnet_data, oldcpu); in dev_cpu_dead()
12834 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_should_run()
12842 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in run_backlog_napi()
12849 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_setup()
12897 struct softnet_data *sd = &per_cpu(softnet_data, i); in net_dev_init()
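
The listing above repeats a small set of per-CPU access patterns: DEFINE_PER_CPU_ALIGNED() for the declaration, this_cpu_ptr() for the current CPU's instance, per_cpu() for another CPU's instance, and __this_cpu_read()/__this_cpu_write() for single fields on the fast path (plus READ_ONCE() when peeking at a remote CPU's queue head, as in set_rps_cpu()/get_rps_cpu()). The sketch below is a minimal illustration of those accessor patterns only, not the real code in net/core/dev.c; struct softnet_stub and its fields are hypothetical stand-ins for struct softnet_data, and only the percpu accessors themselves are real kernel APIs.

    /*
     * Minimal sketch of the per-CPU access patterns seen in the listing
     * above. "softnet_stub" and its fields are hypothetical stand-ins
     * for struct softnet_data.
     */
    #include <linux/percpu.h>
    #include <linux/compiler.h>
    #include <linux/smp.h>

    struct softnet_stub {
    	unsigned int processed;		/* stand-in for sd->processed */
    	unsigned int input_queue_head;	/* stand-in for sd->input_queue_head */
    };

    /* One instance per CPU, cacheline-aligned, like softnet_data itself. */
    static DEFINE_PER_CPU_ALIGNED(struct softnet_stub, softnet_stub);

    static void local_cpu_example(void)
    {
    	/* Pointer to this CPU's instance; caller must stay pinned (e.g. BH context). */
    	struct softnet_stub *sd = this_cpu_ptr(&softnet_stub);

    	sd->processed++;

    	/* Single-field fast path, without materializing the pointer. */
    	__this_cpu_write(softnet_stub.processed,
    			 __this_cpu_read(softnet_stub.processed) + 1);
    }

    static unsigned int remote_cpu_example(int cpu)
    {
    	/* Another CPU's instance; cross-CPU reads are racy, hence READ_ONCE(). */
    	struct softnet_stub *sd = &per_cpu(softnet_stub, cpu);

    	return READ_ONCE(sd->input_queue_head);
    }

The split mirrors what the listing shows in dev.c: local-CPU paths (net_rx_action(), net_tx_action(), __netif_receive_skb_core()) use this_cpu_ptr() or the __this_cpu_* field accessors, while RPS/backlog paths that target a specific CPU (enqueue_to_backlog(), dev_cpu_dead(), net_dev_init()) use per_cpu(softnet_data, cpu).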