Lines matching refs: softnet_data

229 static inline void backlog_lock_irq_save(struct softnet_data *sd, in backlog_lock_irq_save()
238 static inline void backlog_lock_irq_disable(struct softnet_data *sd) in backlog_lock_irq_disable()
246 static inline void backlog_unlock_irq_restore(struct softnet_data *sd, in backlog_unlock_irq_restore()
255 static inline void backlog_unlock_irq_enable(struct softnet_data *sd) in backlog_unlock_irq_enable()
454 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
457 EXPORT_PER_CPU_SYMBOL(softnet_data);
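The matches at 454/457 are the per-CPU definition itself; every other match reaches it either through this_cpu_ptr() for the local CPU (e.g. __netif_reschedule at 3339) or through per_cpu() with an explicit CPU number (e.g. enqueue_to_backlog at 5092). A minimal kernel-style sketch of that definition/access pattern, using a cut-down stand-in struct rather than the real softnet_data:

    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* Illustrative stand-in; the real struct softnet_data has many more fields. */
    struct demo_sd {
            unsigned int processed;
    };

    static DEFINE_PER_CPU_ALIGNED(struct demo_sd, demo_sd);

    static void demo_touch(int remote_cpu)
    {
            /* Local CPU's instance; valid only while migration is prevented
             * (BH or preemption disabled), as in the call sites above. */
            struct demo_sd *sd = this_cpu_ptr(&demo_sd);

            /* Another CPU's instance, addressed by CPU number. */
            struct demo_sd *rsd = &per_cpu(demo_sd, remote_cpu);

            sd->processed++;
            (void)rsd;
    }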
3335 struct softnet_data *sd; in __netif_reschedule()
3339 sd = this_cpu_ptr(&softnet_data); in __netif_reschedule()
3403 skb->next = __this_cpu_read(softnet_data.completion_queue); in dev_kfree_skb_irq_reason()
3404 __this_cpu_write(softnet_data.completion_queue, skb); in dev_kfree_skb_irq_reason()
4181 return __this_cpu_read(softnet_data.xmit.skip_txqueue); in netdev_xmit_txqueue_skipped()
4186 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); in netdev_xmit_skip_txqueue()
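Lines 3403-3404 (dev_kfree_skb_irq_reason) and 4181/4186 touch a single field of the local softnet_data through the __this_cpu_read()/__this_cpu_write() accessors instead of taking a pointer first. A hedged sketch of the completion-queue push those first two lines perform; the field name comes from the listing, and the assumption that the caller already has interrupts disabled on this CPU matches the _irq naming:

    #include <linux/percpu.h>
    #include <linux/skbuff.h>

    /* Stand-in carrying only the field used here. */
    struct demo_sd {
            struct sk_buff *completion_queue;
    };
    static DEFINE_PER_CPU(struct demo_sd, demo_sd);

    static void demo_push_completion(struct sk_buff *skb)
    {
            /* Singly linked push onto the local CPU's list: read the current
             * head, link the new skb in front of it, write the head back.
             * Safe without a lock only because IRQs are off on this CPU. */
            skb->next = __this_cpu_read(demo_sd.completion_queue);
            __this_cpu_write(demo_sd.completion_queue, skb);
    }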
4731 static inline void ____napi_schedule(struct softnet_data *sd, in ____napi_schedule()
4811 head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head); in set_rps_cpu()
4894 ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) - in get_rps_cpu()
4949 ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) - in rps_may_expire_flow()
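The three RPS matches above (set_rps_cpu, get_rps_cpu, rps_may_expire_flow) read another CPU's input_queue_head without any lock, pairing READ_ONCE() with a signed subtraction so the comparison survives counter wrap-around. A sketch of that idiom; last_qtail is a hypothetical stand-in for the per-flow tail value the real code records when the packet is enqueued:

    #include <linux/compiler.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_sd {
            unsigned int input_queue_head; /* packets dequeued so far */
    };
    static DEFINE_PER_CPU(struct demo_sd, demo_sd);

    /* True once the remote CPU's head counter has passed last_qtail, i.e.
     * every packet queued up to that point has been consumed. */
    static bool demo_flow_drained(int cpu, unsigned int last_qtail)
    {
            unsigned int head = READ_ONCE(per_cpu(demo_sd, cpu).input_queue_head);

            /* Signed difference tolerates wrap-around of the counters. */
            return (int)(head - last_qtail) >= 0;
    }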
4964 struct softnet_data *sd = data; in rps_trigger_softirq()
4975 struct softnet_data *sd = data; in trigger_rx_softirq()
4991 static void napi_schedule_rps(struct softnet_data *sd) in napi_schedule_rps()
4993 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); in napi_schedule_rps()
5016 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) in kick_defer_list_purge()
5041 struct softnet_data *sd; in skb_flow_limit()
5047 sd = this_cpu_ptr(&softnet_data); in skb_flow_limit()
5081 struct softnet_data *sd; in enqueue_to_backlog()
5092 sd = &per_cpu(softnet_data, cpu); in enqueue_to_backlog()
5482 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_tx_action()
5710 __this_cpu_inc(softnet_data.processed); in __netif_receive_skb_core()
6190 struct softnet_data *sd; in flush_backlog()
6193 sd = this_cpu_ptr(&softnet_data); in flush_backlog()
6205 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in flush_backlog()
6213 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in flush_backlog()
6220 struct softnet_data *sd = &per_cpu(softnet_data, cpu); in flush_required()
6292 static void net_rps_send_ipi(struct softnet_data *remsd) in net_rps_send_ipi()
6296 struct softnet_data *next = remsd->rps_ipi_next; in net_rps_send_ipi()
6309 static void net_rps_action_and_irq_enable(struct softnet_data *sd) in net_rps_action_and_irq_enable()
6312 struct softnet_data *remsd = sd->rps_ipi_list; in net_rps_action_and_irq_enable()
6326 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) in sd_has_rps_ipi_waiting()
6337 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
6353 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6355 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6364 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6366 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6381 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
6384 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); in process_backlog()
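flush_backlog() and process_backlog() bracket every access to the local process queue with local_lock_nested_bh()/local_unlock_nested_bh() on process_queue_bh_lock; on !PREEMPT_RT this only feeds lockdep, while on PREEMPT_RT it is a real per-CPU lock because softirq processing can be preempted there. A minimal sketch of that bracketing, assuming a stand-in struct whose queue was initialized elsewhere with skb_queue_head_init():

    #include <linux/local_lock.h>
    #include <linux/percpu.h>
    #include <linux/skbuff.h>

    struct demo_sd {
            local_lock_t        process_queue_bh_lock;
            struct sk_buff_head process_queue;
    };
    static DEFINE_PER_CPU(struct demo_sd, demo_sd) = {
            .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
    };

    /* Pop one skb from the local CPU's process queue; runs in BH context. */
    static struct sk_buff *demo_dequeue_one(void)
    {
            struct sk_buff *skb;

            local_lock_nested_bh(&demo_sd.process_queue_bh_lock);
            skb = __skb_dequeue(&this_cpu_ptr(&demo_sd)->process_queue);
            local_unlock_nested_bh(&demo_sd.process_queue_bh_lock);

            return skb;
    }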
6406 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule()
6456 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule_irqoff()
6534 static void skb_defer_free_flush(struct softnet_data *sd) in skb_defer_free_flush()
6684 skb_defer_free_flush(this_cpu_ptr(&softnet_data)); in __napi_busy_loop()
7297 struct softnet_data *sd; in napi_threaded_poll_loop()
7307 sd = this_cpu_ptr(&softnet_data); in napi_threaded_poll_loop()
7345 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_rx_action()
12141 struct softnet_data *sd, *oldsd, *remsd = NULL; in dev_cpu_dead()
12145 sd = &per_cpu(softnet_data, cpu); in dev_cpu_dead()
12146 oldsd = &per_cpu(softnet_data, oldcpu); in dev_cpu_dead()
12535 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_should_run()
12543 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in run_backlog_napi()
12550 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_setup()
12598 struct softnet_data *sd = &per_cpu(softnet_data, i); in net_dev_init()
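The last match, in net_dev_init(), is the boot-time loop that walks every possible CPU and prepares its softnet_data before networking starts. A hedged sketch of that per-CPU initialization pattern; the specific fields initialized here are illustrative stand-ins, not a claim about what net_dev_init() actually sets up:

    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/percpu.h>
    #include <linux/skbuff.h>

    struct demo_sd {
            struct sk_buff_head input_pkt_queue;
            struct sk_buff_head process_queue;
    };
    static DEFINE_PER_CPU(struct demo_sd, demo_sd);

    static int __init demo_init(void)
    {
            int i;

            /* Touch the instance of every CPU that may ever come online,
             * not just those online now, so hotplugged CPUs find valid state. */
            for_each_possible_cpu(i) {
                    struct demo_sd *sd = &per_cpu(demo_sd, i);

                    skb_queue_head_init(&sd->input_pkt_queue);
                    skb_queue_head_init(&sd->process_queue);
            }
            return 0;
    }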