/linux/net/openvswitch/

datapath.h
     84: struct datapath {  (struct)
    193: static inline struct net *ovs_dp_get_net(const struct datapath *dp)  in ovs_dp_get_net()
    198: static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)  in ovs_dp_set_net()
    203: struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);
    205: static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)  in ovs_vport_rcu()
    211: static inline struct vport *ovs_vport_ovsl_rcu(const struct datapath *dp, int port_no)  in ovs_vport_ovsl_rcu()
    217: static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_no)  in ovs_vport_ovsl()
    224: static inline struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)  in get_dp_rcu()
    241: static inline struct datapath *get_dp(struct net *net, int dp_ifindex)  in get_dp()
    243: struct datapath *dp;  in get_dp()
    [all …]

meter.h
     18: struct datapath;
     57: int ovs_meters_init(struct datapath *dp);
     58: void ovs_meters_exit(struct datapath *dp);
     59: bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,

datapath.c
    127: static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
    131: static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
    138: static int ovs_dp_set_upcall_portids(struct datapath *, const struct nlattr *);
    141: const char *ovs_dp_name(const struct datapath *dp)  in ovs_dp_name()
    147: static int get_dpifindex(const struct datapath *dp)  in get_dpifindex()
    167: struct datapath *dp = container_of(rcu, struct datapath, rcu);  in destroy_dp_rcu()
    177: static struct hlist_head *vport_hash_bucket(const struct datapath *dp,  in vport_hash_bucket()
    184: struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)  in ovs_lookup_vport()
    205: struct datapath *dp = parms->dp;  in new_vport()
    248: struct datapath *dp = p->dp;  in ovs_dp_process_packet()
    [all …]

dp_notify.c
     17: struct datapath *dp;  in dp_detach_port_notify()
     37: struct datapath *dp;  in ovs_dp_notify_wq()

meter.c
    279: struct datapath *dp;  in ovs_meter_cmd_features()
    414: struct datapath *dp;  in ovs_meter_cmd_set()
    490: struct datapath *dp;  in ovs_meter_cmd_get()
    543: struct datapath *dp;  in ovs_meter_cmd_del()
    592: bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,  in ovs_meter_execute()
    729: int ovs_meters_init(struct datapath *dp)  in ovs_meters_init()
    756: void ovs_meters_exit(struct datapath *dp)  in ovs_meters_exit()

vport.h
     77: struct datapath *dp;
    108: struct datapath *dp;

actions.c
    160: static int clone_execute(struct datapath *dp, struct sk_buff *skb,
    166: static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
    932: static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,  in do_output()
    971: static int output_userspace(struct datapath *dp, struct sk_buff *skb,  in output_userspace()
   1030: static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,  in dec_ttl_exception_handler()
   1049: static int sample(struct datapath *dp, struct sk_buff *skb,  in sample()
   1090: static int clone(struct datapath *dp, struct sk_buff *skb,  in clone()
   1226: static int execute_recirc(struct datapath *dp, struct sk_buff *skb,  in execute_recirc()
   1245: static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,  in execute_check_pkt_len()
   1324: static void execute_psample(struct datapath *dp, struct sk_buff *skb,  in execute_psample()
    [all …]

Makefile
     10: datapath.o \

openvswitch_trace.h
     10: #include "datapath.h"
     14: TP_PROTO(struct datapath *dp, struct sk_buff *skb,
     84: TP_PROTO(struct datapath *dp, struct sk_buff *skb,

vport-netdev.c
     68: static struct net_device *get_dpdev(const struct datapath *dp)  in get_dpdev()

Kconfig
     31: The Open vSwitch datapath provides an in-kernel fast path for packet

/linux/fs/overlayfs/

namei.c
    365: struct path *datapath)  in ovl_lookup_data_layer() (argument)
    371: datapath);  in ovl_lookup_data_layer()
    379: if (ovl_dentry_weird(datapath->dentry))  in ovl_lookup_data_layer()
    384: if (!d_is_reg(datapath->dentry))  in ovl_lookup_data_layer()
    390: path_put(datapath);  in ovl_lookup_data_layer()
    401: struct path datapath;  in ovl_lookup_data_layers() (local)
    407: err = ovl_lookup_data_layer(dentry, redirect, layer, &datapath);  in ovl_lookup_data_layers()
    409: mntput(datapath.mnt);  in ovl_lookup_data_layers()
    410: lowerdata->dentry = datapath.dentry;  in ovl_lookup_data_layers()
    924: struct path datapath, metapath;  in ovl_maybe_validate_verity() (local)
    [all …]

util.c
    336: int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath)  in ovl_dentry_set_lowerdata() (argument)
    340: struct dentry *datadentry = datapath->dentry;  in ovl_dentry_set_lowerdata()
    345: WRITE_ONCE(lowerdata->layer, datapath->layer);  in ovl_dentry_set_lowerdata()
   1377: int ovl_ensure_verity_loaded(struct path *datapath)  in ovl_ensure_verity_loaded() (argument)
   1379: struct inode *inode = d_inode(datapath->dentry);  in ovl_ensure_verity_loaded()
   1387: filp = kernel_file_open(datapath, O_RDONLY, current_cred());  in ovl_ensure_verity_loaded()
   1398: struct path *datapath)  in ovl_validate_verity() (argument)
   1426: err = ovl_ensure_verity_loaded(datapath);  in ovl_validate_verity()
   1429: datapath->dentry);  in ovl_validate_verity()
   1433: digest_size = fsverity_get_digest(d_inode(datapath->dentry), actual_digest,  in ovl_validate_verity()
    [all …]

copy_up.c
    264: struct path datapath;  in ovl_copy_up_file() (local)
    274: ovl_path_lowerdata(dentry, &datapath);  in ovl_copy_up_file()
    275: if (WARN_ON_ONCE(datapath.dentry == NULL) ||  in ovl_copy_up_file()
    279: old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY);  in ovl_copy_up_file()

overlayfs.h
    458: int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath);
    547: struct path *datapath);

/linux/Documentation/networking/

failover.rst
     17: datapath. It also allows live migration of VMs with direct attached VFs by
     18: failing over to the paravirtual datapath when the VF is unplugged.

net_failover.rst
     24: datapath. It also enables hypervisor controlled live migration of a VM with
     25: direct attached VF by failing over to the paravirtual datapath when the VF
     28: virtio-net accelerated datapath: STANDBY mode
     31: net_failover enables hypervisor controlled accelerated datapath to virtio-net
    118: the paravirtual datapath when the VF is unplugged.

nf_flowtable.rst
      8: you to define a fastpath through the flowtable datapath. This infrastructure
     37: The flowtable datapath is represented in Fig.1, which describes the classic IP
    124: netdevice behind VLAN and PPPoE netdevices. The flowtable software datapath
    127: flowtable datapath also deals with layer 2 decapsulation.
    206: to the hardware offload datapath being used by the flow.

openvswitch.rst
      4: Open vSwitch datapath developer documentation
     15: within a bridge). Each datapath also has associated with it a "flow

/linux/drivers/soc/fsl/qbman/

Kconfig
     14: that allows software and accelerators on the datapath to acquire and
     18: that allows software and accelerators on the datapath to enqueue and

/linux/drivers/vdpa/

Kconfig
      7: datapath which complies with virtio specifications with
     74: of virtio net datapath such that descriptors put on the ring will

/linux/Documentation/networking/device_drivers/ethernet/google/

gve.rst
     74: - GQI descriptors and datapath registers are Big Endian.
     75: - DQO descriptors and datapath registers are Little Endian.

/linux/Documentation/admin-guide/perf/

xgene-pmu.rst
     25: performance of a specific datapath. For example, agents of a L3 cache can be

/linux/Documentation/accel/qaic/

qaic.rst
    180: Configures QAIC to use a polling thread for datapath events instead of relying
    203: Sets the polling interval in microseconds (us) when datapath polling is active.

/linux/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/

kconfig.rst
    157: | of virtio net datapath such that descriptors put on the ring will