
Searched full:flow (Results 1 – 25 of 1555) sorted by relevance


/linux/tools/testing/selftests/tc-testing/tc-tests/filters/
flow.json
4 "name": "Add flow filter with map key and ops",
7 "flow"
15 …"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key ds…
17 "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
18 …"matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst …
26 "name": "Add flow filter with map key or ops",
29 "flow"
37 …"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key ds…
39 "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
40 …"matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst.…
[all …]
/linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
43 brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) in brcmf_flowring_is_tdls_mac() argument
47 search = flow->tdls_entry; in brcmf_flowring_is_tdls_mac()
59 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_lookup() argument
71 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_lookup()
77 if ((sta) && (flow->tdls_active) && in brcmf_flowring_lookup()
78 (brcmf_flowring_is_tdls_mac(flow, da))) { in brcmf_flowring_lookup()
85 hash = flow->hash; in brcmf_flowring_lookup()
103 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_create() argument
116 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_create()
122 if ((sta) && (flow->tdls_active) && in brcmf_flowring_create()
[all …]
flowring.h
50 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
52 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
54 void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
55 void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
56 u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
57 u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
59 struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
60 void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
62 u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
63 u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
[all …]
/linux/drivers/net/phy/mscc/
mscc_macsec.c
371 struct macsec_flow *flow) in vsc8584_macsec_flow() argument
374 enum macsec_bank bank = flow->bank; in vsc8584_macsec_flow()
375 u32 val, match = 0, mask = 0, action = 0, idx = flow->index; in vsc8584_macsec_flow()
377 if (flow->match.tagged) in vsc8584_macsec_flow()
379 if (flow->match.untagged) in vsc8584_macsec_flow()
382 if (bank == MACSEC_INGR && flow->assoc_num >= 0) { in vsc8584_macsec_flow()
383 match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num); in vsc8584_macsec_flow()
387 if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) { in vsc8584_macsec_flow()
388 u64 sci = (__force u64)flow->rx_sa->sc->sci; in vsc8584_macsec_flow()
400 if (flow->match.etype) { in vsc8584_macsec_flow()
[all …]
/linux/include/net/
fq_impl.h
16 __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, in __fq_adjust_removal() argument
19 struct fq_tin *tin = flow->tin; in __fq_adjust_removal()
24 flow->backlog -= bytes; in __fq_adjust_removal()
28 if (flow->backlog) in __fq_adjust_removal()
31 if (flow == &tin->default_flow) { in __fq_adjust_removal()
36 idx = flow - fq->flows; in __fq_adjust_removal()
41 struct fq_flow *flow, in fq_adjust_removal() argument
44 __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); in fq_adjust_removal()
48 struct fq_flow *flow) in fq_flow_dequeue() argument
54 skb = __skb_dequeue(&flow->queue); in fq_flow_dequeue()
[all …]
/linux/drivers/gpu/ipu-v3/
ipu-dp.c
46 u32 flow; member
64 struct ipu_flow flow[IPUV3_NUM_FLOWS]; member
82 struct ipu_flow *flow = to_flow(dp); in ipu_dp_set_global_alpha() local
83 struct ipu_dp_priv *priv = flow->priv; in ipu_dp_set_global_alpha()
88 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
93 writel(reg, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
96 reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL; in ipu_dp_set_global_alpha()
98 flow->base + DP_GRAPH_WIND_CTRL); in ipu_dp_set_global_alpha()
100 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
101 writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
[all …]
/linux/net/netfilter/
nf_flow_table_core.c
21 flow_offload_fill_dir(struct flow_offload *flow, in flow_offload_fill_dir() argument
24 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; in flow_offload_fill_dir()
25 struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple; in flow_offload_fill_dir()
54 struct flow_offload *flow; in flow_offload_alloc() local
59 flow = kzalloc(sizeof(*flow), GFP_ATOMIC); in flow_offload_alloc()
60 if (!flow) in flow_offload_alloc()
64 flow->ct = ct; in flow_offload_alloc()
66 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); in flow_offload_alloc()
67 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); in flow_offload_alloc()
70 __set_bit(NF_FLOW_SNAT, &flow->flags); in flow_offload_alloc()
[all …]
nf_flow_table_offload.c
24 struct flow_offload *flow; member
224 const struct flow_offload *flow, in flow_offload_eth_src() argument
236 this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_src()
243 other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_src()
271 const struct flow_offload *flow, in flow_offload_eth_dst() argument
286 this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_dst()
293 other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_dst()
328 const struct flow_offload *flow, in flow_offload_ipv4_snat() argument
339 addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in flow_offload_ipv4_snat()
343 addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in flow_offload_ipv4_snat()
[all …]
nf_flow_table_ip.c
22 static int nf_flow_state_check(struct flow_offload *flow, int proto, in nf_flow_state_check() argument
31 if (tcph->syn && test_bit(NF_FLOW_CLOSING, &flow->flags)) { in nf_flow_state_check()
32 flow_offload_teardown(flow); in nf_flow_state_check()
37 !test_bit(NF_FLOW_CLOSING, &flow->flags)) in nf_flow_state_check()
38 set_bit(NF_FLOW_CLOSING, &flow->flags); in nf_flow_state_check()
80 static void nf_flow_snat_ip(const struct flow_offload *flow, in nf_flow_snat_ip() argument
89 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in nf_flow_snat_ip()
94 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in nf_flow_snat_ip()
103 static void nf_flow_dnat_ip(const struct flow_offload *flow, in nf_flow_dnat_ip() argument
112 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr; in nf_flow_dnat_ip()
[all …]
nf_tables_offload.c
12 struct nft_flow_rule *flow; in nft_flow_rule_alloc() local
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL); in nft_flow_rule_alloc()
15 if (!flow) in nft_flow_rule_alloc()
18 flow->rule = flow_rule_alloc(num_actions); in nft_flow_rule_alloc()
19 if (!flow->rule) { in nft_flow_rule_alloc()
20 kfree(flow); in nft_flow_rule_alloc()
24 flow->rule->match.dissector = &flow->match.dissector; in nft_flow_rule_alloc()
25 flow->rule->match.mask = &flow in nft_flow_rule_alloc()
31 nft_flow_rule_set_addr_type(struct nft_flow_rule * flow,enum flow_dissector_key_id addr_type) nft_flow_rule_set_addr_type() argument
54 nft_flow_rule_transfer_vlan(struct nft_offload_ctx * ctx,struct nft_flow_rule * flow) nft_flow_rule_transfer_vlan() argument
92 struct nft_flow_rule *flow; nft_flow_rule_create() local
146 nft_flow_rule_destroy(struct nft_flow_rule * flow) nft_flow_rule_destroy() argument
245 nft_flow_cls_offload_setup(struct flow_cls_offload * cls_flow,const struct nft_base_chain * basechain,const struct nft_rule * rule,const struct nft_flow_rule * flow,struct netlink_ext_ack * extack,enum flow_cls_command command) nft_flow_cls_offload_setup() argument
266 nft_flow_offload_cmd(const struct nft_chain * chain,const struct nft_rule * rule,struct nft_flow_rule * flow,enum flow_cls_command command,struct flow_cls_offload * cls_flow) nft_flow_offload_cmd() argument
286 nft_flow_offload_rule(const struct nft_chain * chain,struct nft_rule * rule,struct nft_flow_rule * flow,enum flow_cls_command command) nft_flow_offload_rule() argument
[all...]
/linux/Documentation/networking/
openvswitch.rst
8 flow-level packet processing on selected network devices. It can be
10 VLAN processing, network access control, flow-based network control,
15 within a bridge). Each datapath also has associated with it a "flow
22 extracting its flow key and looking it up in the flow table. If there
23 is a matching flow, it executes the associated actions. If there is
25 its processing, userspace will likely set up a flow to handle further
29 Flow key compatibility
35 versions to parse additional protocols as part of the flow key. It
39 applications to work with any version of the flow key, past or future.
43 flow key that it parsed from the packet. Userspace then extracts its
[all …]
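
The openvswitch.rst excerpt above describes the datapath's per-packet cycle: extract a flow key from the packet, look it up in the flow table, execute the matching flow's actions on a hit, and hand the packet up to userspace on a miss so that userspace can install a flow for subsequent packets. A minimal, self-contained model of that dispatch follows; the types and the linear-scan table are illustrative stand-ins, not the kernel's sw_flow/flow_table implementation:

    /* Standalone model of the openvswitch per-packet cycle described above.
     * All names here are illustrative, not the kernel's types. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct flow_key { uint32_t src_ip, dst_ip; uint16_t src_port, dst_port; };
    struct flow     { struct flow_key key; int out_port; int in_use; };

    static struct flow flow_table[64];

    static struct flow *flow_lookup(const struct flow_key *key)
    {
    	for (size_t i = 0; i < sizeof(flow_table) / sizeof(flow_table[0]); i++)
    		if (flow_table[i].in_use &&
    		    !memcmp(&flow_table[i].key, key, sizeof(*key)))
    			return &flow_table[i];
    	return NULL;
    }

    static void handle_packet(const struct flow_key *key)
    {
    	struct flow *f = flow_lookup(key);

    	if (f)
    		printf("hit: execute actions, output to port %d\n", f->out_port);
    	else
    		printf("miss: upcall to userspace, which may install a flow\n");
    }

    int main(void)
    {
    	struct flow_key k = { 0x0a000001, 0x0a000002, 12345, 80 };

    	handle_packet(&k);                         /* miss: goes to userspace */
    	flow_table[0] = (struct flow){ k, 3, 1 };  /* userspace installs a flow */
    	handle_packet(&k);                         /* hit: handled in the datapath */
    	return 0;
    }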
scaling.rst
19 - RFS: Receive Flow Steering
20 - Accelerated Receive Flow Steering
31 of logical flows. Packets for each flow are steered to a separate receive
51 both directions of the flow to land on the same Rx queue (and CPU). The
150 RX flow hash indirection table for eth0 with 13 RX ring(s):
156 RX flow hash indirection table for eth0 with 13 RX ring(s):
164 # ethtool -N eth0 flow-type tcp6 dst-port 22 context 1
195 flow hash over the packet’s addresses or ports (2-tuple or 4-tuple hash
197 associated flow of the packet. The hash is either provided by hardware
202 packet’s flow.
[all …]
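
The scaling.rst excerpt covers RSS steering: a hash is computed over the flow's addresses and ports (2-tuple or 4-tuple), and its low-order bits index an indirection table that selects a receive queue, so packets of one flow land on the same queue and CPU. A toy standalone model of that mapping; the hash function and the 128-entry table size are illustrative (real NICs typically use a Toeplitz hash), and the 13 queues simply echo the ethtool output in the excerpt:

    /* Toy RSS model: 4-tuple hash, low bits index an indirection table. */
    #include <stdint.h>
    #include <stdio.h>

    #define INDIR_SIZE 128

    static uint8_t indir_table[INDIR_SIZE];        /* entry -> rx queue number */

    static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
    			  uint16_t sport, uint16_t dport)
    {
    	uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport);

    	h ^= h >> 16;            /* mix so the low bits depend on every input */
    	h *= 0x7feb352d;
    	h ^= h >> 15;
    	return h;
    }

    int main(void)
    {
    	int nqueues = 13;        /* matches the 13 RX rings shown above */

    	for (int i = 0; i < INDIR_SIZE; i++)
    		indir_table[i] = i % nqueues;      /* spread entries evenly */

    	uint32_t h = flow_hash(0x0a000001, 0x0a000002, 12345, 22);
    	printf("flow steered to rx queue %u\n", indir_table[h % INDIR_SIZE]);
    	return 0;
    }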
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_priv.h
92 /* Flow can be associated with multiple encap IDs.
99 struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
104 struct net_device *orig_dev; /* netdev adding flow first */
106 struct list_head tmp_list; /* temporary flow list used by neigh update */
131 struct mlx5e_tc_flow *flow,
136 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
138 void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
139 int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
141 bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
142 bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
[all …]
/linux/drivers/net/ethernet/broadcom/bnxt/
bnxt_tc.c
44 /* Return the dst fid of the func for flow forwarding
370 struct bnxt_tc_flow *flow) in bnxt_tc_parse_flow() argument
391 flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow()
392 flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow()
396 flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow()
397 flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow()
405 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow()
406 ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow()
407 ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow()
408 ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow()
[all …]
/linux/net/openvswitch/
flow_table.c
6 #include "flow.h"
74 struct sw_flow *flow; in ovs_flow_alloc() local
77 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); in ovs_flow_alloc()
78 if (!flow) in ovs_flow_alloc()
81 flow->stats_last_writer = -1; in ovs_flow_alloc()
82 flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids]; in ovs_flow_alloc()
93 RCU_INIT_POINTER(flow->stats[0], stats); in ovs_flow_alloc()
95 cpumask_set_cpu(0, flow->cpu_used_mask); in ovs_flow_alloc()
97 return flow; in ovs_flow_alloc()
108 flow_free(struct sw_flow * flow) flow_free() argument
130 struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); rcu_free_flow_callback() local
135 ovs_flow_free(struct sw_flow * flow,bool deferred) ovs_flow_free() argument
462 table_instance_flow_free(struct flow_table * table,struct table_instance * ti,struct table_instance * ufid_ti,struct sw_flow * flow) table_instance_flow_free() argument
485 struct sw_flow *flow; table_instance_flow_flush() local
528 struct sw_flow *flow; ovs_flow_tbl_dump_next() local
559 table_instance_insert(struct table_instance * ti,struct sw_flow * flow) table_instance_insert() argument
568 ufid_table_instance_insert(struct table_instance * ti,struct sw_flow * flow) ufid_table_instance_insert() argument
587 struct sw_flow *flow; flow_table_copy_flows() local
680 flow_cmp_masked_key(const struct sw_flow * flow,const struct sw_flow_key * key,const struct sw_flow_key_range * range) flow_cmp_masked_key() argument
687 ovs_flow_cmp_unmasked_key(const struct sw_flow * flow,const struct sw_flow_match * match) ovs_flow_cmp_unmasked_key() argument
703 struct sw_flow *flow; masked_flow_lookup() local
736 struct sw_flow *flow; flow_lookup() local
793 struct sw_flow *flow; ovs_flow_tbl_lookup_stats() local
854 struct sw_flow *flow; ovs_flow_tbl_lookup() local
878 struct sw_flow *flow; ovs_flow_tbl_lookup_exact() local
899 ovs_flow_cmp_ufid(const struct sw_flow * flow,const struct sw_flow_id * sfid) ovs_flow_cmp_ufid() argument
908 ovs_flow_cmp(const struct sw_flow * flow,const struct sw_flow_match * match) ovs_flow_cmp() argument
921 struct sw_flow *flow; ovs_flow_tbl_lookup_ufid() local
956 ovs_flow_tbl_remove(struct flow_table * table,struct sw_flow * flow) ovs_flow_tbl_remove() argument
1006 flow_mask_insert(struct flow_table * tbl,struct sw_flow * flow,const struct sw_flow_mask * new) flow_mask_insert() argument
1035 flow_key_insert(struct flow_table * table,struct sw_flow * flow) flow_key_insert() argument
1059 flow_ufid_insert(struct flow_table * table,struct sw_flow * flow) flow_ufid_insert() argument
1081 ovs_flow_tbl_insert(struct flow_table * table,struct sw_flow * flow,const struct sw_flow_mask * mask) ovs_flow_tbl_insert() argument
[all...]
/linux/drivers/infiniband/hw/hfi1/
tid_rdma.c
37 /* Maximum number of packets within a flow generation. */
134 struct tid_rdma_flow *flow,
439 /* Flow and tid waiter functions */
529 * This should be done after the hardware flow and
698 /* Flow functions */
700 * kern_reserve_flow - allocate a hardware flow
702 * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
706 * flow for use in receiving KDETH data packets. If a preferred flow is
707 * specified the function will attempt to reserve that flow again, if
721 /* Attempt to reserve the preferred flow index */ in kern_reserve_flow()
[all …]
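
The tid_rdma.c excerpt documents kern_reserve_flow(): allocate a hardware flow for receiving KDETH data packets, trying the caller's preferred index first and falling back to any free flow otherwise. A standalone sketch of that "preferred index first, then first free" policy; the pool size and bitmap bookkeeping are illustrative, not hfi1's implementation:

    /* Sketch of preferred-then-any flow reservation, per the comment above. */
    #include <stdbool.h>

    #define NUM_TID_FLOWS 32                   /* illustrative pool size */

    static bool flow_in_use[NUM_TID_FLOWS];

    /* 'last' is the preferred index; pass a value >= NUM_TID_FLOWS for no
     * preference.  Returns the reserved index, or -1 if none is free. */
    static int reserve_flow(int last)
    {
    	if (last >= 0 && last < NUM_TID_FLOWS && !flow_in_use[last]) {
    		flow_in_use[last] = true;  /* preferred flow still free: reuse it */
    		return last;
    	}
    	for (int i = 0; i < NUM_TID_FLOWS; i++) {
    		if (!flow_in_use[i]) {
    			flow_in_use[i] = true;
    			return i;
    		}
    	}
    	return -1;                         /* no flow available; caller waits */
    }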
/linux/drivers/net/ethernet/netronome/nfp/flower/
conntrack.h
38 * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
109 /* NFP flow entry flags. */
114 * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
115 * @cookie: Flow cookie, same as original TC flow, used as key
117 * @chain_index: Chain index of the original flow
118 * @goto_chain_index: goto chain index of the flow
121 * @children: List of tc_merge flows this flow forms part of
122 * @rule: Reference to the original TC flow rule
127 * @flags: Used to indicate flow flag like NAT which used by merge.
149 * @cookie: Flow cookie, combination of pre and post ct cookies
[all …]
/linux/net/sched/
sch_fq_codel.c
32 * Each flow has a CoDel managed queue.
36 * For a given flow, packets are not reordered (CoDel uses a FIFO)
39 * Low memory footprint (64 bytes per flow)
116 static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) in dequeue_head() argument
118 struct sk_buff *skb = flow->head; in dequeue_head()
120 flow->head = skb->next; in dequeue_head()
125 /* add skb to flow queue (tail add) */
126 static inline void flow_queue_add(struct fq_codel_flow *flow, in flow_queue_add() argument
129 if (flow->head == NULL) in flow_queue_add()
130 flow->head = skb; in flow_queue_add()
[all …]
sch_fq_pie.c
2 /* Flow Queue PIE discipline
19 /* Flow Queue PIE
25 * - Each flow has a PIE managed queue.
28 * - For a given flow, packets are not reordered.
36 * struct fq_pie_flow - contains data for each flow
37 * @vars: pie vars associated with the flow
39 * @backlog: size of data in the flow
40 * @qlen: number of packets in the flow
41 * @flowchain: flowchain for the flow
42 * @head: first packet in the flow
[all …]
sch_hhf.c
20 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
37 * - For a heavy-hitter flow: *all* of its k array counters must be large.
38 * - For a non-heavy-hitter flow: some of its k array counters can be large
59 * Once a flow is classified as heavy-hitter, we also save its per-flow state
60 * in an exact-matching flow table so that its subsequent packets can be
66 * - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
67 * heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
70 * + If F decides that p belongs to a non-heavy-hitter flow, then send p
72 * + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
73 * then set up a new flow entry for the flow-id of p in the table T and
[all …]
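
The sch_hhf.c comment above describes the multi-stage filter F: each packet updates k per-stage counters indexed by different hashes of its flow-id, and a flow is promoted to the exact-matching heavy-hitter table T only when all k counters are large, since a small flow may collide with heavy ones in some stages but rarely in all of them. A standalone sketch of that update-and-classify step; the stage count, bucket count, threshold, and hash are illustrative values, not the scheduler's defaults:

    /* Multi-stage filter sketch: all k stages must be large to vote "heavy". */
    #include <stdbool.h>
    #include <stdint.h>

    #define HH_STAGES     4                    /* k counter arrays */
    #define HH_BUCKETS    1024                 /* counters per stage */
    #define HH_THRESHOLD  (128 * 1024)         /* bytes before a stage votes heavy */

    static uint32_t counters[HH_STAGES][HH_BUCKETS];

    static uint32_t stage_hash(uint32_t flow_id, int stage)
    {
    	uint32_t h = flow_id * 2654435761u + (uint32_t)stage * 0x9e3779b9u;
    	return (h ^ (h >> 16)) % HH_BUCKETS;
    }

    /* Account len bytes for flow_id; return true once every stage is large. */
    static bool hh_filter_update(uint32_t flow_id, uint32_t len)
    {
    	bool heavy = true;

    	for (int s = 0; s < HH_STAGES; s++) {
    		uint32_t *c = &counters[s][stage_hash(flow_id, s)];

    		*c += len;
    		if (*c < HH_THRESHOLD)
    			heavy = false;     /* one small counter vetoes promotion */
    	}
    	return heavy;  /* caller would then add flow_id to the exact-match table T */
    }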
/linux/Documentation/bpf/
prog_flow_dissector.rst
10 Flow dissector is a routine that parses metadata out of the packets. It's
11 used in the various places in the networking subsystem (RFS, flow hash, etc).
13 BPF flow dissector is an attempt to reimplement C-based flow dissector logic
20 BPF flow dissector programs operate on an ``__sk_buff``. However, only the
22 ``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
31 Flow dissector BPF program should fill out the rest of the ``struct
41 In the VLAN-less case, this is what the initial state of the BPF flow
49 +-- flow dissector starts here
58 In case of VLAN, flow dissector can be called with the two different states.
67 +-- flow dissector starts here
[all …]
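
The prog_flow_dissector.rst excerpt explains that a BPF flow dissector program runs on an ``__sk_buff`` whose ``flow_keys`` member points at a ``struct bpf_flow_keys`` carrying both the dissector's input (for example ``nhoff``, where parsing starts) and the output fields the program must fill. Below is a minimal sketch handling only plain IPv4; a real dissector must cover many more cases, and field names and return codes should be checked against your kernel's UAPI headers:

    /* Minimal flow dissector sketch: fill skb->flow_keys for plain IPv4 only.
     * Anything else is dropped here purely to keep the example short. */
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("flow_dissector")
    int dissect(struct __sk_buff *skb)
    {
    	struct bpf_flow_keys *keys = skb->flow_keys;
    	struct iphdr iph;

    	if (keys->n_proto != bpf_htons(ETH_P_IP))
    		return BPF_DROP;               /* unsupported in this sketch */

    	/* keys->nhoff is where the kernel asked us to start dissecting */
    	if (bpf_skb_load_bytes(skb, keys->nhoff, &iph, sizeof(iph)))
    		return BPF_DROP;

    	keys->addr_proto = ETH_P_IP;
    	keys->ipv4_src   = iph.saddr;
    	keys->ipv4_dst   = iph.daddr;
    	keys->ip_proto   = iph.protocol;
    	keys->thoff      = keys->nhoff + (iph.ihl << 2);   /* transport header */

    	return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";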
/linux/Documentation/core-api/
genericirq.rst
52 optimize the flow of the interrupt handling for each specific interrupt
58 the flow control in the super-handler. This leads to a mix of flow logic
62 have different flow handling.
64 A more natural abstraction is the clean separation of the 'irq flow' and
68 reveals that most of them can use a generic set of 'irq flow' methods
71 IRQ flow itself but not in the chip details - and thus provides a more
74 Each interrupt descriptor is assigned its own high-level flow handler,
76 flow handler implementation also makes it simple to provide
82 IRQ-flow implementation for 'level type' interrupts and add a
104 2. High-level IRQ flow handlers
[all …]
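
The genericirq.rst excerpt argues for separating the 'irq flow' from chip details: a driver supplies only small chip callbacks and attaches one of the generic high-level flow handlers to the interrupt descriptor. A short sketch of what that looks like for a level-triggered line; the my_* names are hypothetical, while irq_set_chip_and_handler() and handle_level_irq are existing kernel APIs, even though the excerpt does not name them directly:

    /* Chip details stay in the driver; the flow logic is the generic handler. */
    #include <linux/irq.h>

    static void my_irq_mask(struct irq_data *d)   { /* write the chip's mask bit */ }
    static void my_irq_unmask(struct irq_data *d) { /* clear the chip's mask bit */ }
    static void my_irq_ack(struct irq_data *d)    { /* acknowledge the pending bit */ }

    static struct irq_chip my_irq_chip = {
    	.name       = "my-chip",
    	.irq_mask   = my_irq_mask,
    	.irq_unmask = my_irq_unmask,
    	.irq_ack    = my_irq_ack,
    };

    static void my_setup_irq(unsigned int irq)
    {
    	/* Bind the chip callbacks to the generic 'level type' flow handler. */
    	irq_set_chip_and_handler(irq, &my_irq_chip, handle_level_irq);
    }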
/linux/Documentation/hwmon/
aquacomputer_d5next.rst
14 * Aquacomputer High Flow Next sensor
19 * Aquacomputer High Flow USB flow meter
20 * Aquacomputer MPS Flow devices
31 virtual temperature sensors, as well as two flow sensors. The fans expose their
48 The Octo exposes four physical and sixteen virtual temperature sensors, a flow sensor
50 and current. Flow sensor pulses are also available.
52 The Quadro exposes four physical and sixteen virtual temperature sensors, a flow
54 voltage and current. Flow sensor pulses are also available.
59 The High Flow Next exposes +5V voltages, water quality, conductivity and flow readings.
65 filled with coolant. Pump RPM and flow can be set to enhance on-device calculations,
[all …]
/linux/samples/bpf/
sockex2_kern.c
62 struct flow_key_record *flow) in parse_ip()
72 flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); in parse_ip()
73 flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); in parse_ip()
86 struct flow_key_record *flow) in parse_ipv6()
90 flow->src = ipv6_addr_hash(skb, in parse_ipv6()
92 flow->dst = ipv6_addr_hash(skb, in parse_ipv6()
100 struct flow_key_record *flow) in flow_dissector()
120 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector()
122 nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); in flow_dissector()
158 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector()
63 parse_ip(struct __sk_buff * skb,__u64 nhoff,__u64 * ip_proto,struct flow_key_record * flow) parse_ip() argument
87 parse_ipv6(struct __sk_buff * skb,__u64 nhoff,__u64 * ip_proto,struct flow_key_record * flow) parse_ipv6() argument
101 flow_dissector(struct __sk_buff * skb,struct flow_key_record * flow) flow_dissector() argument
203 struct flow_key_record flow = {}; bpf_prog2() local
[all...]
/linux/drivers/net/ethernet/intel/igc/
igc_mac.c
75 * igc_set_fc_watermarks - Set flow control high/low watermarks
78 * Sets the flow control high/low threshold (watermark) registers. If
79 * flow control XON frame transmission is enabled, then set XON frame
86 /* Set the flow control receive threshold registers. Normally, in igc_set_fc_watermarks()
110 * igc_setup_link - Setup flow control and link settings
113 * Determines which flow control settings to use, then configures flow
129 /* If requested flow control is set to default, set flow control in igc_setup_link()
135 /* We want to save off the original Flow Control configuration just in igc_setup_link()
137 * hub or switch with different Flow Control capabilities. in igc_setup_link()
148 /* Initialize the flow control address, type, and PAUSE timer in igc_setup_link()
[all …]
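
The igc_mac.c excerpt describes programming flow control watermarks: the high and low thresholds at which the MAC emits XOFF (pause) and XON (resume) frames as its receive buffer fills and drains. A conceptual standalone model of that hysteresis; the byte values are illustrative, not the driver's computed watermarks:

    /* XON/XOFF hysteresis model for the watermarks described above. */
    #include <stdbool.h>
    #include <stdio.h>

    #define FC_HIGH_WATER  48000   /* bytes: send XOFF once fill rises above this */
    #define FC_LOW_WATER   40000   /* bytes: send XON once fill drops below this */

    static bool paused;

    static void rx_buffer_level_changed(unsigned int fill_bytes)
    {
    	if (!paused && fill_bytes >= FC_HIGH_WATER) {
    		paused = true;
    		printf("send XOFF (pause) frame\n");
    	} else if (paused && fill_bytes <= FC_LOW_WATER) {
    		paused = false;
    		printf("send XON (resume) frame\n");
    	}
    }

    int main(void)
    {
    	rx_buffer_level_changed(50000);   /* crosses high watermark: XOFF */
    	rx_buffer_level_changed(45000);   /* between watermarks: no change */
    	rx_buffer_level_changed(39000);   /* drops below low watermark: XON */
    	return 0;
    }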
