| /linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| H A D | flowring.c | 43 brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) in brcmf_flowring_is_tdls_mac() argument 47 search = flow->tdls_entry; in brcmf_flowring_is_tdls_mac() 59 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_lookup() argument 71 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_lookup() 77 if ((sta) && (flow->tdls_active) && in brcmf_flowring_lookup() 78 (brcmf_flowring_is_tdls_mac(flow, da))) { in brcmf_flowring_lookup() 85 hash = flow->hash; in brcmf_flowring_lookup() 103 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_create() argument 116 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_create() 122 if ((sta) && (flow->tdls_active) && in brcmf_flowring_create() [all …]
|
| H A D | flowring.h | 50 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], 52 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], 54 void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid); 55 void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid); 56 u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid); 57 u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid, 59 struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid); 60 void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid, 62 u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid); 63 u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid); [all …]
|
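The flowring.h hits above are the driver's per-ring API. As a rough, self-contained sketch of the pattern behind brcmf_flowring_lookup()/brcmf_flowring_create() — hashing a (destination MAC, priority, interface) triple into a fixed table of rings, probing for a match, and claiming a free slot on a miss — here is a toy version; the table size, hash function, and struct layout are invented and do not match the driver's:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define ETH_ALEN     6
    #define NROWS        256      /* illustrative table size, not the driver's */
    #define FLOW_INVALID NROWS

    struct flow_row {
        uint8_t mac[ETH_ALEN];
        uint8_t prio;             /* 802.1d priority / TID */
        uint8_t ifidx;            /* interface index */
        uint8_t inuse;
    };

    static struct flow_row rows[NROWS];

    /* toy hash over the (mac, prio, ifidx) lookup triple */
    static unsigned int hash3(const uint8_t *mac, uint8_t prio, uint8_t ifidx)
    {
        unsigned int h = prio ^ (ifidx << 3);

        for (int i = 0; i < ETH_ALEN; i++)
            h = h * 31 + mac[i];
        return h % NROWS;
    }

    /* lookup: linear probe for an existing ring for this triple */
    static unsigned int flow_lookup(const uint8_t *mac, uint8_t prio, uint8_t ifidx)
    {
        unsigned int h = hash3(mac, prio, ifidx);

        for (unsigned int n = 0; n < NROWS; n++) {
            unsigned int i = (h + n) % NROWS;

            if (rows[i].inuse && rows[i].prio == prio &&
                rows[i].ifidx == ifidx && !memcmp(rows[i].mac, mac, ETH_ALEN))
                return i;
        }
        return FLOW_INVALID;
    }

    /* create: claim the first free slot along the same probe sequence */
    static unsigned int flow_create(const uint8_t *mac, uint8_t prio, uint8_t ifidx)
    {
        unsigned int h = hash3(mac, prio, ifidx);

        for (unsigned int n = 0; n < NROWS; n++) {
            unsigned int i = (h + n) % NROWS;

            if (!rows[i].inuse) {
                memcpy(rows[i].mac, mac, ETH_ALEN);
                rows[i].prio = prio;
                rows[i].ifidx = ifidx;
                rows[i].inuse = 1;
                return i;
            }
        }
        return FLOW_INVALID;
    }

    int main(void)
    {
        const uint8_t da[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };
        unsigned int id = flow_lookup(da, 5, 0);

        if (id == FLOW_INVALID)          /* miss: create the ring, as the  */
            id = flow_create(da, 5, 0);  /* driver does on the 1st packet */
        printf("ring id %u\n", id);
        return 0;
    }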
| /linux/drivers/gpu/ipu-v3/ |
| H A D | ipu-dp.c | 46 u32 flow; member 64 struct ipu_flow flow[IPUV3_NUM_FLOWS]; member 82 struct ipu_flow *flow = to_flow(dp); in ipu_dp_set_global_alpha() local 83 struct ipu_dp_priv *priv = flow->priv; in ipu_dp_set_global_alpha() 88 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha() 93 writel(reg, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha() 96 reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL; in ipu_dp_set_global_alpha() 98 flow->base + DP_GRAPH_WIND_CTRL); in ipu_dp_set_global_alpha() 100 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha() 101 writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha() [all …]
|
| /linux/include/net/ |
| H A D | fq_impl.h | 16 __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, in __fq_adjust_removal() argument 19 struct fq_tin *tin = flow->tin; in __fq_adjust_removal() 24 flow->backlog -= bytes; in __fq_adjust_removal() 28 if (flow->backlog) in __fq_adjust_removal() 31 if (flow == &tin->default_flow) { in __fq_adjust_removal() 36 idx = flow - fq->flows; in __fq_adjust_removal() 41 struct fq_flow *flow, in fq_adjust_removal() argument 44 __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); in fq_adjust_removal() 48 struct fq_flow *flow) in fq_flow_dequeue() argument 54 skb = __skb_dequeue(&flow->queue); in fq_flow_dequeue() [all …]
|
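In the fq_impl.h hits above, __fq_adjust_removal() charges dequeued bytes against both the per-flow and the global backlog counters and recovers a flow's index by pointer arithmetic against the flows array. A minimal self-contained sketch of that accounting, with made-up struct layouts:

    #include <stddef.h>
    #include <stdio.h>

    #define NFLOWS 4

    struct fq_flow { unsigned int backlog; };      /* bytes queued in this flow */

    struct fq {
        struct fq_flow flows[NFLOWS];
        unsigned int backlog;                      /* total bytes across flows */
    };

    /* charge 'bytes' removed from 'flow' against both counters; the flow's
     * index falls out of pointer arithmetic, as in __fq_adjust_removal() */
    static void fq_adjust_removal(struct fq *fq, struct fq_flow *flow,
                                  unsigned int bytes)
    {
        ptrdiff_t idx = flow - fq->flows;

        flow->backlog -= bytes;
        fq->backlog -= bytes;
        printf("flow %td: backlog %u, total %u\n",
               idx, flow->backlog, fq->backlog);
    }

    int main(void)
    {
        struct fq fq = { .backlog = 3000 };

        fq.flows[2].backlog = 3000;
        fq_adjust_removal(&fq, &fq.flows[2], 1500);   /* one 1500-byte dequeue */
        return 0;
    }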
| /linux/drivers/net/phy/mscc/ |
| H A D | mscc_macsec.c | 371 struct macsec_flow *flow) in vsc8584_macsec_flow() argument 374 enum macsec_bank bank = flow->bank; in vsc8584_macsec_flow() 375 u32 val, match = 0, mask = 0, action = 0, idx = flow->index; in vsc8584_macsec_flow() 377 if (flow->match.tagged) in vsc8584_macsec_flow() 379 if (flow->match.untagged) in vsc8584_macsec_flow() 382 if (bank == MACSEC_INGR && flow->assoc_num >= 0) { in vsc8584_macsec_flow() 383 match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num); in vsc8584_macsec_flow() 387 if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) { in vsc8584_macsec_flow() 388 u64 sci = (__force u64)flow->rx_sa->sc->sci; in vsc8584_macsec_flow() 400 if (flow->match.etype) { in vsc8584_macsec_flow() [all …]
|
| /linux/net/netfilter/ |
| H A D | nf_flow_table_core.c | 21 flow_offload_fill_dir(struct flow_offload *flow, in flow_offload_fill_dir() argument 24 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; in flow_offload_fill_dir() 25 struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple; in flow_offload_fill_dir() 54 struct flow_offload *flow; in flow_offload_alloc() local 59 flow = kzalloc(sizeof(*flow), GFP_ATOMIC); in flow_offload_alloc() 60 if (!flow) in flow_offload_alloc() 64 flow->ct = ct; in flow_offload_alloc() 66 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); in flow_offload_alloc() 67 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); in flow_offload_alloc() 96 flow_offload_fill_route(struct flow_offload * flow,struct nf_flow_route * route,enum flow_offload_tuple_dir dir) flow_offload_fill_route() argument 147 nft_flow_dst_release(struct flow_offload * flow,enum flow_offload_tuple_dir dir) nft_flow_dst_release() argument 155 flow_offload_route_init(struct flow_offload * flow,struct nf_flow_route * route) flow_offload_route_init() argument 164 nf_flow_has_expired(const struct flow_offload * flow) nf_flow_has_expired() argument 192 flow_offload_fixup_ct(struct flow_offload * flow) flow_offload_fixup_ct() argument 246 flow_offload_route_release(struct flow_offload * flow) flow_offload_route_release() argument 252 flow_offload_free(struct flow_offload * flow) flow_offload_free() argument 300 flow_offload_get_timeout(struct flow_offload * flow) flow_offload_get_timeout() argument 319 flow_offload_add(struct nf_flowtable * flow_table,struct flow_offload * flow) flow_offload_add() argument 353 flow_offload_refresh(struct nf_flowtable * flow_table,struct flow_offload * flow,bool force) flow_offload_refresh() argument 372 flow_offload_del(struct nf_flowtable * flow_table,struct flow_offload * flow) flow_offload_del() argument 383 flow_offload_teardown(struct flow_offload * flow) flow_offload_teardown() argument 396 struct flow_offload *flow; flow_offload_lookup() local 419 nf_flow_table_iterate(struct nf_flowtable * flow_table,void (* iter)(struct nf_flowtable * flowtable,struct flow_offload * flow,void * data),void * data) nf_flow_table_iterate() argument 424 struct flow_offload *flow; nf_flow_table_iterate() local 452 nf_flow_custom_gc(struct nf_flowtable * flow_table,const struct flow_offload * flow) nf_flow_custom_gc() argument 555 nf_flow_offload_gc_step(struct nf_flowtable * flow_table,struct flow_offload * flow,void * data) nf_flow_offload_gc_step() argument 636 nf_flow_snat_port(const struct flow_offload * flow,struct sk_buff * skb,unsigned int thoff,u8 protocol,enum flow_offload_tuple_dir dir) nf_flow_snat_port() argument 662 nf_flow_dnat_port(const struct flow_offload * flow,struct sk_buff * skb,unsigned int thoff,u8 protocol,enum flow_offload_tuple_dir dir) nf_flow_dnat_port() argument 713 nf_flow_table_do_cleanup(struct nf_flowtable * flow_table,struct flow_offload * flow,void * data) nf_flow_table_do_cleanup() argument [all …] |
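flow_offload_alloc() in the hits above takes a conntrack reference and fills one tuple per direction from it, so the offloaded flow can match traffic both ways. A compressed, self-contained sketch of that two-direction fill, with invented stand-ins for the conntrack and tuple types:

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    enum dir { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

    struct tuple { uint32_t src_ip, dst_ip; uint16_t sport, dport; };

    /* stand-in for the conntrack entry the flow borrows its tuples from */
    struct ct_entry { struct tuple tuplehash[DIR_MAX]; };

    struct flow_offload {
        struct tuple tuplehash[DIR_MAX];
        struct ct_entry *ct;
    };

    static void flow_offload_fill_dir(struct flow_offload *flow, enum dir d)
    {
        /* each direction of the offloaded flow mirrors the conntrack tuple */
        flow->tuplehash[d] = flow->ct->tuplehash[d];
    }

    static struct flow_offload *flow_offload_alloc(struct ct_entry *ct)
    {
        struct flow_offload *flow = calloc(1, sizeof(*flow));

        if (!flow)
            return NULL;
        flow->ct = ct;
        flow_offload_fill_dir(flow, DIR_ORIGINAL);
        flow_offload_fill_dir(flow, DIR_REPLY);
        return flow;
    }

    int main(void)
    {
        struct ct_entry ct = {
            .tuplehash = {
                [DIR_ORIGINAL] = { 0x0a000001, 0x0a000002, 40000, 80 },
                [DIR_REPLY]    = { 0x0a000002, 0x0a000001, 80, 40000 },
            },
        };
        struct flow_offload *flow = flow_offload_alloc(&ct);

        if (flow) {
            printf("orig dport %u, reply dport %u\n",
                   flow->tuplehash[DIR_ORIGINAL].dport,
                   flow->tuplehash[DIR_REPLY].dport);
            free(flow);
        }
        return 0;
    }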
| H A D | nf_flow_table_offload.c | 24 struct flow_offload *flow; member 224 const struct flow_offload *flow, in flow_offload_eth_src() argument 236 this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_src() 243 other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_src() 271 const struct flow_offload *flow, in flow_offload_eth_dst() argument 286 this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_dst() 293 other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_dst() 328 const struct flow_offload *flow, in flow_offload_ipv4_snat() argument 339 addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in flow_offload_ipv4_snat() 343 addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in flow_offload_ipv4_snat() 355 flow_offload_ipv4_dnat(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_ipv4_dnat() argument 396 flow_offload_ipv6_snat(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_ipv6_snat() argument 421 flow_offload_ipv6_dnat(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_ipv6_dnat() argument 445 flow_offload_l4proto(const struct flow_offload * flow) flow_offload_l4proto() argument 465 flow_offload_port_snat(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_port_snat() argument 495 flow_offload_port_dnat(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_port_dnat() argument 525 flow_offload_ipv4_checksum(struct net * net,const struct flow_offload * flow,struct nf_flow_rule * flow_rule) flow_offload_ipv4_checksum() argument 545 flow_offload_redirect(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_redirect() argument 577 flow_offload_encap_tunnel(const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_encap_tunnel() argument 602 flow_offload_decap_tunnel(const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) flow_offload_decap_tunnel() argument 627 nf_flow_rule_route_common(struct net * net,const struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) nf_flow_rule_route_common() argument 682 nf_flow_rule_route_ipv4(struct net * net,struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) nf_flow_rule_route_ipv4() argument 707 nf_flow_rule_route_ipv6(struct net * net,struct flow_offload * flow,enum flow_offload_tuple_dir dir,struct nf_flow_rule * flow_rule) nf_flow_rule_route_ipv6() argument 738 struct flow_offload *flow = offload->flow; nf_flow_offload_rule_alloc() local 836 nf_flow_offload_tuple(struct nf_flowtable * flowtable,struct flow_offload * flow,struct nf_flow_rule * flow_rule,enum flow_offload_tuple_dir dir,int priority,int cmd,struct flow_stats * stats,struct list_head * block_cb_list) nf_flow_offload_tuple() argument 1017 nf_flow_offload_work_alloc(struct nf_flowtable * flowtable,struct flow_offload * flow,unsigned int cmd) nf_flow_offload_work_alloc() argument 1040 nf_flow_offload_add(struct nf_flowtable * flowtable,struct flow_offload * flow) nf_flow_offload_add() argument 1052 nf_flow_offload_del(struct nf_flowtable * flowtable,struct flow_offload * flow) nf_flow_offload_del() argument 1065 nf_flow_offload_stats(struct nf_flowtable * flowtable,struct flow_offload * flow) nf_flow_offload_stats() argument [all …] |
| H A D | nf_flow_table_ip.c | 22 static int nf_flow_state_check(struct flow_offload *flow, int proto, in nf_flow_state_check() argument 31 if (tcph->syn && test_bit(NF_FLOW_CLOSING, &flow->flags)) { in nf_flow_state_check() 32 flow_offload_teardown(flow); in nf_flow_state_check() 37 !test_bit(NF_FLOW_CLOSING, &flow->flags)) in nf_flow_state_check() 38 set_bit(NF_FLOW_CLOSING, &flow->flags); in nf_flow_state_check() 80 static void nf_flow_snat_ip(const struct flow_offload *flow, in nf_flow_snat_ip() argument 89 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in nf_flow_snat_ip() 94 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in nf_flow_snat_ip() 103 static void nf_flow_dnat_ip(const struct flow_offload *flow, in nf_flow_dnat_ip() argument 112 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr; in nf_flow_dnat_ip() 126 nf_flow_nat_ip(const struct flow_offload * flow,struct sk_buff * skb,unsigned int thoff,enum flow_offload_tuple_dir dir,struct iphdr * iph) nf_flow_nat_ip() argument 376 struct flow_offload *flow; nf_flow_offload_forward() local 427 struct flow_offload *flow; nf_flow_offload_ip_hook() local 520 nf_flow_snat_ipv6(const struct flow_offload * flow,struct sk_buff * skb,struct ipv6hdr * ip6h,unsigned int thoff,enum flow_offload_tuple_dir dir) nf_flow_snat_ipv6() argument 543 nf_flow_dnat_ipv6(const struct flow_offload * flow,struct sk_buff * skb,struct ipv6hdr * ip6h,unsigned int thoff,enum flow_offload_tuple_dir dir) nf_flow_dnat_ipv6() argument 566 nf_flow_nat_ipv6(const struct flow_offload * flow,struct sk_buff * skb,enum flow_offload_tuple_dir dir,struct ipv6hdr * ip6h) nf_flow_nat_ipv6() argument 655 struct flow_offload *flow; nf_flow_offload_ipv6_forward() local 723 struct flow_offload *flow; nf_flow_offload_ipv6_hook() local [all …] |
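nf_flow_snat_ip()/nf_flow_dnat_ip() above pull the replacement address out of the opposite direction's tuple and then patch the IPv4 checksum incrementally rather than recomputing it over the whole header; the kernel helper for that is csum_replace4(). A self-contained sketch of the RFC 1624-style update (the example checksum value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* fold a 32-bit accumulator back into 16-bit ones'-complement form */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* adjust checksum 'check' for a 32-bit field changing from 'from' to 'to';
     * this is the job the kernel's csum_replace4() helper performs */
    static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16) + (uint16_t)~(from & 0xffff); /* drop old */
        sum += (to >> 16) + (to & 0xffff);                           /* add new  */
        return (uint16_t)~csum_fold(sum);
    }

    int main(void)
    {
        uint16_t check = 0xb861;                      /* arbitrary old checksum  */
        uint32_t from = 0xc0a80001, to = 0x0a000001;  /* 192.168.0.1 -> 10.0.0.1 */

        printf("updated checksum: 0x%04x\n", csum_replace4(check, from, to));
        return 0;
    }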
| H A D | nf_tables_offload.c | 12 struct nft_flow_rule *flow; in nft_flow_rule_alloc() local 14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL); in nft_flow_rule_alloc() 15 if (!flow) in nft_flow_rule_alloc() 18 flow->rule = flow_rule_alloc(num_actions); in nft_flow_rule_alloc() 19 if (!flow->rule) { in nft_flow_rule_alloc() 20 kfree(flow); in nft_flow_rule_alloc() 24 flow->rule->match.dissector = &flow->match.dissector; in nft_flow_rule_alloc() 25 flow->rule->match.mask = &flow->match.mask; in nft_flow_rule_alloc() 31 nft_flow_rule_set_addr_type(struct nft_flow_rule * flow,enum flow_dissector_key_id addr_type) nft_flow_rule_set_addr_type() argument 54 nft_flow_rule_transfer_vlan(struct nft_offload_ctx * ctx,struct nft_flow_rule * flow) nft_flow_rule_transfer_vlan() argument 92 struct nft_flow_rule *flow; nft_flow_rule_create() local 146 nft_flow_rule_destroy(struct nft_flow_rule * flow) nft_flow_rule_destroy() argument 245 nft_flow_cls_offload_setup(struct flow_cls_offload * cls_flow,const struct nft_base_chain * basechain,const struct nft_rule * rule,const struct nft_flow_rule * flow,struct netlink_ext_ack * extack,enum flow_cls_command command) nft_flow_cls_offload_setup() argument 266 nft_flow_offload_cmd(const struct nft_chain * chain,const struct nft_rule * rule,struct nft_flow_rule * flow,enum flow_cls_command command,struct flow_cls_offload * cls_flow) nft_flow_offload_cmd() argument 286 nft_flow_offload_rule(const struct nft_chain * chain,struct nft_rule * rule,struct nft_flow_rule * flow,enum flow_cls_command command) nft_flow_offload_rule() argument [all …] |
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| H A D | tc_priv.h | 131 struct mlx5e_tc_flow *flow, 136 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow); 138 void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow); 139 int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow); 141 bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow); 142 bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow); 143 bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow); 144 int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow); 147 static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) in __flow_flag_set() argument 151 set_bit(flag, &flow->flags); in __flow_flag_set() [all …]
|
| H A D | tc_tun_encap.h | 10 struct mlx5e_tc_flow *flow, 15 struct mlx5e_tc_flow *flow, 23 struct mlx5e_tc_flow *flow, 26 struct mlx5e_tc_flow *flow); 29 struct mlx5e_tc_flow *flow); 31 struct mlx5e_tc_flow *flow); 34 struct mlx5e_tc_flow *flow, 39 struct mlx5e_tc_flow *flow, 44 int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
|
| /linux/drivers/net/ethernet/broadcom/bnxt/ |
| H A D | bnxt_tc.c | 370 struct bnxt_tc_flow *flow) in bnxt_tc_parse_flow() argument 391 flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow() 392 flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow() 396 flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow() 397 flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow() 405 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow() 406 ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow() 407 ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow() 408 ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow() 409 ether_addr_copy(flow->l2_mask.smac, match.mask->src); in bnxt_tc_parse_flow() [all …]
|
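bnxt_tc_parse_flow() above is the usual TC-offload dispatch: probe each flow_rule match section with flow_rule_match_key() and copy the key/mask pair into the driver's own flow representation. A trimmed kernel-context sketch of that pattern — not standalone-buildable, and the drv_flow structure is invented for illustration:

    #include <linux/etherdevice.h>
    #include <net/flow_offload.h>

    /* hypothetical driver-side key; real drivers define their own layout */
    struct drv_flow {
        __be16 ether_type;
        __be16 ether_type_mask;
        u8 dmac[ETH_ALEN];
        u8 dmac_mask[ETH_ALEN];
    };

    static void drv_parse_flow(struct flow_rule *rule, struct drv_flow *flow)
    {
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
            struct flow_match_basic match;

            flow_rule_match_basic(rule, &match);
            flow->ether_type = match.key->n_proto;        /* key ...      */
            flow->ether_type_mask = match.mask->n_proto;  /* ... and mask */
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
            struct flow_match_eth_addrs match;

            flow_rule_match_eth_addrs(rule, &match);
            ether_addr_copy(flow->dmac, match.key->dst);
            ether_addr_copy(flow->dmac_mask, match.mask->dst);
        }
    }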
| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | tid_rdma.c | 134 struct tid_rdma_flow *flow, 881 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_4k() argument 898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); in tid_rdma_find_phys_blocks_4k() 901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, in tid_rdma_find_phys_blocks_4k() 935 trace_hfi1_tid_pageset(flow->req->qp, setcount, in tid_rdma_find_phys_blocks_4k() 1020 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_8k() argument 1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); in tid_rdma_find_phys_blocks_8k() 1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); in tid_rdma_find_phys_blocks_8k() 1087 static u32 kern_find_pages(struct tid_rdma_flow *flow, in kern_find_pages() argument 1091 struct tid_rdma_request *req = flow->req; in kern_find_pages() [all …]
|
| /linux/net/sched/ |
| H A D | sch_fq_codel.c | 32 * Each flow has a CoDel managed queue. 36 * For a given flow, packets are not reordered (CoDel uses a FIFO) 39 * Low memory footprint (64 bytes per flow) 116 static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) in dequeue_head() argument 118 struct sk_buff *skb = flow->head; in dequeue_head() 120 flow->head = skb->next; in dequeue_head() 125 /* add skb to flow queue (tail add) */ 126 static inline void flow_queue_add(struct fq_codel_flow *flow, in flow_queue_add() argument 129 if (flow->head == NULL) in flow_queue_add() 130 flow->head = skb; in flow_queue_add() 143 struct fq_codel_flow *flow; fq_codel_drop() local 190 struct fq_codel_flow *flow; fq_codel_enqueue() local 260 struct fq_codel_flow *flow; dequeue_func() local 286 struct fq_codel_flow *flow; fq_codel_dequeue() local 328 fq_codel_flow_purge(struct fq_codel_flow * flow) fq_codel_flow_purge() argument 342 struct fq_codel_flow *flow = q->flows + i; fq_codel_reset() local 512 struct fq_codel_flow *flow = q->flows + i; fq_codel_init() local 652 const struct fq_codel_flow *flow = &q->flows[idx]; fq_codel_dump_class_stats() local [all …] |
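dequeue_head() and flow_queue_add() above implement each flow's FIFO as a bare singly linked list with head/tail pointers; CoDel never reorders within a flow, so nothing fancier is needed. The same two operations as a self-contained sketch:

    #include <stdio.h>

    struct pkt { struct pkt *next; int id; };
    struct flow { struct pkt *head, *tail; };

    /* tail add, as in flow_queue_add() */
    static void flow_enqueue(struct flow *f, struct pkt *p)
    {
        p->next = NULL;
        if (!f->head)
            f->head = p;
        else
            f->tail->next = p;
        f->tail = p;
    }

    /* head remove, as in dequeue_head() */
    static struct pkt *flow_dequeue(struct flow *f)
    {
        struct pkt *p = f->head;

        if (p) {
            f->head = p->next;
            p->next = NULL;
        }
        return p;
    }

    int main(void)
    {
        struct flow f = { 0 };
        struct pkt a = { .id = 1 }, b = { .id = 2 };

        flow_enqueue(&f, &a);
        flow_enqueue(&f, &b);
        printf("%d %d\n", flow_dequeue(&f)->id, flow_dequeue(&f)->id); /* 1 2 */
        return 0;
    }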
| H A D | sch_fq_pie.c | 119 static inline void flow_queue_add(struct fq_pie_flow *flow, in flow_queue_add() argument 122 if (!flow->head) in flow_queue_add() 123 flow->head = skb; in flow_queue_add() 125 flow->tail->next = skb; in flow_queue_add() 126 flow->tail = skb; in flow_queue_add() 229 static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow) in dequeue_head() argument 231 struct sk_buff *skb = flow->head; in dequeue_head() 233 flow->head = skb->next; in dequeue_head() 242 struct fq_pie_flow *flow; in fq_pie_qdisc_dequeue() local 254 flow = list_first_entry(head, struct fq_pie_flow, flowchain); in fq_pie_qdisc_dequeue() [all …]
|
| H A D | sch_hhf.c | 184 struct hh_flow_state *flow, *next; in seek_list() local 190 list_for_each_entry_safe(flow, next, head, flowchain) { in seek_list() 191 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list() 197 if (list_is_last(&flow->flowchain, head)) in seek_list() 199 list_del(&flow->flowchain); in seek_list() 200 kfree(flow); in seek_list() 202 } else if (flow->hash_id == hash) { in seek_list() 203 return flow; in seek_list() 215 struct hh_flow_state *flow; in alloc_new_hh() local 220 list_for_each_entry(flow, head, flowchain) { in alloc_new_hh() [all …]
|
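seek_list() above scans a hash chain for a heavy-hitter entry and opportunistically frees entries whose hit_timestamp has aged past hhf_evict_timeout. A simplified, self-contained sketch of that scan-and-evict walk, with jiffies replaced by a plain counter and the list handling reduced to a singly linked chain:

    #include <stdlib.h>
    #include <stdio.h>

    struct hh_flow {
        struct hh_flow *next;
        unsigned int hash_id;
        unsigned long hit_timestamp;
    };

    #define EVICT_TIMEOUT 1000   /* illustrative, in abstract time units */

    /* find 'hash' in the chain; free entries idle longer than the timeout */
    static struct hh_flow *seek_list(struct hh_flow **head, unsigned int hash,
                                     unsigned long now)
    {
        struct hh_flow **pp = head, *f;

        while ((f = *pp) != NULL) {
            if (now - f->hit_timestamp > EVICT_TIMEOUT) {
                *pp = f->next;          /* unlink and evict the stale entry */
                free(f);
                continue;
            }
            if (f->hash_id == hash)
                return f;
            pp = &f->next;
        }
        return NULL;
    }

    int main(void)
    {
        struct hh_flow *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));
        struct hh_flow *head = a, *hit;

        a->hash_id = 7;  a->hit_timestamp = 0;    a->next = b;  /* stale */
        b->hash_id = 42; b->hit_timestamp = 1900;               /* fresh */

        hit = seek_list(&head, 42, 2000);
        printf("found %u, stale entry %s\n", hit ? hit->hash_id : 0,
               head == b ? "evicted" : "kept");
        free(b);
        return 0;
    }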
| H A D | cls_flow.c | 67 static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_src() argument 69 __be32 src = flow_get_u32_src(flow); in flow_get_src() 77 static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_dst() argument 79 __be32 dst = flow_get_u32_dst(flow); in flow_get_dst() 88 const struct flow_keys *flow) in flow_get_proto() argument 90 return flow->basic.ip_proto; in flow_get_proto() 94 const struct flow_keys *flow) in flow_get_proto_src() argument 96 if (flow->ports.ports) in flow_get_proto_src() 97 return ntohs(flow->ports.src); in flow_get_proto_src() 103 const struct flow_keys *flow) in flow_get_proto_dst() argument [all …]
|
| H A D | sch_cake.c | 29 * flows from each other. This prevents a burst on one flow from increasing 127 /* this stuff is all needed per-flow at dequeue time */ 634 struct cake_flow *flow, in cake_dec_srchost_bulk_flow_count() argument 638 q->hosts[flow->srchost].srchost_bulk_flow_count)) in cake_dec_srchost_bulk_flow_count() 639 q->hosts[flow->srchost].srchost_bulk_flow_count--; in cake_dec_srchost_bulk_flow_count() 643 struct cake_flow *flow, in cake_inc_srchost_bulk_flow_count() argument 647 q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_srchost_bulk_flow_count() 648 q->hosts[flow->srchost].srchost_bulk_flow_count++; in cake_inc_srchost_bulk_flow_count() 652 struct cake_flow *flow, in cake_dec_dsthost_bulk_flow_count() argument 656 q->hosts[flow->dsthost].dsthost_bulk_flow_count)) in cake_dec_dsthost_bulk_flow_count() 661 cake_inc_dsthost_bulk_flow_count(struct cake_tin_data * q,struct cake_flow * flow,int flow_mode) cake_inc_dsthost_bulk_flow_count() argument 670 cake_get_flow_quantum(struct cake_tin_data * q,struct cake_flow * flow,int flow_mode) cake_get_flow_quantum() argument 899 dequeue_head(struct cake_flow * flow) dequeue_head() argument 913 flow_queue_add(struct cake_flow * flow,struct sk_buff * skb) flow_queue_add() argument 1199 cake_ack_filter(struct cake_sched_data * q,struct cake_flow * flow) cake_ack_filter() argument 1558 struct cake_flow *flow; cake_drop() local 1709 u16 flow = 0, host = 0; cake_classify() local 1752 struct cake_flow *flow; cake_enqueue() local 1955 struct cake_flow *flow = &b->flows[q->cur_flow]; cake_dequeue_one() local 1992 struct cake_flow *flow; cake_dequeue() local 2778 struct cake_flow *flow = b->flows + j; cake_init() local 3015 const struct cake_flow *flow = NULL; cake_dump_class_stats() local [all …] |
| /linux/samples/bpf/ |
| H A D | sockex2_kern.c | 62 struct flow_key_record *flow) in parse_ip() 72 flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); in parse_ip() 73 flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); in parse_ip() 86 struct flow_key_record *flow) in parse_ipv6() 90 flow->src = ipv6_addr_hash(skb, in parse_ipv6() 92 flow->dst = ipv6_addr_hash(skb, in parse_ipv6() 100 struct flow_key_record *flow) in flow_dissector() 120 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector() 122 nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); in flow_dissector() 158 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector() 63 parse_ip(struct __sk_buff * skb,__u64 nhoff,__u64 * ip_proto,struct flow_key_record * flow) parse_ip() argument 87 parse_ipv6(struct __sk_buff * skb,__u64 nhoff,__u64 * ip_proto,struct flow_key_record * flow) parse_ipv6() argument 101 flow_dissector(struct __sk_buff * skb,struct flow_key_record * flow) flow_dissector() argument 203 struct flow_key_record flow = {}; bpf_prog2() local [all …] |
| /linux/Documentation/networking/ |
| H A D | openvswitch.rst | 8 flow-level packet processing on selected network devices. It can be 10 VLAN processing, network access control, flow-based network control, 15 within a bridge). Each datapath also has associated with it a "flow 22 extracting its flow key and looking it up in the flow table. If there 23 is a matching flow, it executes the associated actions. If there is 25 its processing, userspace will likely set up a flow to handle further 35 versions to parse additional protocols as part of the flow key. It 39 applications to work with any version of the flow key, past or future. 43 flow key that it parsed from the packet. Userspace then extracts its 44 own notion of a flow key from the packet and compares it against the [all …]
|
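The miss/upcall cycle the document describes — extract a flow key, look it up in the datapath flow table, execute the actions on a hit, hand the packet to userspace on a miss so it can install a flow — fits in a few lines. A toy model, with an array standing in for the kernel flow table and a direct call standing in for the Netlink upcall (all names invented):

    #include <stdbool.h>
    #include <string.h>
    #include <stdio.h>

    struct flow_key { unsigned int in_port, src_ip, dst_ip; };
    struct flow     { struct flow_key key; int action; bool used; };

    #define TABLE_SIZE 16
    static struct flow table[TABLE_SIZE];

    /* "kernel" side: exact-match lookup on the extracted key */
    static struct flow *flow_lookup(const struct flow_key *key)
    {
        for (int i = 0; i < TABLE_SIZE; i++)
            if (table[i].used && !memcmp(&table[i].key, key, sizeof(*key)))
                return &table[i];
        return NULL;
    }

    /* "userspace" side of the upcall: decide an action, install a flow */
    static int upcall(const struct flow_key *key)
    {
        int action = 1;  /* e.g. output to port 1 */

        for (int i = 0; i < TABLE_SIZE; i++)
            if (!table[i].used) {
                table[i] = (struct flow){ .key = *key, .action = action,
                                          .used = true };
                break;
            }
        return action;
    }

    static void receive_packet(const struct flow_key *key)
    {
        struct flow *f = flow_lookup(key);
        int action = f ? f->action : upcall(key); /* miss -> upcall */

        printf("action %d (%s)\n", action, f ? "hit" : "miss");
    }

    int main(void)
    {
        struct flow_key k = { .in_port = 1, .src_ip = 0x0a000001,
                              .dst_ip = 0x0a000002 };

        receive_packet(&k);  /* miss: goes to userspace, flow installed */
        receive_packet(&k);  /* hit: handled in the fast path */
        return 0;
    }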
| /linux/drivers/infiniband/hw/usnic/ |
| H A D | usnic_fwd.c | 203 struct usnic_fwd_flow *flow; in usnic_fwd_alloc_flow() local 213 flow = kzalloc(sizeof(*flow), GFP_ATOMIC); in usnic_fwd_alloc_flow() 214 if (!flow) in usnic_fwd_alloc_flow() 255 flow->flow_id = (uint32_t) a0; in usnic_fwd_alloc_flow() 256 flow->vnic_idx = uaction->vnic_idx; in usnic_fwd_alloc_flow() 257 flow->ufdev = ufdev; in usnic_fwd_alloc_flow() 263 return flow; in usnic_fwd_alloc_flow() 265 kfree(flow); in usnic_fwd_alloc_flow() 269 int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow) in usnic_fwd_dealloc_flow() argument 274 a0 = flow->flow_id; in usnic_fwd_dealloc_flow() [all …]
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | test_xdp_noinline.c | 123 struct flow_key flow; member 245 pckt->flow.port16[0] = udp->source; in parse_udp() 246 pckt->flow.port16[1] = udp->dest; in parse_udp() 248 pckt->flow.port16[0] = udp->dest; in parse_udp() 249 pckt->flow.port16[1] = udp->source; in parse_udp() 269 pckt->flow.port16[0] = tcp->source; in parse_tcp() 270 pckt->flow.port16[1] = tcp->dest; in parse_tcp() 272 pckt->flow.port16[0] = tcp->dest; in parse_tcp() 273 pckt->flow.port16[1] = tcp->source; in parse_tcp() 308 ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0]; in encap_v6() [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ |
| H A D | goto.c | 10 struct mlx5e_tc_flow *flow, in validate_goto_chain() argument 16 bool is_esw = mlx5e_is_eswitch_flow(flow); in validate_goto_chain() 17 bool ft_flow = mlx5e_is_ft_flow(flow); in validate_goto_chain() 66 struct mlx5e_tc_flow *flow = parse_state->flow; in tc_act_can_offload_goto() local 68 if (validate_goto_chain(flow->priv, flow, attr, act, extack)) in tc_act_can_offload_goto() 93 struct mlx5e_tc_flow *flow = parse_state->flow; in tc_act_post_parse_goto() local 112 if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) { in tc_act_post_parse_goto()
|
| /linux/Documentation/bpf/ |
| H A D | prog_flow_dissector.rst | 11 used in the various places in the networking subsystem (RFS, flow hash, etc). 13 BPF flow dissector is an attempt to reimplement C-based flow dissector logic 20 BPF flow dissector programs operate on an ``__sk_buff``. However, only the 22 ``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input 41 In the VLAN-less case, this is what the initial state of the BPF flow 49 +-- flow dissector starts here 58 In case of VLAN, flow dissector can be called with the two different states. 67 +-- flow dissector starts here 86 +-- flow dissector starts here 94 In this case VLAN information has been processed before the flow dissector [all …]
|
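Concretely, a BPF flow dissector program attaches at the point the diagrams above mark: it starts parsing at flow_keys->nhoff and is expected to fill in the rest of struct bpf_flow_keys. A minimal IPv4-only sketch under those assumptions (no options, fragment, or encapsulation handling; untested):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("flow_dissector")
    int dissect(struct __sk_buff *skb)
    {
        struct bpf_flow_keys *keys = skb->flow_keys;
        struct iphdr iph;

        /* this sketch only handles plain IPv4 */
        if (keys->n_proto != bpf_htons(ETH_P_IP))
            return BPF_DROP;

        /* nhoff already points past the Ethernet (and first VLAN) header */
        if (bpf_skb_load_bytes(skb, keys->nhoff, &iph, sizeof(iph)))
            return BPF_DROP;

        keys->addr_proto = ETH_P_IP;
        keys->ipv4_src = iph.saddr;
        keys->ipv4_dst = iph.daddr;
        keys->ip_proto = iph.protocol;
        keys->thoff = keys->nhoff + iph.ihl * 4;  /* transport hdr offset */

        return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";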
| /linux/Documentation/core-api/ |
| H A D | genericirq.rst | 52 optimize the flow of the interrupt handling for each specific interrupt 58 the flow control in the super-handler. This leads to a mix of flow logic 62 have different flow handling. 64 A more natural abstraction is the clean separation of the 'irq flow' and 68 reveals that most of them can use a generic set of 'irq flow' methods 71 IRQ flow itself but not in the chip details - and thus provides a more 74 Each interrupt descriptor is assigned its own high-level flow handler, 76 flow handler implementation also makes it simple to provide 82 IRQ-flow implementation for 'level type' interrupts and add a 104 2. High-level IRQ flow handlers [all …]
|
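In driver terms, the chip/flow split described above means an irqchip driver supplies only the low-level chip callbacks and then picks a stock flow handler such as handle_level_irq(). A hedged kernel-context sketch of that pairing; the register pokes and all "demo" names are invented:

    #include <linux/irq.h>

    /* chip details only: how to ack/mask/unmask this controller's lines */
    static void demo_irq_ack(struct irq_data *d)    { /* write ACK register */ }
    static void demo_irq_mask(struct irq_data *d)   { /* set mask bit */ }
    static void demo_irq_unmask(struct irq_data *d) { /* clear mask bit */ }

    static struct irq_chip demo_chip = {
        .name       = "demo",
        .irq_ack    = demo_irq_ack,
        .irq_mask   = demo_irq_mask,
        .irq_unmask = demo_irq_unmask,
    };

    /* the generic 'level type' flow handler does the sequencing; the
     * chip above only performs the hardware accesses it is asked for */
    static void demo_setup_irq(unsigned int irq)
    {
        irq_set_chip_and_handler(irq, &demo_chip, handle_level_irq);
    }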