| /linux/drivers/net/ethernet/marvell/prestera/ |
| H A D | prestera_flow.c |
    179  struct flow_block_cb *block_cb;  in prestera_flow_block_get() local
    181  block_cb = flow_block_cb_lookup(f->block,  in prestera_flow_block_get()
    183  if (!block_cb) {  in prestera_flow_block_get()
    188  block_cb = flow_block_cb_alloc(prestera_flow_block_cb,  in prestera_flow_block_get()
    191  if (IS_ERR(block_cb)) {  in prestera_flow_block_get()
    193  return ERR_CAST(block_cb);  in prestera_flow_block_get()
    196  block->block_cb = block_cb;  in prestera_flow_block_get()
    199  block = flow_block_cb_priv(block_cb);  in prestera_flow_block_get()
    203  flow_block_cb_incref(block_cb);  in prestera_flow_block_get()
    210  struct flow_block_cb *block_cb = block->block_cb;  in prestera_flow_block_put() local
    [all …]
|
| H A D | prestera_flow.h | 23 struct flow_block_cb *block_cb; member
|
| /linux/net/core/ |
| H A D | flow_offload.c |
    265  struct flow_block_cb *block_cb;  in flow_block_cb_alloc() local
    267  block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);  in flow_block_cb_alloc()
    268  if (!block_cb)  in flow_block_cb_alloc()
    271  block_cb->cb = cb;  in flow_block_cb_alloc()
    272  block_cb->cb_ident = cb_ident;  in flow_block_cb_alloc()
    273  block_cb->cb_priv = cb_priv;  in flow_block_cb_alloc()
    274  block_cb->release = release;  in flow_block_cb_alloc()
    276  return block_cb;  in flow_block_cb_alloc()
    280  void flow_block_cb_free(struct flow_block_cb *block_cb)  in flow_block_cb_free() argument
    282  if (block_cb->release)  in flow_block_cb_free()
    [all …]
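The hits in net/core/flow_offload.c are the allocation and release helpers every driver entry below builds on: flow_block_cb_alloc() records the callback, its identity cookie, the private data and an optional release hook, and flow_block_cb_free() runs that hook before dropping the object. A sketch of the pair as the fragments above suggest it; the parts the listing truncates (the ENOMEM error pointer, the release argument and the final kfree()) are assumptions inferred from how the callers below check IS_ERR(), not a verbatim copy of the file:

    struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                              void *cb_ident, void *cb_priv,
                                              void (*release)(void *cb_priv))
    {
            struct flow_block_cb *block_cb;

            block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
            if (!block_cb)
                    return ERR_PTR(-ENOMEM);        /* assumed: callers test IS_ERR() */

            block_cb->cb = cb;
            block_cb->cb_ident = cb_ident;
            block_cb->cb_priv = cb_priv;
            block_cb->release = release;

            return block_cb;
    }

    void flow_block_cb_free(struct flow_block_cb *block_cb)
    {
            if (block_cb->release)
                    block_cb->release(block_cb->cb_priv);   /* assumed argument */

            kfree(block_cb);                                /* assumed */
    }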
|
| /linux/drivers/net/ethernet/mellanox/mlxsw/ |
| H A D | spectrum_flow.c |
    207  struct flow_block_cb *block_cb;  in mlxsw_sp_setup_tc_block_bind() local
    211  block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,  in mlxsw_sp_setup_tc_block_bind()
    213  if (!block_cb) {  in mlxsw_sp_setup_tc_block_bind()
    217  block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,  in mlxsw_sp_setup_tc_block_bind()
    220  if (IS_ERR(block_cb)) {  in mlxsw_sp_setup_tc_block_bind()
    222  return PTR_ERR(block_cb);  in mlxsw_sp_setup_tc_block_bind()
    226  flow_block = flow_block_cb_priv(block_cb);  in mlxsw_sp_setup_tc_block_bind()
    228  flow_block_cb_incref(block_cb);  in mlxsw_sp_setup_tc_block_bind()
    240  flow_block_cb_add(block_cb, f);  in mlxsw_sp_setup_tc_block_bind()
    241  list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);  in mlxsw_sp_setup_tc_block_bind()
    [all …]
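The mlxsw, prestera, mediatek, airoha, dpaa2 and dsa entries in this listing all follow the FLOW_BLOCK_BIND shape visible in the spectrum_flow.c hits: look the callback up first so a block shared between devices is reference-counted rather than registered twice, allocate it on the first bind, publish it to the core with flow_block_cb_add() and remember it on a driver-private list. A condensed, hedged sketch of that shape; my_block_cb, my_bind_block() and my_driver_block_cb_list are placeholder names, not identifiers from any of the files above:

    #include <linux/netdevice.h>
    #include <net/flow_offload.h>

    static LIST_HEAD(my_driver_block_cb_list);      /* placeholder driver-wide list */

    /* flow_setup_cb_t: receives each offload command (flower, matchall, ...). */
    static int my_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
    {
            return -EOPNOTSUPP;     /* placeholder: program the hardware here */
    }

    static int my_bind_block(void *priv, struct flow_block_offload *f)
    {
            struct flow_block_cb *block_cb;

            /* A shared block may already carry our callback: just take a reference. */
            block_cb = flow_block_cb_lookup(f->block, my_block_cb, priv);
            if (!block_cb) {
                    block_cb = flow_block_cb_alloc(my_block_cb, priv, priv, NULL);
                    if (IS_ERR(block_cb))
                            return PTR_ERR(block_cb);

                    flow_block_cb_add(block_cb, f);
                    list_add_tail(&block_cb->driver_list, &my_driver_block_cb_list);
            }

            flow_block_cb_incref(block_cb);
            return 0;
    }

The unbind path typically mirrors this: flow_block_cb_lookup(), flow_block_cb_decref(), and only when the reference count drops to zero flow_block_cb_remove() plus list_del() of the driver_list entry.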
|
| H A D | spectrum_qdisc.c |
    2178  struct flow_block_cb *block_cb;  in mlxsw_sp_setup_tc_block_qevent_bind() local
    2184  block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);  in mlxsw_sp_setup_tc_block_qevent_bind()
    2185  if (!block_cb) {  in mlxsw_sp_setup_tc_block_qevent_bind()
    2189  block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,  in mlxsw_sp_setup_tc_block_qevent_bind()
    2191  if (IS_ERR(block_cb)) {  in mlxsw_sp_setup_tc_block_qevent_bind()
    2193  return PTR_ERR(block_cb);  in mlxsw_sp_setup_tc_block_qevent_bind()
    2197  qevent_block = flow_block_cb_priv(block_cb);  in mlxsw_sp_setup_tc_block_qevent_bind()
    2199  flow_block_cb_incref(block_cb);  in mlxsw_sp_setup_tc_block_qevent_bind()
    2233  flow_block_cb_add(block_cb, f);  in mlxsw_sp_setup_tc_block_qevent_bind()
    2234  list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);  in mlxsw_sp_setup_tc_block_qevent_bind()
    [all …]
|
| /linux/drivers/net/ethernet/sfc/ |
| H A D | tc_bindings.c |
    78  struct flow_block_cb *block_cb;  in efx_tc_setup_block() local
    92  block_cb = flow_block_cb_alloc(efx_tc_block_cb, binding,  in efx_tc_setup_block()
    94  rc = PTR_ERR_OR_ZERO(block_cb);  in efx_tc_setup_block()
    104  flow_block_cb_add(block_cb, tcb);  in efx_tc_setup_block()
    110  block_cb = flow_block_cb_lookup(tcb->block,  in efx_tc_setup_block()
    113  if (block_cb) {  in efx_tc_setup_block()
    114  flow_block_cb_remove(block_cb, tcb);  in efx_tc_setup_block()
    142  void (*cleanup)(struct flow_block_cb *block_cb))  in efx_tc_indr_setup_cb() argument
    146  struct flow_block_cb *block_cb;  in efx_tc_indr_setup_cb() local
    173  block_cb = flow_indr_block_cb_alloc(efx_tc_block_cb, binding,  in efx_tc_indr_setup_cb()
    [all …]
|
| H A D | tc_bindings.h | 29 void (*cleanup)(struct flow_block_cb *block_cb));
|
| /linux/include/net/netfilter/ |
| H A D | nf_flow_table.h |
    254  struct flow_block_cb *block_cb;  in nf_flow_table_offload_add_cb()
    258  block_cb = flow_block_cb_lookup(block, cb, cb_priv);
    259  if (block_cb) {  in nf_flow_table_offload_add_cb()
    264  block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);  in nf_flow_table_offload_add_cb()
    265  if (IS_ERR(block_cb)) {  in nf_flow_table_offload_add_cb()
    266  err = PTR_ERR(block_cb);  in nf_flow_table_offload_add_cb()
    270  list_add_tail(&block_cb->list, &block->cb_list);  in nf_flow_table_offload_add_cb()
    287  struct flow_block_cb *block_cb;
    290  block_cb = flow_block_cb_lookup(block, cb, cb_priv);
    291  if (block_cb) {
    230  struct flow_block_cb *block_cb;  nf_flow_table_offload_add_cb() local
    263  struct flow_block_cb *block_cb;  nf_flow_table_offload_del_cb() local
    [all …]
|
| /linux/drivers/net/ethernet/mediatek/ |
| H A D | mtk_ppe_offload.c |
    599  struct flow_block_cb *block_cb;  in mtk_eth_setup_tc_block() local
    613  block_cb = flow_block_cb_lookup(f->block, cb, dev);  in mtk_eth_setup_tc_block()
    614  if (block_cb) {  in mtk_eth_setup_tc_block()
    615  flow_block_cb_incref(block_cb);  in mtk_eth_setup_tc_block()
    618  block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);  in mtk_eth_setup_tc_block()
    619  if (IS_ERR(block_cb))  in mtk_eth_setup_tc_block()
    620  return PTR_ERR(block_cb);  in mtk_eth_setup_tc_block()
    622  flow_block_cb_incref(block_cb);  in mtk_eth_setup_tc_block()
    623  flow_block_cb_add(block_cb, f);  in mtk_eth_setup_tc_block()
    624  list_add_tail(&block_cb->driver_list, &block_cb_list);  in mtk_eth_setup_tc_block()
    [all …]
|
| H A D | mtk_wed.c |
    2700  struct flow_block_cb *block_cb;  in mtk_wed_setup_tc_block() local
    2715  block_cb = flow_block_cb_lookup(f->block, cb, dev);  in mtk_wed_setup_tc_block()
    2716  if (block_cb) {  in mtk_wed_setup_tc_block()
    2717  flow_block_cb_incref(block_cb);  in mtk_wed_setup_tc_block()
    2727  block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);  in mtk_wed_setup_tc_block()
    2728  if (IS_ERR(block_cb)) {  in mtk_wed_setup_tc_block()
    2730  return PTR_ERR(block_cb);  in mtk_wed_setup_tc_block()
    2733  flow_block_cb_incref(block_cb);  in mtk_wed_setup_tc_block()
    2734  flow_block_cb_add(block_cb, f);  in mtk_wed_setup_tc_block()
    2735  list_add_tail(&block_cb->driver_list, &block_cb_list);  in mtk_wed_setup_tc_block()
    [all …]
|
| /linux/net/netfilter/ |
| H A D | nf_tables_offload.c |
    201  struct flow_block_cb *block_cb;  in nft_setup_cb_call() local
    204  list_for_each_entry(block_cb, cb_list, list) {  in nft_setup_cb_call()
    205  err = block_cb->cb(type, type_data, block_cb->cb_priv);  in nft_setup_cb_call()
    328  struct flow_block_cb *block_cb, *next;  in nft_flow_offload_unbind()
    342  list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {  in nft_flow_offload_unbind()
    343  list_del(&block_cb->list);  in nft_flow_offload_unbind()
    344  flow_block_cb_free(block_cb);  in nft_flow_offload_unbind()
    404  static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)  in nft_indr_block_cleanup()
    406  struct nft_base_chain *basechain = block_cb  in nft_indr_block_cleanup()
    324  struct flow_block_cb *block_cb, *next;  nft_flow_offload_unbind() local
    400  nft_indr_block_cleanup(struct flow_block_cb * block_cb)  nft_indr_block_cleanup() argument
    [all …]
|
| H A D | nf_flow_table_offload.c |
    845  struct flow_block_cb *block_cb;  in nf_flow_offload_tuple() local
    855  list_for_each_entry(block_cb, block_cb_list, list) {  in nf_flow_offload_tuple()
    856  err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,  in nf_flow_offload_tuple()
    857  block_cb->cb_priv);  in nf_flow_offload_tuple()
    1102  struct flow_block_cb *block_cb, *next;  in nf_flow_table_block_setup() local
    1111  list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {  in nf_flow_table_block_setup()
    1112  list_del(&block_cb->list);  in nf_flow_table_block_setup()
    1113  flow_block_cb_free(block_cb);  in nf_flow_table_block_setup()
    1141  static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)  in nf_flow_table_indr_cleanup() argument
    1143  struct nf_flowtable *flowtable = block_cb  in nf_flow_table_indr_cleanup()
    [all …]
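Both netfilter entries show the consuming side of the lists the drivers populate: an offload request is delivered by walking a cb_list and invoking each block_cb->cb with that entry's cb_priv, as nft_setup_cb_call() and nf_flow_offload_tuple() do in the hits above. A minimal sketch of that dispatch loop, built on the same headers as the earlier sketch; my_setup_cb_call is a placeholder name and the early-return error policy is an assumption:

    static int my_setup_cb_call(struct list_head *cb_list,
                                enum tc_setup_type type, void *type_data)
    {
            struct flow_block_cb *block_cb;
            int err;

            /* Every callback registered on the block sees the same command. */
            list_for_each_entry(block_cb, cb_list, list) {
                    err = block_cb->cb(type, type_data, block_cb->cb_priv);
                    if (err < 0)
                            return err;
            }

            return 0;
    }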
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/rep/ |
| H A D | tc.c |
    489  void (*cleanup)(struct flow_block_cb *block_cb))  in mlx5e_rep_indr_setup_block() argument
    492  struct flow_block_cb *block_cb;  in mlx5e_rep_indr_setup_block() local
    516  block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,  in mlx5e_rep_indr_setup_block()
    520  if (IS_ERR(block_cb)) {  in mlx5e_rep_indr_setup_block()
    523  return PTR_ERR(block_cb);  in mlx5e_rep_indr_setup_block()
    525  flow_block_cb_add(block_cb, f);  in mlx5e_rep_indr_setup_block()
    526  list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);  in mlx5e_rep_indr_setup_block()
    534  block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);  in mlx5e_rep_indr_setup_block()
    535  if (!block_cb)  in mlx5e_rep_indr_setup_block()
    538  flow_indr_block_cb_remove(block_cb, f);  in mlx5e_rep_indr_setup_block()
    [all …]
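The sfc, mlx5 and bnxt entries use the indirect variant of the same pattern: flow_indr_block_cb_alloc() takes the extra offload/device/qdisc/data/cleanup arguments so the core (see tc_block_indr_cleanup() under cls_api.c below) can tear the binding down when the device the rules were offloaded through goes away, and unbinding goes through flow_indr_block_cb_remove() rather than flow_block_cb_remove(). A hedged sketch of such a handler, reusing the placeholder my_block_cb and my_driver_block_cb_list from the earlier sketch; my_indr_setup_block(), my_indr_release() and the handler signature are illustrative assumptions, not copied from any of these drivers:

    static void my_indr_release(void *cb_priv)
    {
            kfree(cb_priv);         /* placeholder: drop per-binding state */
    }

    static int my_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
                                   void *cb_priv, struct flow_block_offload *f,
                                   void *data,
                                   void (*cleanup)(struct flow_block_cb *block_cb))
    {
            struct flow_block_cb *block_cb;

            switch (f->command) {
            case FLOW_BLOCK_BIND:
                    block_cb = flow_indr_block_cb_alloc(my_block_cb, cb_priv,
                                                        cb_priv, my_indr_release,
                                                        f, netdev, sch, data,
                                                        cb_priv, cleanup);
                    if (IS_ERR(block_cb))
                            return PTR_ERR(block_cb);

                    flow_block_cb_add(block_cb, f);
                    list_add_tail(&block_cb->driver_list, &my_driver_block_cb_list);
                    return 0;
            case FLOW_BLOCK_UNBIND:
                    block_cb = flow_block_cb_lookup(f->block, my_block_cb, cb_priv);
                    if (!block_cb)
                            return -ENOENT;

                    flow_indr_block_cb_remove(block_cb, f);
                    list_del(&block_cb->driver_list);
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }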
|
| /linux/drivers/net/ethernet/netronome/nfp/flower/ |
| H A D | offload.c |
    1786  struct flow_block_cb *block_cb;  in nfp_flower_setup_tc_block() local
    1802  block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,  in nfp_flower_setup_tc_block()
    1804  if (IS_ERR(block_cb))  in nfp_flower_setup_tc_block()
    1805  return PTR_ERR(block_cb);  in nfp_flower_setup_tc_block()
    1807  flow_block_cb_add(block_cb, f);  in nfp_flower_setup_tc_block()
    1808  list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);  in nfp_flower_setup_tc_block()
    1811  block_cb = flow_block_cb_lookup(f->block,  in nfp_flower_setup_tc_block()
    1814  if (!block_cb)  in nfp_flower_setup_tc_block()
    1817  flow_block_cb_remove(block_cb, f);  in nfp_flower_setup_tc_block()
    1818  list_del(&block_cb->driver_list);  in nfp_flower_setup_tc_block()
    [all …]
|
| H A D | main.h | 679 void (*cleanup)(struct flow_block_cb *block_cb));
|
| /linux/net/sched/ |
| H A D | cls_api.c |
    797  static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)  in tc_block_indr_cleanup() argument
    799  struct tcf_block *block = block_cb->indr.data;  in tc_block_indr_cleanup()
    800  struct net_device *dev = block_cb->indr.dev;  in tc_block_indr_cleanup()
    801  struct Qdisc *sch = block_cb->indr.sch;  in tc_block_indr_cleanup()
    806  block_cb->indr.binder_type,  in tc_block_indr_cleanup()
    811  list_del(&block_cb->driver_list);  in tc_block_indr_cleanup()
    812  list_move(&block_cb->list, &bo.cb_list);  in tc_block_indr_cleanup()
    1629  struct flow_block_cb *block_cb, *next;  in tcf_block_bind() local
    1634  list_for_each_entry(block_cb, &bo->cb_list, list) {  in tcf_block_bind()
    1635  err = tcf_block_playback_offloads(block, block_cb  in tcf_block_bind()
    1671  struct flow_block_cb *block_cb, *next;  tcf_block_unbind() local
    3598  struct flow_block_cb *block_cb;  __tc_setup_cb_call() local
    [all …]
|
| /linux/drivers/net/ethernet/freescale/dpaa2/ |
| H A D | dpaa2-switch.c |
    1314  struct flow_block_cb *block_cb;  in dpaa2_switch_setup_tc_block_bind() local
    1318  block_cb = flow_block_cb_lookup(f->block,  in dpaa2_switch_setup_tc_block_bind()
    1322  if (!block_cb) {  in dpaa2_switch_setup_tc_block_bind()
    1329  block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,  in dpaa2_switch_setup_tc_block_bind()
    1331  if (IS_ERR(block_cb))  in dpaa2_switch_setup_tc_block_bind()
    1332  return PTR_ERR(block_cb);  in dpaa2_switch_setup_tc_block_bind()
    1336  filter_block = flow_block_cb_priv(block_cb);  in dpaa2_switch_setup_tc_block_bind()
    1339  flow_block_cb_incref(block_cb);  in dpaa2_switch_setup_tc_block_bind()
    1345  flow_block_cb_add(block_cb, f);  in dpaa2_switch_setup_tc_block_bind()
    1346  list_add_tail(&block_cb->driver_list,  in dpaa2_switch_setup_tc_block_bind()
    [all …]
|
| /linux/drivers/net/ethernet/broadcom/bnxt/ |
| H A D | bnxt_tc.c |
    1908  void (*cleanup)(struct flow_block_cb *block_cb))  in bnxt_tc_setup_indr_block() argument
    1911  struct flow_block_cb *block_cb;  in bnxt_tc_setup_indr_block() local
    1926  block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,  in bnxt_tc_setup_indr_block()
    1930  if (IS_ERR(block_cb)) {  in bnxt_tc_setup_indr_block()
    1933  return PTR_ERR(block_cb);  in bnxt_tc_setup_indr_block()
    1936  flow_block_cb_add(block_cb, f);  in bnxt_tc_setup_indr_block()
    1937  list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);  in bnxt_tc_setup_indr_block()
    1944  block_cb = flow_block_cb_lookup(f->block,  in bnxt_tc_setup_indr_block()
    1947  if (!block_cb)  in bnxt_tc_setup_indr_block()
    1950  flow_indr_block_cb_remove(block_cb, f);  in bnxt_tc_setup_indr_block()
    [all …]
|
| /linux/drivers/net/ethernet/airoha/ |
| H A D | airoha_eth.c |
    2663  struct flow_block_cb *block_cb;  in airoha_dev_setup_tc_block() local
    2671  block_cb = flow_block_cb_lookup(f->block, cb, port->dev);  in airoha_dev_setup_tc_block()
    2672  if (block_cb) {  in airoha_dev_setup_tc_block()
    2673  flow_block_cb_incref(block_cb);  in airoha_dev_setup_tc_block()
    2676  block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);  in airoha_dev_setup_tc_block()
    2677  if (IS_ERR(block_cb))  in airoha_dev_setup_tc_block()
    2678  return PTR_ERR(block_cb);  in airoha_dev_setup_tc_block()
    2680  flow_block_cb_incref(block_cb);  in airoha_dev_setup_tc_block()
    2681  flow_block_cb_add(block_cb, f);  in airoha_dev_setup_tc_block()
    2682  list_add_tail(&block_cb->driver_list, &block_cb_list);  in airoha_dev_setup_tc_block()
    [all …]
|
| /linux/net/dsa/ |
| H A D | user.c |
    1685  struct flow_block_cb *block_cb;  in dsa_user_setup_tc_block() local
    1702  block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);  in dsa_user_setup_tc_block()
    1703  if (IS_ERR(block_cb))  in dsa_user_setup_tc_block()
    1704  return PTR_ERR(block_cb);  in dsa_user_setup_tc_block()
    1706  flow_block_cb_add(block_cb, f);  in dsa_user_setup_tc_block()
    1707  list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);  in dsa_user_setup_tc_block()
    1710  block_cb = flow_block_cb_lookup(f->block, cb, dev);  in dsa_user_setup_tc_block()
    1711  if (!block_cb)  in dsa_user_setup_tc_block()
    1714  flow_block_cb_remove(block_cb, f);  in dsa_user_setup_tc_block()
    1715  list_del(&block_cb->driver_list);  in dsa_user_setup_tc_block()
|