Lines Matching full:tx
48 * TX tables are organized differently for Ethernet and for RoCE:
   * [ASCII diagram largely elided by the match filter -- only a few of its
   *  lines contain "tx". The surviving fragments show the Ethernet Tx path
   *  going through the SA KSPI flowtable, Encrypt, a further flowtable and
   *  the TX NS, with a DROP branch, and a RoCE Tx path reaching the same
   *  pipeline through its own flowtables.]
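The matches below repeatedly dereference a single struct mlx5e_ipsec_tx object. Purely as an orientation aid, here is the rough shape those field accesses imply. Every type, inner struct name and comment is inferred from the matched lines and stubbed so the snippet stands alone as C; the authoritative definition lives in the driver headers and will differ in detail.

/* Orientation sketch only: shape inferred from the tx->... accesses in the
 * matches.  The stubs below stand in for the real mlx5/kernel headers so the
 * snippet is self-contained; the actual driver definitions differ. */
struct mlx5_flow_table;                    /* opaque mlx5 flow-steering objects */
struct mlx5_flow_group;
struct mlx5_flow_handle;
struct mlx5_flow_namespace;
struct mlx5_fc;                            /* flow counter */
struct mutex { int stub; };                /* placeholder for the kernel mutex  */
typedef unsigned int u32;

struct ipsec_miss_sketch {                 /* illustrative name */
	struct mlx5_flow_group *group;     /* tx->pol.group, tx->kspi_miss.group */
	struct mlx5_flow_handle *rule;     /* tx->pol.rule,  tx->kspi_miss.rule  */
};

struct ipsec_tx_sketch {                   /* stands in for struct mlx5e_ipsec_tx */
	struct {
		struct mutex mutex;              /* serializes tx_get()/tx_put() callers */
		u32 refcnt;                      /* users of the TX tables               */
		struct mlx5_flow_table *sa_kspi; /* SA table keyed by kernel SPI (KSPI)  */
		struct mlx5_flow_table *sa;      /* SA table keyed by reqid/IP           */
		struct mlx5_flow_table *pol;     /* policy table                         */
		struct mlx5_flow_table *status;  /* status table holding the counter rule*/
	} ft;
	struct ipsec_miss_sketch pol;            /* policy-table miss group/rule  */
	struct ipsec_miss_sketch kspi_miss;      /* sa_kspi miss group/rule       */
	struct {
		struct mlx5_flow_handle *rule;   /* catch-all counter rule handle */
	} status;
	struct {
		struct mlx5_flow_handle *kspi_rule;
		struct mlx5_flow_handle *rule;
	} kspi_bypass_rule;
	struct {
		struct mlx5_flow_namespace *ns;
		struct mlx5_flow_table *ft;
		struct mlx5_flow_group *g;
		struct mlx5_flow_handle *rule;
	} roce;                                  /* RoCE TX redirect objects      */
	struct {
		struct mlx5_fc *cnt;             /* total TX counter              */
		struct mlx5_fc *drop;            /* TX drop counter               */
	} *fc;                                   /* kzalloc'ed in init_counters() */
	void *chains;                            /* prio-chains handle (opaque here) */
	struct mlx5_flow_namespace *ns;          /* egress IPsec namespace        */
};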
648 static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx) in ipsec_counter_rule_tx() argument
660 dest.counter_id = mlx5_fc_id(tx->fc->cnt); in ipsec_counter_rule_tx()
661 fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1); in ipsec_counter_rule_tx()
664 mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err); in ipsec_counter_rule_tx()
668 tx->status.rule = fte; in ipsec_counter_rule_tx()
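Only a handful of lines from ipsec_counter_rule_tx() match, so here is a hedged reconstruction of the whole function for context: the NULL match spec makes this a catch-all rule in the status table that counts every egressing packet and lets it continue. Only the calls visible in the matches are verbatim; the action flags and destination type are assumptions based on common mlx5 flow-steering usage.

/* Hedged reconstruction, not copied from the driver.  flow_act.action and
 * dest.type are assumed; the mlx5_fc_id()/mlx5_add_flow_rules() usage mirrors
 * the matched lines. */
static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev,
				 struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	int err;

	/* Count every packet reaching the status table, then let it continue. */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);

	/* NULL spec == match all */
	fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		return err;
	}

	tx->status.rule = fte;
	return 0;
}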
675 static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx) { in tx_destroy_roce() argument
676 if (!tx->roce.ft) in tx_destroy_roce()
679 mlx5_del_flow_rules(&tx->roce.rule); in tx_destroy_roce()
680 mlx5_destroy_flow_group(tx->roce.g); in tx_destroy_roce()
681 mlx5_destroy_flow_table(tx->roce.ft); in tx_destroy_roce()
682 tx->roce.ft = NULL; in tx_destroy_roce()
685 /* IPsec TX flow steering */
686 static void tx_destroy(struct mlx5e_ipsec_tx *tx) in tx_destroy() argument
688 tx_destroy_roce(tx); in tx_destroy()
689 if (tx->chains) { in tx_destroy()
690 ipsec_chains_destroy(tx->chains); in tx_destroy()
692 mlx5_del_flow_rules(&tx->pol.rule); in tx_destroy()
693 mlx5_destroy_flow_group(tx->pol.group); in tx_destroy()
694 mlx5_destroy_flow_table(tx->ft.pol); in tx_destroy()
696 mlx5_destroy_flow_table(tx->ft.sa); in tx_destroy()
697 mlx5_del_flow_rules(&tx->kspi_miss.rule); in tx_destroy()
698 mlx5_destroy_flow_group(tx->kspi_miss.group); in tx_destroy()
699 mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule); in tx_destroy()
700 mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule); in tx_destroy()
701 mlx5_destroy_flow_table(tx->ft.sa_kspi); in tx_destroy()
702 mlx5_del_flow_rules(&tx->status.rule); in tx_destroy()
703 mlx5_destroy_flow_table(tx->ft.status); in tx_destroy()
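The match filter drops the braces around the chains/no-chains split in tx_destroy(), which makes the control flow easy to misread. A hedged reconstruction follows; the placement of the else branch is inferred from the gaps in the source line numbers (691 and 695) and is an assumption.

/* Hedged reconstruction of tx_destroy(): teardown mirrors tx_create() in
 * reverse.  The else placement is inferred, everything else follows the
 * matched lines in order. */
static void tx_destroy(struct mlx5e_ipsec_tx *tx)
{
	tx_destroy_roce(tx);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		/* Without chains the driver owns a plain policy table plus its
		 * miss group/rule and must release them explicitly. */
		mlx5_del_flow_rules(&tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	mlx5_destroy_flow_table(tx->ft.sa);
	mlx5_del_flow_rules(&tx->kspi_miss.rule);
	mlx5_destroy_flow_group(tx->kspi_miss.group);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
	mlx5_destroy_flow_table(tx->ft.sa_kspi);
	mlx5_del_flow_rules(&tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}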
707 struct mlx5e_ipsec_tx *tx) in ipsec_tx_roce_rule_setup() argument
716 dst.ft = tx->ft.pol; in ipsec_tx_roce_rule_setup()
717 rule = mlx5_add_flow_rules(tx->roce.ft, NULL, &flow_act, &dst, 1); in ipsec_tx_roce_rule_setup()
720 mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n", in ipsec_tx_roce_rule_setup()
724 tx->roce.rule = rule; in ipsec_tx_roce_rule_setup()
730 static int ipsec_tx_create_roce(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx) in ipsec_tx_create_roce() argument
739 if (!tx->roce.ns) in ipsec_tx_create_roce()
747 ft = mlx5_create_flow_table(tx->roce.ns, &ft_attr); in ipsec_tx_create_roce()
750 mlx5_core_err(mdev, "Fail to create ipsec tx roce ft err=%d\n", in ipsec_tx_create_roce()
754 tx->roce.ft = ft; in ipsec_tx_create_roce()
762 mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n", in ipsec_tx_create_roce()
766 tx->roce.g = g; in ipsec_tx_create_roce()
768 err = ipsec_tx_roce_rule_setup(mdev, tx); in ipsec_tx_create_roce()
770 mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err); in ipsec_tx_create_roce()
778 mlx5_destroy_flow_group(tx->roce.g); in ipsec_tx_create_roce()
780 mlx5_destroy_flow_table(tx->roce.ft); in ipsec_tx_create_roce()
781 tx->roce.ft = NULL; in ipsec_tx_create_roce()
791 * tx - IPSEC TX
795 struct mlx5e_ipsec_tx *tx) in tx_create_kspi_bypass_rules() argument
808 dest.ft = tx->ft.status; in tx_create_kspi_bypass_rules()
813 rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act_kspi, in tx_create_kspi_bypass_rules()
821 tx->kspi_bypass_rule.kspi_rule = rule; in tx_create_kspi_bypass_rules()
827 rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act, &dest, 1); in tx_create_kspi_bypass_rules()
833 tx->kspi_bypass_rule.rule = rule; in tx_create_kspi_bypass_rules()
838 mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule); in tx_create_kspi_bypass_rules()
845 static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx) in tx_create() argument
852 * Tx flow is different for Ethernet traffic than for RoCE packets in tx_create()
860 * For RoCE, the RoCE TX table directs the packets to the policy table explicitly in tx_create()
862 ft = ipsec_tx_ft_create(tx->ns, 0, 0, 4); in tx_create()
865 tx->ft.sa_kspi = ft; in tx_create()
867 ft = ipsec_tx_ft_create(tx->ns, 2, 0, 4); in tx_create()
872 tx->ft.sa = ft; in tx_create()
875 tx->chains = ipsec_chains_create( in tx_create()
876 mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 1, in tx_create()
877 &tx->ft.pol); in tx_create()
878 if (IS_ERR(tx->chains)) { in tx_create()
879 err = PTR_ERR(tx->chains); in tx_create()
883 ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2); in tx_create()
888 tx->ft.pol = ft; in tx_create()
890 dest.ft = tx->ft.sa; in tx_create()
891 err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest); in tx_create()
896 ft = ipsec_tx_ft_create(tx->ns, 2, 0, 1); in tx_create()
901 tx->ft.status = ft; in tx_create()
904 err = ipsec_miss_create(mdev, tx->ft.sa_kspi, &tx->kspi_miss, NULL); in tx_create()
908 err = tx_create_kspi_bypass_rules(mdev, tx); in tx_create()
912 err = ipsec_counter_rule_tx(mdev, tx); in tx_create()
916 err = ipsec_tx_create_roce(mdev, tx); in tx_create()
923 mlx5_del_flow_rules(&tx->status.rule); in tx_create()
925 mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule); in tx_create()
926 mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule); in tx_create()
928 mlx5_destroy_flow_table(tx->ft.status); in tx_create()
930 if (tx->chains) { in tx_create()
931 ipsec_chains_destroy(tx->chains); in tx_create()
933 mlx5_del_flow_rules(&tx->pol.rule); in tx_create()
934 mlx5_destroy_flow_group(tx->pol.group); in tx_create()
937 if (!tx->chains) in tx_create()
938 mlx5_destroy_flow_table(tx->ft.pol); in tx_create()
940 mlx5_del_flow_rules(&tx->kspi_miss.rule); in tx_create()
941 mlx5_destroy_flow_group(tx->kspi_miss.group); in tx_create()
943 mlx5_destroy_flow_table(tx->ft.sa); in tx_create()
945 mlx5_destroy_flow_table(tx->ft.sa_kspi); in tx_create()
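tx_create() provides the policy table in one of two ways, which is why both ipsec_chains_create() and a plain ipsec_tx_ft_create() plus ipsec_miss_create() pair appear in the matches, and why the later error and teardown paths test tx->chains. A hedged sketch of that split follows; the selecting condition is not among the matched lines and is marked as an assumption, while local declarations and the goto error unwinding (visible in the matches above) are elided.

	/* Fragment of tx_create(); locals (ft, dest, err) as in the surrounding
	 * function.  The condition choosing the branch is an assumption. */
	if (use_prio_chains) {			/* assumed capability check */
		/* ipsec_chains_create() builds the prio-chained policy tables
		 * and returns the top-level table through &tx->ft.pol. */
		tx->chains = ipsec_chains_create(mdev, tx->ft.sa,
						 MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
						 0, 1, &tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;	/* label name assumed */
		}
	} else {
		/* Plain policy table plus an explicit miss rule that sends
		 * unmatched traffic on to the SA table. */
		ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_pol_ft;	/* label name assumed */
		}
		tx->ft.pol = ft;
		dest.ft = tx->ft.sa;
		err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
		if (err)
			goto err_pol_miss;	/* label name assumed */
	}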
950 struct mlx5e_ipsec_tx *tx) in tx_get() argument
954 if (tx->ft.refcnt) in tx_get()
957 err = tx_create(mdev, tx); in tx_get()
962 tx->ft.refcnt++; in tx_get()
966 static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx) in tx_put() argument
968 if (--tx->ft.refcnt) in tx_put()
971 tx_destroy(tx); in tx_put()
977 struct mlx5e_ipsec_tx *tx = ipsec->tx; in tx_ft_get() local
980 mutex_lock(&tx->ft.mutex); in tx_ft_get()
981 err = tx_get(mdev, ipsec, tx); in tx_ft_get()
982 mutex_unlock(&tx->ft.mutex); in tx_ft_get()
986 return tx; in tx_ft_get()
993 struct mlx5e_ipsec_tx *tx = ipsec->tx; in tx_ft_get_policy() local
997 mutex_lock(&tx->ft.mutex); in tx_ft_get_policy()
998 err = tx_get(mdev, ipsec, tx); in tx_ft_get_policy()
1002 ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol; in tx_ft_get_policy()
1008 mutex_unlock(&tx->ft.mutex); in tx_ft_get_policy()
1012 tx_put(ipsec, tx); in tx_ft_get_policy()
1014 mutex_unlock(&tx->ft.mutex); in tx_ft_get_policy()
1020 struct mlx5e_ipsec_tx *tx = ipsec->tx; in tx_ft_put_policy() local
1022 mutex_lock(&tx->ft.mutex); in tx_ft_put_policy()
1023 if (tx->chains) in tx_ft_put_policy()
1024 ipsec_chains_put_table(tx->chains, prio); in tx_ft_put_policy()
1026 tx_put(ipsec, tx); in tx_ft_put_policy()
1027 mutex_unlock(&tx->ft.mutex); in tx_ft_put_policy()
1032 struct mlx5e_ipsec_tx *tx = ipsec->tx; in tx_ft_put() local
1034 mutex_lock(&tx->ft.mutex); in tx_ft_put()
1035 tx_put(ipsec, tx); in tx_ft_put()
1036 mutex_unlock(&tx->ft.mutex); in tx_ft_put()
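tx_get()/tx_put() together with the tx_ft_* wrappers implement a common idiom: the TX tables are created lazily for the first user and destroyed when the last reference is dropped, with the wrappers holding tx->ft.mutex around every get and put. A minimal, self-contained userspace model of that idiom (plain C with pthreads, illustrative names, not driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_tables {
	pthread_mutex_t lock;	/* plays the role of tx->ft.mutex   */
	unsigned int refcnt;	/* plays the role of tx->ft.refcnt  */
	void *tables;		/* stands in for the TX flow tables */
};

static int tables_get(struct shared_tables *s)
{
	int err = 0;

	pthread_mutex_lock(&s->lock);
	if (s->refcnt == 0) {
		/* First user: create the real resources (tx_create()). */
		s->tables = malloc(64);
		if (!s->tables) {
			err = -1;
			goto out;
		}
		printf("tables created\n");
	}
	s->refcnt++;
out:
	pthread_mutex_unlock(&s->lock);
	return err;
}

static void tables_put(struct shared_tables *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->refcnt == 0) {
		/* Last user: tear everything down again (tx_destroy()). */
		free(s->tables);
		s->tables = NULL;
		printf("tables destroyed\n");
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct shared_tables s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	if (tables_get(&s))	/* first get creates            */
		return 1;
	tables_get(&s);		/* second get only bumps refcnt */
	tables_put(&s);
	tables_put(&s);		/* last put destroys            */
	return 0;
}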
1119 struct mlx5e_ipsec_tx *tx, in tx_add_kspi_rule() argument
1137 rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest); in tx_add_kspi_rule()
1140 mlx5_core_err(mdev, "fail to add TX ipsec kspi rule err=%d\n", err); in tx_add_kspi_rule()
1153 struct mlx5e_ipsec_tx *tx, in tx_add_reqid_ip_rules() argument
1174 rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest); in tx_add_reqid_ip_rules()
1177 mlx5_core_err(mdev, "fail to add TX ipsec reqid rule err=%d\n", err); in tx_add_reqid_ip_rules()
1190 rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest); in tx_add_reqid_ip_rules()
1193 mlx5_core_err(mdev, "fail to add TX ipsec ip rule err=%d\n", err); in tx_add_reqid_ip_rules()
1215 struct mlx5e_ipsec_tx *tx; in tx_add_rule() local
1219 tx = tx_ft_get(mdev, ipsec); in tx_add_rule()
1220 if (IS_ERR(tx)) in tx_add_rule()
1221 return PTR_ERR(tx); in tx_add_rule()
1244 dest[0].ft = tx->ft.status; in tx_add_rule()
1249 err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2); in tx_add_rule()
1254 err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2); in tx_add_rule()
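A hedged reading of the dispatch in tx_add_rule(): both per-SA rule flavors are handed the same two destinations, the first being the status table so accepted traffic hits the catch-all counter rule. In the sketch below, dest[0] and the two helper calls mirror the matched lines; the second destination, the variable names and the selecting condition are assumptions.

	/* Fragment of tx_add_rule(); flow_act, dest, sa_entry and err are the
	 * surrounding function's locals.  sa_counter and the predicate are
	 * hypothetical. */
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = tx->ft.status;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;	/* assumed per-SA counter */
	dest[1].counter_id = mlx5_fc_id(sa_counter);		/* hypothetical name      */

	if (match_by_kernel_spi)				/* assumed condition      */
		err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2);
	else
		err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2);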
1279 struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx; in tx_add_policy() local
1317 dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop); in tx_add_policy()
1326 dest[dstn].ft = tx->ft.sa; in tx_add_policy()
1332 mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err); in tx_add_policy()
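The two destinations visible in tx_add_policy() suggest the usual split for egress policies: a blocking policy becomes a drop-and-count rule aimed at the TX drop counter, while other policies are forwarded on to the SA table. A hedged sketch follows; the action constants, the policy-action test and the policy_ft variable are assumptions, and only the counter/table destination assignments come from the matches.

	/* Fragment of tx_add_policy(); flow_act, dest, spec and rule are the
	 * surrounding function's locals. */
	int dstn = 0;

	if (policy_action_is_block) {			/* assumed predicate */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
	} else {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dstn].ft = tx->ft.sa;
		dstn++;
	}

	/* policy_ft: hypothetical name for the table returned earlier by
	 * tx_ft_get_policy(). */
	rule = mlx5_add_flow_rules(policy_ft, spec, &flow_act, dest, dstn);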
1448 struct mlx5e_ipsec_tx *tx = ipsec->tx; in ipsec_fs_destroy_counters() local
1453 mlx5_fc_destroy(mdev, tx->fc->drop); in ipsec_fs_destroy_counters()
1454 mlx5_fc_destroy(mdev, tx->fc->cnt); in ipsec_fs_destroy_counters()
1455 kfree(tx->fc); in ipsec_fs_destroy_counters()
1463 struct mlx5e_ipsec_tx *tx = ipsec->tx; in ipsec_fs_init_counters() local
1468 fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL); in ipsec_fs_init_counters()
1472 tx->fc = fc; in ipsec_fs_init_counters()
1488 fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL); in ipsec_fs_init_counters()
1518 mlx5_fc_destroy(mdev, tx->fc->drop); in ipsec_fs_init_counters()
1520 mlx5_fc_destroy(mdev, tx->fc->cnt); in ipsec_fs_init_counters()
1522 kfree(tx->fc); in ipsec_fs_init_counters()
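Only the TX-side lines of ipsec_fs_init_counters() and ipsec_fs_destroy_counters() match. A hedged sketch of that TX half: a small kzalloc'ed container holds one total and one drop flow counter. mlx5_fc_create() is assumed here as the counterpart of the mlx5_fc_destroy() calls in the matches, and the function and type names are illustrative.

/* Hedged sketch of the TX half only; the RX counters that the real function
 * also allocates are omitted. */
static int ipsec_tx_counters_init_sketch(struct mlx5_core_dev *mdev,
					 struct mlx5e_ipsec_tx *tx)
{
	struct mlx5e_ipsec_fc *fc;	/* { struct mlx5_fc *cnt, *drop; } -- inferred */
	int err;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fc->cnt = mlx5_fc_create(mdev, false);	/* assumed creator API */
	if (IS_ERR(fc->cnt)) {
		err = PTR_ERR(fc->cnt);
		goto err_cnt;
	}

	fc->drop = mlx5_fc_create(mdev, false);
	if (IS_ERR(fc->drop)) {
		err = PTR_ERR(fc->drop);
		goto err_drop;
	}

	tx->fc = fc;
	return 0;

err_drop:
	mlx5_fc_destroy(mdev, fc->cnt);
err_cnt:
	kfree(fc);
	return err;
}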
2031 mlx5_core_err(mdev, "Failed to init roce tx ns\n"); in mlx5e_accel_ipsec_fs_init_roce()
2035 ipsec->tx->roce.ns = ns; in mlx5e_accel_ipsec_fs_init_roce()
2190 WARN_ON(ipsec->tx->ft.refcnt); in mlx5e_accel_ipsec_fs_cleanup()
2193 mutex_destroy(&ipsec->tx->ft.mutex); in mlx5e_accel_ipsec_fs_cleanup()
2198 kfree(ipsec->tx); in mlx5e_accel_ipsec_fs_cleanup()
2214 ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL); in mlx5e_accel_ipsec_fs_init()
2215 if (!ipsec->tx) in mlx5e_accel_ipsec_fs_init()
2234 ipsec->tx->ns = tns; in mlx5e_accel_ipsec_fs_init()
2235 mutex_init(&ipsec->tx->ft.mutex); in mlx5e_accel_ipsec_fs_init()
2253 kfree(ipsec->tx); in mlx5e_accel_ipsec_fs_init()
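The TX-related lines of mlx5e_accel_ipsec_fs_init() and _cleanup() outline the lifecycle: allocate the tx state, resolve the egress IPsec flow namespace into tx->ns, initialize the refcount mutex, and undo it all (after checking that no tables are still referenced) on cleanup. A hedged sketch of just that TX portion; mlx5_get_flow_namespace() is assumed as the namespace lookup, and the RX handling and exact error codes of the real functions are omitted or assumed.

/* Hedged sketch of the TX portion only; function names are illustrative. */
static int ipsec_fs_init_tx_sketch(struct mlx5_core_dev *mdev,
				   struct mlx5e_ipsec *ipsec)
{
	struct mlx5_flow_namespace *tns;

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		return -ENOMEM;

	/* Same namespace type the policy chains use above. */
	tns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!tns) {
		kfree(ipsec->tx);
		ipsec->tx = NULL;
		return -EOPNOTSUPP;	/* assumed error code */
	}

	ipsec->tx->ns = tns;
	mutex_init(&ipsec->tx->ft.mutex);
	return 0;
}

static void ipsec_fs_cleanup_tx_sketch(struct mlx5e_ipsec *ipsec)
{
	WARN_ON(ipsec->tx->ft.refcnt);	/* every user must have called tx_ft_put() */
	mutex_destroy(&ipsec->tx->ft.mutex);
	kfree(ipsec->tx);
}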