xref: /linux/drivers/infiniband/hw/mlx5/ib_rep.c (revision a5d9265e017f081f0dc133c0e2f45103d027b874)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"

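/*
 * Profile for IB devices created on top of VF e-switch representors.
 * It runs only the subset of init/cleanup stages a representor needs,
 * substituting the representor-specific flow DB, device callback and
 * RoCE stages; the uplink representor uses the separate
 * uplink_rep_profile instead.
 */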
static const struct mlx5_ib_profile vf_rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};

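/*
 * rep_if .load callback, invoked by the e-switch when a representor
 * comes up: pick the uplink or VF profile, allocate an IB device and
 * run the profile stages, then stash the device in the rep's REP_IB
 * private pointer.
 */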
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5_ib_profile *profile;
	struct mlx5_ib_dev *ibdev;

	if (rep->vport == MLX5_VPORT_UPLINK)
		profile = &uplink_rep_profile;
	else
		profile = &vf_rep_profile;

	ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
	if (!ibdev)
		return -ENOMEM;

	ibdev->rep = rep;
	ibdev->mdev = dev;
	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
			       MLX5_CAP_GEN(dev, num_vhca_ports));
	if (!__mlx5_ib_add(ibdev, profile)) {
		/* Free the just-allocated device so a failed profile
		 * init does not leak it.
		 */
		ib_dealloc_device(&ibdev->ib_dev);
		return -EINVAL;
	}

	rep->rep_if[REP_IB].priv = ibdev;

	return 0;
}

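/*
 * rep_if .unload callback: undo mlx5_ib_vport_rep_load() by tearing
 * down all profile stages and freeing the IB device.
 */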
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *dev;

	if (!rep->rep_if[REP_IB].priv)
		return;

	dev = mlx5_ib_rep_to_dev(rep);
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
	rep->rep_if[REP_IB].priv = NULL;
	ib_dealloc_device(&dev->ib_dev);
}

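/* rep_if .get_proto_dev callback: return the IB device bound to @rep. */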
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}

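/*
 * Register the load/unload/get_proto_dev callbacks above with the
 * e-switch as the REP_IB protocol interface.
 */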
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if = {};

	rep_if.load = mlx5_ib_vport_rep_load;
	rep_if.unload = mlx5_ib_vport_rep_unload;
	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;

	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
}

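/* Drop the REP_IB interface registration from the e-switch. */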
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}

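/*
 * Thin wrappers around the e-switch API, so the rest of mlx5_ib can
 * query e-switch mode, representors and their protocol devices without
 * touching eswitch internals directly.
 */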
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}

struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}

struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}

struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}

struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}

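/*
 * Install an e-switch "send to vport" steering rule for a representor's
 * SQ, so packets posted on this send queue are forwarded to the
 * representor's vport.  Does nothing on non-representor devices.
 */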
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
			      struct mlx5_ib_sq *sq)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;

	if (!dev->rep)
		return 0;

	flow_rule =
		mlx5_eswitch_add_send_to_vport_rule(esw,
						    dev->rep->vport,
						    sq->base.mqp.qpn);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	sq->flow_rule = flow_rule;

	return 0;
}
160