1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
3
4 #include "macsec.h"
5 #include <linux/mlx5/macsec.h>
6
/*
 * Bookkeeping for one HW GID table slot whose physical GID was displaced
 * because a MACsec GID carries the same IP address (GID ambiguity).
 */
struct mlx5_reserved_gids {
	int macsec_index;	/* GID index of the shadowing MACsec GID; -1 when slot is unused */
	const struct ib_gid_attr *physical_gid;	/* held reference to the displaced physical GID */
};
11
/*
 * One RoCE GID address saved per MACsec device; replayed into steering
 * rules when a new SA is added (see handle_macsec_gids()).
 */
struct mlx5_roce_gids {
	struct list_head roce_gid_list_entry;	/* linked on mlx5_macsec_device::macsec_roce_gids */
	u16 gid_idx;	/* GID table index this address was added at */
	union {		/* IPv4 or IPv6 form; which one is valid follows sa_family */
		struct sockaddr_in sockaddr_in;
		struct sockaddr_in6 sockaddr_in6;
	} addr;
};
20
/*
 * Per-MACsec-netdev tracking entry. Created lazily by get_macsec_device()
 * and freed by cleanup_macsec_device() once all three lists are empty.
 */
struct mlx5_macsec_device {
	struct list_head macsec_devices_list_entry;	/* linked on mlx5_macsec::macsec_devices_list */
	void *macdev;			/* the macsec net_device this entry tracks (used as lookup key) */
	struct list_head macsec_roce_gids;	/* saved mlx5_roce_gids for this device */
	struct list_head tx_rules_list;		/* TX steering rules owned by this device */
	struct list_head rx_rules_list;		/* RX steering rules owned by this device */
};
28
cleanup_macsec_device(struct mlx5_macsec_device * macsec_device)29 static void cleanup_macsec_device(struct mlx5_macsec_device *macsec_device)
30 {
31 if (!list_empty(&macsec_device->tx_rules_list) ||
32 !list_empty(&macsec_device->rx_rules_list) ||
33 !list_empty(&macsec_device->macsec_roce_gids))
34 return;
35
36 list_del(&macsec_device->macsec_devices_list_entry);
37 kfree(macsec_device);
38 }
39
get_macsec_device(void * macdev,struct list_head * macsec_devices_list)40 static struct mlx5_macsec_device *get_macsec_device(void *macdev,
41 struct list_head *macsec_devices_list)
42 {
43 struct mlx5_macsec_device *iter, *macsec_device = NULL;
44
45 list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
46 if (iter->macdev == macdev) {
47 macsec_device = iter;
48 break;
49 }
50 }
51
52 if (macsec_device)
53 return macsec_device;
54
55 macsec_device = kzalloc_obj(*macsec_device);
56 if (!macsec_device)
57 return NULL;
58
59 macsec_device->macdev = macdev;
60 INIT_LIST_HEAD(&macsec_device->tx_rules_list);
61 INIT_LIST_HEAD(&macsec_device->rx_rules_list);
62 INIT_LIST_HEAD(&macsec_device->macsec_roce_gids);
63 list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
64
65 return macsec_device;
66 }
67
/*
 * Remove and free every saved RoCE GID entry matching @gid_idx from
 * @macsec_device's list. Safe iteration since entries are deleted in-loop.
 */
static void mlx5_macsec_del_roce_gid(struct mlx5_macsec_device *macsec_device, u16 gid_idx)
{
	struct mlx5_roce_gids *current_gid, *next_gid;

	list_for_each_entry_safe(current_gid, next_gid, &macsec_device->macsec_roce_gids,
				 roce_gid_list_entry)
		if (current_gid->gid_idx == gid_idx) {
			/* fix mangled "&current_gid" (was corrupted to an HTML entity) */
			list_del(&current_gid->roce_gid_list_entry);
			kfree(current_gid);
		}
}
79
/*
 * Record @addr/@gid_idx on @macsec_device so SA events can later replay
 * steering rules for it. Best-effort: silently skips on allocation failure.
 */
static void mlx5_macsec_save_roce_gid(struct mlx5_macsec_device *macsec_device,
				      const struct sockaddr *addr, u16 gid_idx)
{
	struct mlx5_roce_gids *entry;
	size_t copy_len;

	entry = kzalloc_obj(*entry);
	if (!entry)
		return;

	entry->gid_idx = gid_idx;
	/* Both union members start at the same offset; copy the right size. */
	copy_len = addr->sa_family == AF_INET ? sizeof(entry->addr.sockaddr_in) :
						sizeof(entry->addr.sockaddr_in6);
	memcpy(&entry->addr, addr, copy_len);

	list_add_tail(&entry->roce_gid_list_entry, &macsec_device->macsec_roce_gids);
}
97
handle_macsec_gids(struct list_head * macsec_devices_list,struct mlx5_macsec_event_data * data)98 static void handle_macsec_gids(struct list_head *macsec_devices_list,
99 struct mlx5_macsec_event_data *data)
100 {
101 struct mlx5_macsec_device *macsec_device;
102 struct mlx5_roce_gids *gid;
103
104 macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
105 if (!macsec_device)
106 return;
107
108 list_for_each_entry(gid, &macsec_device->macsec_roce_gids, roce_gid_list_entry) {
109 mlx5_macsec_add_roce_sa_rules(data->fs_id, (struct sockaddr *)&gid->addr,
110 gid->gid_idx, &macsec_device->tx_rules_list,
111 &macsec_device->rx_rules_list, data->macsec_fs,
112 data->is_tx);
113 }
114 }
115
del_sa_roce_rule(struct list_head * macsec_devices_list,struct mlx5_macsec_event_data * data)116 static void del_sa_roce_rule(struct list_head *macsec_devices_list,
117 struct mlx5_macsec_event_data *data)
118 {
119 struct mlx5_macsec_device *macsec_device;
120
121 macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
122 WARN_ON(!macsec_device);
123
124 mlx5_macsec_del_roce_sa_rules(data->fs_id, data->macsec_fs,
125 &macsec_device->tx_rules_list,
126 &macsec_device->rx_rules_list, data->is_tx);
127 }
128
macsec_event(struct notifier_block * nb,unsigned long event,void * data)129 static int macsec_event(struct notifier_block *nb, unsigned long event, void *data)
130 {
131 struct mlx5_macsec *macsec = container_of(nb, struct mlx5_macsec, blocking_events_nb);
132
133 mutex_lock(&macsec->lock);
134 switch (event) {
135 case MLX5_DRIVER_EVENT_MACSEC_SA_ADDED:
136 handle_macsec_gids(&macsec->macsec_devices_list, data);
137 break;
138 case MLX5_DRIVER_EVENT_MACSEC_SA_DELETED:
139 del_sa_roce_rule(&macsec->macsec_devices_list, data);
140 break;
141 default:
142 mutex_unlock(&macsec->lock);
143 return NOTIFY_DONE;
144 }
145 mutex_unlock(&macsec->lock);
146 return NOTIFY_OK;
147 }
148
mlx5r_macsec_event_register(struct mlx5_ib_dev * dev)149 void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev)
150 {
151 if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
152 mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
153 return;
154 }
155
156 dev->macsec.blocking_events_nb.notifier_call = macsec_event;
157 blocking_notifier_chain_register(&dev->mdev->macsec_nh,
158 &dev->macsec.blocking_events_nb);
159 }
160
mlx5r_macsec_event_unregister(struct mlx5_ib_dev * dev)161 void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev)
162 {
163 if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
164 mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
165 return;
166 }
167
168 blocking_notifier_chain_unregister(&dev->mdev->macsec_nh,
169 &dev->macsec.blocking_events_nb);
170 }
171
mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev * dev)172 int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev)
173 {
174 int i, j, max_gids;
175
176 if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
177 mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
178 return 0;
179 }
180
181 max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
182 for (i = 0; i < dev->num_ports; i++) {
183 dev->port[i].reserved_gids = kzalloc_objs(*dev->port[i].reserved_gids,
184 max_gids);
185 if (!dev->port[i].reserved_gids)
186 goto err;
187
188 for (j = 0; j < max_gids; j++)
189 dev->port[i].reserved_gids[j].macsec_index = -1;
190 }
191
192 INIT_LIST_HEAD(&dev->macsec.macsec_devices_list);
193 mutex_init(&dev->macsec.lock);
194
195 return 0;
196 err:
197 while (i >= 0) {
198 kfree(dev->port[i].reserved_gids);
199 i--;
200 }
201 return -ENOMEM;
202 }
203
mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev * dev)204 void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev)
205 {
206 int i;
207
208 if (!mlx5_is_macsec_roce_supported(dev->mdev))
209 mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
210
211 for (i = 0; i < dev->num_ports; i++)
212 kfree(dev->port[i].reserved_gids);
213
214 mutex_destroy(&dev->macsec.lock);
215 }
216
/*
 * GID-add hook for RoCE MACsec: called when a RoCEv2 (UDP-encap) GID is
 * added on a netdev. If the netdev is an offloaded macsec device, clear
 * the HW entry of any physical GID carrying the same address (recording
 * it for later restore), install MACsec steering rules for the address,
 * and save the address for SA-event replay. Serialized by dev->macsec.lock.
 *
 * Returns 0 on success or when MACsec handling does not apply, negative
 * errno on failure.
 */
int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(attr->device);
	struct mlx5_macsec_device *macsec_device;
	const struct ib_gid_attr *physical_gid;
	struct mlx5_reserved_gids *mgids;
	struct net_device *ndev;
	int ret = 0;
	union {
		struct sockaddr_in sockaddr_in;
		struct sockaddr_in6 sockaddr_in6;
	} addr;

	/* Only RoCEv2 GIDs carry an IP address to build steering rules on. */
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return 0;
	}

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Nothing to do unless the GID sits on an offloaded macsec netdev. */
	if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
		rcu_read_unlock();
		return 0;
	}
	/* Pin the netdev across the locked section below. */
	dev_hold(ndev);
	rcu_read_unlock();

	mutex_lock(&dev->macsec.lock);
	macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
	if (!macsec_device) {
		ret = -ENOMEM;
		goto dev_err;
	}

	/*
	 * GID ambiguity: if a physical GID already carries this address,
	 * clear its HW entry and remember it so deletion can restore it.
	 */
	physical_gid = rdma_find_gid(attr->device, &attr->gid,
				     attr->gid_type, NULL);
	if (!IS_ERR(physical_gid)) {
		ret = set_roce_addr(to_mdev(physical_gid->device),
				    physical_gid->port_num,
				    physical_gid->index, NULL,
				    physical_gid);
		if (ret)
			goto gid_err;

		mgids = &dev->port[attr->port_num - 1].reserved_gids[physical_gid->index];
		mgids->macsec_index = attr->index;
		mgids->physical_gid = physical_gid; /* reference released on del */
	}

	/* Proceed with adding steering rules, regardless if there was gid ambiguity or not.*/
	rdma_gid2ip((struct sockaddr *)&addr, &attr->gid);
	ret = mlx5_macsec_add_roce_rule(ndev, (struct sockaddr *)&addr, attr->index,
					&macsec_device->tx_rules_list,
					&macsec_device->rx_rules_list, dev->mdev->macsec_fs);
	/*
	 * NOTE(review): when the rule add fails and there was no ambiguous
	 * physical GID, execution falls through - the address is still saved
	 * and the error returned. Confirm this asymmetry is intentional.
	 */
	if (ret && !IS_ERR(physical_gid))
		goto rule_err;

	mlx5_macsec_save_roce_gid(macsec_device, (struct sockaddr *)&addr, attr->index);

	dev_put(ndev);
	mutex_unlock(&dev->macsec.lock);
	return ret;

rule_err:
	/* Restore the physical GID's HW entry that was cleared above. */
	set_roce_addr(to_mdev(physical_gid->device), physical_gid->port_num,
		      physical_gid->index, &physical_gid->gid, physical_gid);
	mgids->macsec_index = -1;
gid_err:
	rdma_put_gid_attr(physical_gid);
	cleanup_macsec_device(macsec_device);
dev_err:
	dev_put(ndev);
	mutex_unlock(&dev->macsec.lock);
	return ret;
}
300
/*
 * GID-delete hook for RoCE MACsec: undoes mlx5r_add_gid_macsec_operations().
 * Handles both sides of a GID ambiguity: deleting the reserved physical
 * GID itself, or deleting the MACsec GID and restoring the physical GID's
 * HW entry, then tearing down steering rules and saved addresses.
 */
void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(attr->device);
	struct mlx5_macsec_device *macsec_device;
	struct mlx5_reserved_gids *mgids;
	struct net_device *ndev;
	int i, max_gids;

	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return;
	}

	mgids = &dev->port[attr->port_num - 1].reserved_gids[attr->index];
	if (mgids->macsec_index != -1) { /* Checking if physical gid has ambiguous IP */
		/* Deleting the reserved physical GID: just drop its reference. */
		rdma_put_gid_attr(mgids->physical_gid);
		mgids->macsec_index = -1;
		return;
	}

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return;
	}

	if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
		rcu_read_unlock();
		return;
	}
	/* Pin the netdev across the locked section below. */
	dev_hold(ndev);
	rcu_read_unlock();

	mutex_lock(&dev->macsec.lock);
	max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
	for (i = 0; i < max_gids; i++) { /* Checking if macsec gid has ambiguous IP */
		mgids = &dev->port[attr->port_num - 1].reserved_gids[i];
		if (mgids->macsec_index == attr->index) {
			const struct ib_gid_attr *physical_gid = mgids->physical_gid;

			/* Restore the displaced physical GID's HW entry. */
			set_roce_addr(to_mdev(physical_gid->device),
				      physical_gid->port_num,
				      physical_gid->index,
				      &physical_gid->gid, physical_gid);

			rdma_put_gid_attr(physical_gid);
			mgids->macsec_index = -1;
			break;
		}
	}
	/*
	 * NOTE(review): get_macsec_device() returns NULL on allocation
	 * failure; this path assumes the entry already exists from the add
	 * hook and dereferences it unchecked - confirm that assumption.
	 */
	macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
	mlx5_macsec_del_roce_rule(attr->index, dev->mdev->macsec_fs,
				  &macsec_device->tx_rules_list, &macsec_device->rx_rules_list);
	mlx5_macsec_del_roce_gid(macsec_device, attr->index);
	/* Frees the entry once no rules or saved GIDs remain. */
	cleanup_macsec_device(macsec_device);

	dev_put(ndev);
	mutex_unlock(&dev->macsec.lock);
}
364