// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST			BIT(1)
#define NFP_FL_LAG_FIRST		BIT(2)
#define NFP_FL_LAG_DATA			BIT(3)
#define NFP_FL_LAG_XON			BIT(4)
#define NFP_FL_LAG_SYNC			BIT(5)
#define NFP_FL_LAG_SWITCH		BIT(6)
#define NFP_FL_LAG_RESET		BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP		BIT(0)
#define NFP_PORT_LAG_TX_ENABLED		BIT(1)
#define NFP_PORT_LAG_CHANGED		BIT(2)

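/* Progress of a batch of config messages: FIRST before any message has been
 * sent, MEMBER once at least one group config message has been added to the
 * batch, FINISHED once the closing switch/last message has been queued.
 */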
enum nfp_fl_lag_batch {
	NFP_FL_LAG_BATCH_FIRST,
	NFP_FL_LAG_BATCH_MEMBER,
	NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags: Configuration flags
 * @reserved: Reserved for future use
 * @ttl: Time to live of packet - host always sets to 0xff
 * @pkt_number: Config message packet number - increment for each message
 * @batch_ver: Batch version of messages - increment for each batch of messages
 * @group_id: Group ID applicable
 * @group_inst: Group instance number - increment when group is reused
 * @members: Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
	u8 ctrl_flags;
	u8 reserved[2];
	u8 ttl;
	__be32 pkt_number;
	__be32 batch_ver;
	__be32 group_id;
	__be32 group_inst;
	__be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id: Assigned group ID for host/kernel sync
 * @group_inst: Group instance in case of ID reuse
 * @list: List entry
 * @master_ndev: Group master Netdev
 * @dirty: Marked if the group needs to be synced to HW
 * @offloaded: Marked if the group is currently offloaded to NIC
 * @to_remove: Marked if the group should be removed from NIC
 * @to_destroy: Marked if the group should be removed from driver
 * @slave_cnt: Number of slaves in group
 */
struct nfp_fl_lag_group {
	unsigned int group_id;
	u8 group_inst;
	struct list_head list;
	struct net_device *master_ndev;
	bool dirty;
	bool offloaded;
	bool to_remove;
	bool to_destroy;
	unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK	GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK		GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL		0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID		0
#define NFP_FL_LAG_GROUP_MIN		1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX		31 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */

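/* Packet numbers are a simple incrementing counter, masked to 31 bits so
 * they wrap rather than overflow.
 */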
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

	return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
	/* LSB is not considered by firmware so add 2 for each increment. */
	lag->batch_ver += 2;
	lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

	/* Zero is reserved by firmware. */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

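/* Allocate a group ID (NFP_FL_LAG_GROUP_MIN to NFP_FL_LAG_GROUP_MAX) and add
 * a new group entry to the LAG group list. Called with lag->lock held.
 */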
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	int id;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	id = ida_alloc_range(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
			     NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
	if (id < 0) {
		nfp_flower_cmsg_warn(priv->app,
				     "No more bonding groups available\n");
		return ERR_PTR(id);
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ida_free(&lag->ida_handle, id);
		return ERR_PTR(-ENOMEM);
	}

	group->group_id = id;
	group->master_ndev = master;
	group->dirty = true;
	group->offloaded = false;
	group->to_remove = false;
	group->to_destroy = false;
	group->slave_cnt = 0;
	group->group_inst = ++lag->global_inst;
	list_add_tail(&group->list, &lag->group_list);

	return group;
}

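/* Find the group entry whose bond master matches the given netdev, or NULL
 * if the master is not tracked. Called with lag->lock held.
 */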
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
					  struct net_device *master)
{
	struct nfp_fl_lag_group *entry;

	if (!master)
		return NULL;

	list_for_each_entry(entry, &lag->group_list, list)
		if (entry->master_ndev == master)
			return entry;

	return NULL;
}

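/* Fill in the group ID, batch version and group instance for a bond master.
 * The batch version is shifted, converted to big-endian and only its first
 * three bytes are copied into the caller's buffer.
 */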
static int nfp_fl_lag_get_group_info(struct nfp_app *app,
				     struct net_device *netdev,
				     __be16 *group_id,
				     u8 *batch_ver,
				     u8 *group_inst)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	__be32 temp_vers;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  netdev);
	if (!group) {
		mutex_unlock(&priv->nfp_lag.lock);
		return -ENOENT;
	}

	if (group_id)
		*group_id = cpu_to_be16(group->group_id);

	if (batch_ver) {
		temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
					NFP_FL_PRE_LAG_VER_OFF);
		memcpy(batch_ver, &temp_vers, 3);
	}

	if (group_inst)
		*group_inst = group->group_inst;

	mutex_unlock(&priv->nfp_lag.lock);

	return 0;
}

int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act,
				       struct netlink_ext_ack *extack)
{
	if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id,
				      pre_act->lag_version,
				      &pre_act->instance)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
		return -ENOENT;
	}

	return 0;
}

void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
					 struct net_device *netdev,
					 struct nfp_tun_neigh_lag *lag)
{
	nfp_fl_lag_get_group_info(app, netdev, NULL,
				  lag->lag_version, &lag->lag_instance);
}

int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	int group_id = -ENOENT;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (group)
		group_id = group->group_id;
	mutex_unlock(&priv->nfp_lag.lock);

	return group_id;
}

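/* Build and send a single LAG config control message for a group. Passing a
 * NULL group with a zero member count produces the batch-end (or reset)
 * message using the reserved sync group ID.
 */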
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
			struct net_device **active_members,
			unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv;
	unsigned long int flags;
	unsigned int size, i;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
	skb = nfp_flower_cmsg_alloc(priv->app, size,
				    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = 0;

	/* Increment batch version for each new batch of config messages. */
	if (*batch == NFP_FL_LAG_BATCH_FIRST) {
		flags |= NFP_FL_LAG_FIRST;
		nfp_fl_increment_version(lag);
		*batch = NFP_FL_LAG_BATCH_MEMBER;
	}

	/* If it is a reset msg then it is also the end of the batch. */
	if (lag->rst_cfg) {
		flags |= NFP_FL_LAG_RESET;
		*batch = NFP_FL_LAG_BATCH_FINISHED;
	}

	/* To signal the end of a batch, both the switch and last flags are set
	 * and the reserved SYNC group ID is used.
	 */
	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
		lag->rst_cfg = false;
		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
		cmsg_payload->group_inst = 0;
	} else {
		cmsg_payload->group_id = cpu_to_be32(group->group_id);
		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
	}

	cmsg_payload->reserved[0] = 0;
	cmsg_payload->reserved[1] = 0;
	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
	cmsg_payload->ctrl_flags = flags;
	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

	for (i = 0; i < member_cnt; i++)
		cmsg_payload->members[i] =
			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

	nfp_ctrl_tx(priv->app->ctrl, skb);
	return 0;
}

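/* Delayed work handler that syncs LAG state to firmware. Groups marked for
 * removal are deleted by sending a zero-member config; remaining groups are
 * re-checked against the current bond membership and re-offloaded if dirty.
 * Failures reschedule the work to retry after NFP_FL_LAG_DELAY.
 */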
static void nfp_fl_lag_do_work(struct work_struct *work)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
	struct nfp_fl_lag_group *entry, *storage;
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *priv;
	struct nfp_fl_lag *lag;
	int err;

	delayed_work = to_delayed_work(work);
	lag = container_of(delayed_work, struct nfp_fl_lag, work);
	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		struct net_device *iter_netdev, **acti_netdevs;
		struct nfp_flower_repr_priv *repr_priv;
		int active_count = 0, slaves = 0;
		struct nfp_repr *repr;
		unsigned long *flags;

		if (entry->to_remove) {
			/* Active count of 0 deletes group on hw. */
			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
						      &batch);
			if (!err) {
				entry->to_remove = false;
				entry->offloaded = false;
			} else {
				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
				continue;
			}

			if (entry->to_destroy) {
				ida_free(&lag->ida_handle, entry->group_id);
				list_del(&entry->list);
				kfree(entry);
			}
			continue;
		}

		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
		if (!acti_netdevs) {
			schedule_delayed_work(&lag->work,
					      NFP_FL_LAG_DELAY);
			continue;
		}

		/* Include sanity check in the loop. It may be that a bond has
		 * changed between processing the last notification and the
		 * work queue triggering. If the number of slaves has changed
		 * or it now contains netdevs that cannot be offloaded, ignore
		 * the group until pending notifications are processed.
		 */
		rcu_read_lock();
		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
				slaves = 0;
				break;
			}

			repr = netdev_priv(iter_netdev);

			if (repr->app != priv->app) {
				slaves = 0;
				break;
			}

			slaves++;
			if (slaves > entry->slave_cnt)
				break;

			/* Check the ports for state changes. */
			repr_priv = repr->app_priv;
			flags = &repr_priv->lag_port_flags;

			if (*flags & NFP_PORT_LAG_CHANGED) {
				*flags &= ~NFP_PORT_LAG_CHANGED;
				entry->dirty = true;
			}

			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
			    (*flags & NFP_PORT_LAG_LINK_UP))
				acti_netdevs[active_count++] = iter_netdev;
		}
		rcu_read_unlock();

		if (slaves != entry->slave_cnt || !entry->dirty) {
			kfree(acti_netdevs);
			continue;
		}

		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
		if (!err) {
			entry->offloaded = true;
			entry->dirty = false;
		} else {
			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
		}

		kfree(acti_netdevs);
	}

	/* End the config batch if at least one packet has been batched. */
	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
		batch = NFP_FL_LAG_BATCH_FINISHED;
		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
	}

	mutex_unlock(&lag->lock);
}

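/* Queue a retransmitted config message from firmware for a later resend,
 * rejecting it if the group ID is out of range or the queue is full.
 */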
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(cmsg_payload->group_id) > NFP_FL_LAG_GROUP_MAX)
		return -EINVAL;

	/* Drop cmsg retrans if storage limit is exceeded to prevent
	 * overloading. If the fw notices that expected messages have not been
	 * received in a given time block, it will request a full resync.
	 */
	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
		return -ENOSPC;

	__skb_queue_tail(&lag->retrans_skbs, skb);

	return 0;
}

static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
		nfp_ctrl_tx(priv->app->ctrl, skb);
}

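/* Handle a LAG config message from firmware. DATA messages are stored for
 * retransmission, XON flushes the stored queue, and SYNC triggers a full
 * resync of all group config. Returns true if the skb was queued.
 */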
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group_entry;
	unsigned long int flags;
	bool store_skb = false;
	int err;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = cmsg_payload->ctrl_flags;

	/* Note the intentional fall through below. If DATA and XON are both
	 * set, the message will be stored and sent again with the rest of the
	 * unprocessed messages list.
	 */

	/* Store */
	if (flags & NFP_FL_LAG_DATA)
		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
			store_skb = true;

	/* Send stored */
	if (flags & NFP_FL_LAG_XON)
		nfp_fl_send_unprocessed(&priv->nfp_lag);

	/* Resend all */
	if (flags & NFP_FL_LAG_SYNC) {
		/* To resend all config:
		 * 1) Clear all unprocessed messages
		 * 2) Mark all groups dirty
		 * 3) Reset NFP group config
		 * 4) Schedule a LAG config update
		 */

		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);

		mutex_lock(&priv->nfp_lag.lock);
		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
			group_entry->dirty = true;

		err = nfp_flower_lag_reset(&priv->nfp_lag);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
		mutex_unlock(&priv->nfp_lag.lock);

		schedule_delayed_work(&priv->nfp_lag.work, 0);
	}

	return store_skb;
}

static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
{
	group->to_remove = true;

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	if (!netif_is_bond_master(master))
		return;

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
	if (!group) {
		mutex_unlock(&lag->lock);
		nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
			 netdev_name(master));
		return;
	}

	group->to_remove = true;
	group->to_destroy = true;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

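/* NETDEV_CHANGEUPPER handler: decide whether a bond can be offloaded. All
 * slaves must be reprs belonging to this app and the bond must use
 * active-backup or a supported L3/L4 hash policy. Offloadable groups are
 * created or marked dirty; previously offloaded groups that no longer
 * qualify are scheduled for removal.
 */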
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *iter_netdev;
	struct netdev_lag_upper_info *lag_upper_info;
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	unsigned int slave_count = 0;
	bool can_offload = true;
	struct nfp_repr *repr;

	if (!netif_is_lag_master(upper))
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
			can_offload = false;
			break;
		}
		repr = netdev_priv(iter_netdev);

		/* Ensure all ports are created by the same app/on same card. */
		if (repr->app != priv->app) {
			can_offload = false;
			break;
		}

		slave_count++;
	}
	rcu_read_unlock();

	lag_upper_info = info->upper_info;

	/* Firmware supports active/backup and L3/L4 hash bonds. */
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
	     (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
		can_offload = false;
		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
	}

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

	if (slave_count == 0 || !can_offload) {
		/* Cannot offload the group - remove if previously offloaded. */
		if (group && group->offloaded)
			nfp_fl_lag_schedule_group_remove(lag, group);

		mutex_unlock(&lag->lock);
		return 0;
	}

	if (!group) {
		group = nfp_fl_lag_group_create(lag, upper);
		if (IS_ERR(group)) {
			mutex_unlock(&lag->lock);
			return PTR_ERR(group);
		}
	}

	group->dirty = true;
	group->slave_cnt = slave_count;

	/* Group may have been on queue for removal but is now offloadable. */
	group->to_remove = false;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

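/* NETDEV_CHANGELOWERSTATE handler: record link and tx-enabled state for a
 * repr acting as a bond port, mark it changed and schedule a config update.
 */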
static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
			  struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_flower_priv *priv;
	struct nfp_repr *repr;
	unsigned long *flags;

	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
		return;

	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	repr = netdev_priv(netdev);

	/* Verify that the repr is associated with this app. */
	if (repr->app != priv->app)
		return;

	repr_priv = repr->app_priv;
	flags = &repr_priv->lag_port_flags;

	mutex_lock(&lag->lock);
	if (lag_lower_info->link_up)
		*flags |= NFP_PORT_LAG_LINK_UP;
	else
		*flags &= ~NFP_PORT_LAG_LINK_UP;

	if (lag_lower_info->tx_enabled)
		*flags |= NFP_PORT_LAG_TX_ENABLED;
	else
		*flags &= ~NFP_PORT_LAG_TX_ENABLED;

	*flags |= NFP_PORT_LAG_CHANGED;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

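/* Dispatch netdev notifier events relevant to LAG offload. CHANGEUPPER and
 * CHANGELOWERSTATE update group and port state; UNREGISTER tears down the
 * group of a deleted bond master.
 */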
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
				struct net_device *netdev,
				unsigned long event, void *ptr)
{
	struct nfp_fl_lag *lag = &priv->nfp_lag;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = nfp_fl_lag_changeupper_event(lag, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_CHANGELOWERSTATE:
		nfp_fl_lag_changels_event(lag, netdev, ptr);
		return NOTIFY_OK;
	case NETDEV_UNREGISTER:
		nfp_fl_lag_schedule_group_delete(lag, netdev);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

	lag->rst_cfg = true;
	return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
	INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
	INIT_LIST_HEAD(&lag->group_list);
	mutex_init(&lag->lock);
	ida_init(&lag->ida_handle);

	__skb_queue_head_init(&lag->retrans_skbs);

	/* 0 is a reserved batch version so increment to first valid value. */
	nfp_fl_increment_version(lag);
}

void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
	struct nfp_fl_lag_group *entry, *storage;

	cancel_delayed_work_sync(&lag->work);

	__skb_queue_purge(&lag->retrans_skbs);

	/* Remove all groups. */
	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&lag->lock);
	mutex_destroy(&lag->lock);
	ida_destroy(&lag->ida_handle);
}