xref: /linux/net/ncsi/ncsi-manage.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright Gavin Shan, IBM Corporation 2016.
4  */
5 
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
11 
12 #include <net/ncsi.h>
13 #include <net/net_namespace.h>
14 #include <net/sock.h>
15 #include <net/addrconf.h>
16 #include <net/ipv6.h>
17 #include <net/genetlink.h>
18 
19 #include "internal.h"
20 #include "ncsi-pkt.h"
21 #include "ncsi-netlink.h"
22 
/* Global list of all registered NCSI devices, protected by ncsi_dev_lock. */
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
25 
26 bool ncsi_channel_has_link(struct ncsi_channel *channel)
27 {
28 	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
29 }
30 
/* Return true if @channel is the last usable channel on the device,
 * i.e. no other channel on any package is both active and has link.
 */
bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
			  struct ncsi_channel *channel)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_PACKAGE(ndp, np)
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			/* Skip the channel being asked about */
			if (nc == channel)
				continue;
			if (nc->state == NCSI_CHANNEL_ACTIVE &&
			    ncsi_channel_has_link(nc))
				return false;
		}

	return true;
}
48 
/* Recompute the aggregate link state and report it via the device
 * handler.  With @force_down the link is reported down unconditionally;
 * otherwise it is up if any active, unqueued channel has link.
 */
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels queued for processing or not active
			 * cannot contribute to the link state.
			 */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			/* One channel with link is enough to report up */
			if (ncsi_channel_has_link(nc)) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}
86 
/* Per-channel health monitor, run once a second from a timer.  It sends
 * a Get Link Status command in the START/RETRY states, then counts WAIT
 * ticks; if the response handler never resets the state before
 * WAIT_MAX elapses, the channel is declared dead: link is forced down,
 * a reshuffle is requested and the channel is re-queued for
 * reconfiguration.
 */
static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	/* Snapshot channel state under the lock; decisions below act on
	 * this snapshot.
	 */
	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	/* Stop monitoring channels that were disabled or are queued for
	 * processing.
	 */
	if (!enabled || chained) {
		ncsi_stop_channel_monitor(nc);
		return;
	}
	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
		ncsi_stop_channel_monitor(nc);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		/* Probe the channel with a Get Link Status command */
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		/* Still within the response window - just keep ticking */
		break;
	default:
		/* Timed out: the channel stopped responding to GLS */
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		ncsi_report_link(ndp, true);
		ndp->flags |= NCSI_DEV_RESHUFFLE;

		ncsi_stop_channel_monitor(nc);

		/* Clear the cached link bit so the channel is not picked
		 * again on link grounds before it is reconfigured.
		 */
		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Re-queue the channel so it gets suspended/reconfigured */
		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	/* Advance the tick counter and re-arm for another second */
	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}
158 
/* Arm the per-channel monitor timer (fires in one second).  The channel
 * must not already be monitored - the WARN catches double starts.
 */
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}
171 
/* Disarm the channel monitor.  Safe to call when it is not running.
 * del_timer_sync() guarantees the timer handler has completed before
 * return, so the caller may safely tear the channel down afterwards.
 */
void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->monitor.timer);
}
186 
187 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
188 				       unsigned char id)
189 {
190 	struct ncsi_channel *nc;
191 
192 	NCSI_FOR_EACH_CHANNEL(np, nc) {
193 		if (nc->id == id)
194 			return nc;
195 	}
196 
197 	return NULL;
198 }
199 
/* Allocate and register a channel with the given id under @np.  If a
 * channel with that id already exists (e.g. raced with another adder),
 * the existing one is returned and the new allocation is freed.
 * GFP_ATOMIC because this can be called from response-handling context.
 * Returns NULL only on allocation failure.
 */
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	/* Initialize the channel fully before publishing it */
	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	/* Re-check for a duplicate under the package lock */
	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}
236 
/* Unregister and free a channel: release its filter tables, stop its
 * monitor, unlink it from the parent package and free the structure.
 */
static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	/* Synchronously stops the timer before the struct goes away */
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}
260 
261 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
262 				       unsigned char id)
263 {
264 	struct ncsi_package *np;
265 
266 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
267 		if (np->id == id)
268 			return np;
269 	}
270 
271 	return NULL;
272 }
273 
/* Allocate and register a package with the given id on @ndp.  If a
 * package with that id already exists, the existing one is returned and
 * the new allocation is freed.  GFP_ATOMIC because this can run from
 * response-handling context.  Returns NULL only on allocation failure.
 */
struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);
	/* All channels allowed by default */
	np->channel_whitelist = UINT_MAX;

	/* Re-check for a duplicate under the device lock */
	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}
304 
/* Unregister and free a package together with all its channels.
 * NOTE(review): the channel list is walked without np->lock held -
 * presumably callers guarantee no concurrent channel addition at this
 * point; confirm against the teardown paths.
 */
void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}
323 
324 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
325 				   unsigned char id,
326 				   struct ncsi_package **np,
327 				   struct ncsi_channel **nc)
328 {
329 	struct ncsi_package *p;
330 	struct ncsi_channel *c;
331 
332 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
333 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
334 
335 	if (np)
336 		*np = p;
337 	if (nc)
338 		*nc = c;
339 }
340 
341 /* For two consecutive NCSI commands, the packet IDs shouldn't
342  * be same. Otherwise, the bogus response might be replied. So
343  * the available IDs are allocated in round-robin fashion.
344  */
345 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
346 					unsigned int req_flags)
347 {
348 	struct ncsi_request *nr = NULL;
349 	int i, limit = ARRAY_SIZE(ndp->requests);
350 	unsigned long flags;
351 
352 	/* Check if there is one available request until the ceiling */
353 	spin_lock_irqsave(&ndp->lock, flags);
354 	for (i = ndp->request_id; i < limit; i++) {
355 		if (ndp->requests[i].used)
356 			continue;
357 
358 		nr = &ndp->requests[i];
359 		nr->used = true;
360 		nr->flags = req_flags;
361 		ndp->request_id = i + 1;
362 		goto found;
363 	}
364 
365 	/* Fail back to check from the starting cursor */
366 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
367 		if (ndp->requests[i].used)
368 			continue;
369 
370 		nr = &ndp->requests[i];
371 		nr->used = true;
372 		nr->flags = req_flags;
373 		ndp->request_id = i + 1;
374 		goto found;
375 	}
376 
377 found:
378 	spin_unlock_irqrestore(&ndp->lock, flags);
379 	return nr;
380 }
381 
/* Release a request slot: stop its timeout timer, detach and free the
 * command/response skbs, and mark the slot reusable.  When this was the
 * last pending event-driven request, kick the state-machine workqueue.
 */
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	/* Make sure the timeout handler is not (and will not be) running */
	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* NOTE(review): pending_req_num is decremented outside ndp->lock;
	 * presumably all decrementers run serialized in the state machine -
	 * confirm before relying on this elsewhere.
	 */
	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}
410 
411 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
412 {
413 	struct ncsi_dev_priv *ndp;
414 
415 	NCSI_FOR_EACH_DEV(ndp) {
416 		if (ndp->ndev.dev == dev)
417 			return &ndp->ndev;
418 	}
419 
420 	return NULL;
421 }
422 
/* Timer handler fired when a request got no response in time.  If a
 * response did arrive concurrently, the response handler owns the
 * release; otherwise notify netlink waiters (for netlink-driven
 * requests) and free the request here.
 */
static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct ncsi_cmd_pkt *cmd;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* If the request already had associated response,
	 * let the response handler to release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* NOTE(review): flags is compared with == rather than a bitmask
	 * test; fine while NETLINK_DRIVEN is used alone - confirm if flag
	 * combinations are ever introduced.
	 */
	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
		if (nr->cmd) {
			/* Find the package */
			cmd = (struct ncsi_cmd_pkt *)
			      skb_network_header(nr->cmd);
			ncsi_find_package_and_channel(ndp,
						      cmd->cmd.common.channel,
						      &np, &nc);
			ncsi_send_netlink_timeout(nr, np, nc);
		}
	}

	/* Release the request */
	ncsi_free_request(nr);
}
458 
/* One step of the channel-suspend state machine, advancing on each
 * completed command batch:
 *   suspend -> select package (SP) -> [optional GLS refresh on
 *   reshuffle] -> disable channel network Tx (DCNT) -> disable channel
 *   (DC) -> deselect package (DP, skipped if a sibling channel is still
 *   active) -> done.
 * On any transmit error the device drops back to the functional state.
 */
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc, *tmp;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	np = ndp->active_package;
	nc = ndp->active_channel;
	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		/* Fall through */
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		/* byte 0: hardware arbitration enable/disable */
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* To retrieve the last link states of channels in current
		 * package when current active channel needs fail over to
		 * another one. It means we will possibly select another
		 * channel as next active one. The link states of channels
		 * are most important factor of the selection. So we need
		 * accurate link states. Unfortunately, the link states on
		 * inactive channels can't be updated with LSC AEN in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		/* One GLS per channel in the package */
		ndp->pending_req_num = np->channel_num;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;

		nd->state = ncsi_dev_state_suspend_dcnt;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		ndp->pending_req_num = 1;

		/* Disable network Tx on the channel being suspended */
		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		/* byte 0 = 1: allow link down */
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		NCSI_FOR_EACH_CHANNEL(np, tmp) {
			/* If there is another channel active on this package
			 * do not deselect the package.
			 */
			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
				nd->state = ncsi_dev_state_suspend_done;
				break;
			}
		}
		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		/* Either continue a pending device reset or move on to the
		 * next queued channel.
		 */
		if (ndp->flags & NCSI_DEV_RESET)
			ncsi_reset_dev(nd);
		else
			ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}
586 
/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 *
 * Returns 0 with @nca populated when a filter slot was cleared, or -1
 * when no filter bits remain set on the channel.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	unsigned long flags;
	void *bitmap;
	int index;
	u16 vid;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);
	index = find_next_bit(bitmap, ncf->n_vids, 0);
	if (index >= ncf->n_vids) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}
	vid = ncf->vids[index];

	/* Free the slot locally before the command goes out */
	clear_bit(index, bitmap);
	ncf->vids[index] = 0;
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;
	return 0;
}
621 
622 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
623  * packet.
624  */
625 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
626 		       struct ncsi_cmd_arg *nca)
627 {
628 	struct ncsi_channel_vlan_filter *ncf;
629 	struct vlan_vid *vlan = NULL;
630 	unsigned long flags;
631 	int i, index;
632 	void *bitmap;
633 	u16 vid;
634 
635 	if (list_empty(&ndp->vlan_vids))
636 		return -1;
637 
638 	ncf = &nc->vlan_filter;
639 	bitmap = &ncf->bitmap;
640 
641 	spin_lock_irqsave(&nc->lock, flags);
642 
643 	rcu_read_lock();
644 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
645 		vid = vlan->vid;
646 		for (i = 0; i < ncf->n_vids; i++)
647 			if (ncf->vids[i] == vid) {
648 				vid = 0;
649 				break;
650 			}
651 		if (vid)
652 			break;
653 	}
654 	rcu_read_unlock();
655 
656 	if (!vid) {
657 		/* No VLAN ID is not set */
658 		spin_unlock_irqrestore(&nc->lock, flags);
659 		return -1;
660 	}
661 
662 	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
663 	if (index < 0 || index >= ncf->n_vids) {
664 		netdev_err(ndp->ndev.dev,
665 			   "Channel %u already has all VLAN filters set\n",
666 			   nc->id);
667 		spin_unlock_irqrestore(&nc->lock, flags);
668 		return -1;
669 	}
670 
671 	ncf->vids[index] = vid;
672 	set_bit(index, bitmap);
673 	spin_unlock_irqrestore(&nc->lock, flags);
674 
675 	nca->type = NCSI_PKT_CMD_SVF;
676 	nca->words[1] = vid;
677 	/* HW filter index starts at 1 */
678 	nca->bytes[6] = index + 1;
679 	nca->bytes[7] = 0x01;
680 
681 	return 0;
682 }
683 
684 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
685 
686 /* NCSI OEM Command APIs */
/* Send the Broadcom OEM "Get MAC Address" command.  The payload carries
 * the manufacturer ID in the first four bytes followed by the vendor
 * command code.  Returns the ncsi_xmit_cmd() result.
 */
static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
	/* NOTE(review): ntohl() here byte-swaps the constant into the
	 * first 4 bytes; presumably NCSI_OEM_MFR_BCM_ID is defined so
	 * the wire ends up big-endian - confirm against ncsi-pkt.h.
	 */
	*(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
	data[5] = NCSI_OEM_BCM_CMD_GMA;

	/* data is consumed synchronously inside ncsi_xmit_cmd() */
	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}
707 
/* Send the Mellanox OEM "Get MAC Address" command.  A union is used to
 * write the 32-bit manufacturer ID without aliasing the byte buffer.
 * Returns the ncsi_xmit_cmd() result.
 */
static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;

	/* data is consumed synchronously inside ncsi_xmit_cmd() */
	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}
732 
/* Table mapping manufacturer IDs to their OEM "Get MAC Address"
 * command handlers; consulted by ncsi_gma_handler().
 */
static struct ncsi_oem_gma_handler {
	unsigned int	mfr_id;
	int		(*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
};
741 
742 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
743 {
744 	struct ncsi_oem_gma_handler *nch = NULL;
745 	int i;
746 
747 	/* This function should only be called once, return if flag set */
748 	if (nca->ndp->gma_flag == 1)
749 		return -1;
750 
751 	/* Find gma handler for given manufacturer id */
752 	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
753 		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
754 			if (ncsi_oem_gma_handlers[i].handler)
755 				nch = &ncsi_oem_gma_handlers[i];
756 			break;
757 			}
758 	}
759 
760 	if (!nch) {
761 		netdev_err(nca->ndp->ndev.dev,
762 			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
763 			   mf_id);
764 		return -1;
765 	}
766 
767 	/* Set the flag for GMA command which should only be called once */
768 	nca->ndp->gma_flag = 1;
769 
770 	/* Get Mac address from NCSI device */
771 	return nch->handler(nca);
772 }
773 
774 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
775 
/* Determine if a given channel from the channel_queue should be used for Tx.
 * Only one channel may have Tx enabled; preference order is: no channel
 * already enabled, then the package's preferred channel (if it has
 * link), then any channel with link, finally this channel as a default
 * when nothing has link.
 */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
			       struct ncsi_channel *nc)
{
	struct ncsi_channel_mode *ncm;
	struct ncsi_channel *channel;
	struct ncsi_package *np;

	/* Check if any other channel has Tx enabled; a channel may have already
	 * been configured and removed from the channel queue.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!ndp->multi_package && np != nc->package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, channel) {
			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
			if (ncm->enable)
				return false;
		}
	}

	/* This channel is the preferred channel and has link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
		np = channel->package;
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			return np->preferred_channel == nc;
		}
	}

	/* This channel has link */
	if (ncsi_channel_has_link(nc))
		return true;

	/* Defer to any queued channel that does have link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
		if (ncsi_channel_has_link(channel))
			return false;

	/* No other channel has link; default to this one */
	return true;
}
817 
/* Change the active Tx channel in a multi-channel setup.
 *
 * Disables network Tx on @disable (found automatically when NULL) and
 * enables it on @enable (chosen automatically from whitelisted, active,
 * linked channels when NULL, preferring the package's preferred
 * channel).  Returns 0 on success, -1 when no switch is needed or no
 * candidate exists, or the ECNT transmit error.
 */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
			   struct ncsi_package *package,
			   struct ncsi_channel *disable,
			   struct ncsi_channel *enable)
{
	struct ncsi_cmd_arg nca;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	int ret = 0;

	if (!package->multi_channel && !ndp->multi_package)
		netdev_warn(ndp->ndev.dev,
			    "NCSI: Trying to update Tx channel in single-channel mode\n");
	nca.ndp = ndp;
	nca.req_flags = 0;

	/* Find current channel with Tx enabled */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (disable)
			break;
		if (!ndp->multi_package && np != package)
			continue;

		NCSI_FOR_EACH_CHANNEL(np, nc)
			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
				disable = nc;
				break;
			}
	}

	/* Find a suitable channel for Tx */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (enable)
			break;
		if (!ndp->multi_package && np != package)
			continue;
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;

		/* Preferred channel wins if it has link */
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			enable = np->preferred_channel;
			break;
		}

		/* Otherwise any whitelisted, active channel with link */
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & 0x1 << nc->id))
				continue;
			if (nc->state != NCSI_CHANNEL_ACTIVE)
				continue;
			if (ncsi_channel_has_link(nc)) {
				enable = nc;
				break;
			}
		}
	}

	/* Nothing to do when no switch is possible */
	if (disable == enable)
		return -1;

	if (!enable)
		return -1;

	if (disable) {
		nca.channel = disable->id;
		nca.package = disable->package->id;
		nca.type = NCSI_PKT_CMD_DCNT;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev,
				   "Error %d sending DCNT\n",
				   ret);
	}

	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);

	nca.channel = enable->id;
	nca.package = enable->package->id;
	nca.type = NCSI_PKT_CMD_ECNT;
	ret = ncsi_xmit_cmd(&nca);
	if (ret)
		netdev_err(ndp->ndev.dev,
			   "Error %d sending ECNT\n",
			   ret);

	return ret;
}
906 
/* One step of the channel configuration state machine.  Sequence:
 * select package (SP) -> clear initial state (CIS) -> optional OEM
 * get-MAC (GMA) -> clear/set VLAN filters (SVF) -> VLAN filtering
 * en/disable (EV/DV) -> set MAC (SMA) -> broadcast filter (EBF) ->
 * optional disable global mcast filter (DGMF) -> optional enable
 * network Tx (ECNT) -> enable channel (EC) -> optional AEN enable (AE)
 * -> get link status (GLS) -> done.  Each step advances when the
 * pending command batch completes; errors report link down.
 */
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		/* byte 0: hardware arbitration enable/disable */
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_oem_gma;
		break;
	case ncsi_dev_state_config_oem_gma:
		nd->state = ncsi_dev_state_config_clear_vids;
		ret = -1;

#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = np->id;
		nca.channel = nc->id;
		ndp->pending_req_num = 1;
		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */

		/* No GMA sent (disabled or no handler) - move on directly */
		if (ret < 0)
			schedule_work(&ndp->work);

		break;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
	case ncsi_dev_state_config_dgmf:
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
		/* Use first entry in unicast filter table. Note that
		 * the MAC filter table starts from entry 1 instead of
		 * 0.
		 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			/* if multicast global filtering is supported then
			 * disable it so that all multicast packet will be
			 * forwarded to management controller
			 */
			if (nc->caps[NCSI_CAP_GENERIC].cap &
			    NCSI_CAP_GENERIC_MC)
				nd->state = ncsi_dev_state_config_dgmf;
			else if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_dgmf) {
			nca.type = NCSI_PKT_CMD_DGMF;
			if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			if (np->preferred_channel &&
			    nc != np->preferred_channel)
				netdev_info(ndp->ndev.dev,
					    "NCSI: Tx failed over to channel %u\n",
					    nc->id);
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
			   nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;

		if (ndp->flags & NCSI_DEV_RESET) {
			/* A reset event happened during config, start it now */
			nc->reconfigure_needed = false;
			spin_unlock_irqrestore(&nc->lock, flags);
			ncsi_reset_dev(nd);
			break;
		}

		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		/* Remember this channel as "hot" only if it came up with link */
		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
		} else {
			hot_nc = NULL;
			netdev_dbg(ndp->ndev.dev,
				   "NCSI: channel %u link down after config\n",
				   nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}
1142 
/* Pick channel(s) to configure next and queue them for processing.
 *
 * Scans all whitelisted packages/channels for inactive channels.
 * "found" tracks a fallback candidate, with increasing preference:
 * first usable channel < previously hot channel < channel reporting
 * link up.  Channels with link (or every whitelisted channel when
 * multi_channel is set on the package) are queued immediately; if
 * nothing was queued, the fallback candidate is queued instead.
 *
 * Returns the result of ncsi_process_next_channel(), or -ENODEV if
 * no candidate channel exists at all (link is then reported down).
 */
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags, cflags;
	struct ncsi_package *np;
	bool with_link;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* By default the search is done once an inactive channel with up
	 * link is found, unless a preferred channel is set.
	 * If multi_package or multi_channel are configured all channels in the
	 * whitelist are added to the channel queue.
	 */
	found = NULL;
	with_link = false;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & (0x1 << nc->id)))
				continue;

			spin_lock_irqsave(&nc->lock, cflags);

			/* Skip channels already queued or not inactive */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, cflags);
				continue;
			}

			if (!found)
				found = nc;

			/* Prefer the channel that was hot last time */
			if (nc == hot_nc)
				found = nc;

			/* A channel with link up beats both of the above */
			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				with_link = true;
			}

			/* If multi_channel is enabled configure all valid
			 * channels whether or not they currently have link
			 * so they will have AENs enabled.
			 */
			if (with_link || np->multi_channel) {
				spin_lock_irqsave(&ndp->lock, flags);
				list_add_tail_rcu(&nc->link,
						  &ndp->channel_queue);
				spin_unlock_irqrestore(&ndp->lock, flags);

				netdev_dbg(ndp->ndev.dev,
					   "NCSI: Channel %u added to queue (link %s)\n",
					   nc->id,
					   ncm->data[2] & 0x1 ? "up" : "down");
			}

			spin_unlock_irqrestore(&nc->lock, cflags);

			if (with_link && !np->multi_channel)
				break;
		}
		if (with_link && !ndp->multi_package)
			break;
	}

	if (list_empty(&ndp->channel_queue) && found) {
		netdev_info(ndp->ndev.dev,
			    "NCSI: No channel with link found, configuring channel %u\n",
			    found->id);
		spin_lock_irqsave(&ndp->lock, flags);
		list_add_tail_rcu(&found->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
	} else if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found to configure!\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	return ncsi_process_next_channel(ndp);
}
1230 
1231 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1232 {
1233 	struct ncsi_package *np;
1234 	struct ncsi_channel *nc;
1235 	unsigned int cap;
1236 	bool has_channel = false;
1237 
1238 	/* The hardware arbitration is disabled if any one channel
1239 	 * doesn't support explicitly.
1240 	 */
1241 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1242 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1243 			has_channel = true;
1244 
1245 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1246 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1247 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1248 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1249 				ndp->flags &= ~NCSI_DEV_HWA;
1250 				return false;
1251 			}
1252 		}
1253 	}
1254 
1255 	if (has_channel) {
1256 		ndp->flags |= NCSI_DEV_HWA;
1257 		return true;
1258 	}
1259 
1260 	ndp->flags &= ~NCSI_DEV_HWA;
1261 	return false;
1262 }
1263 
/* Run one step of the package/channel probe state machine.
 *
 * Packages 0..7 are probed sequentially (tracked in
 * ndp->package_probe_id): all packages are deselected first, then
 * for each package ID a select is attempted, its channels get their
 * initial state cleared, and version / capabilities / link status
 * are retrieved per channel.  Once every package ID has been tried,
 * NCSI_DEV_PROBED is set and an active channel is chosen.  A
 * transmit failure aborts the probe and reports link down.
 */
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		/* Fall through */
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 1;

		/* Select the package currently being probed */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		ndp->active_package = ncsi_find_package(ndp,
							ndp->package_probe_id);
		if (!ndp->active_package) {
			/* No response to the select: skip straight to
			 * deselect / next package.
			 */
			nd->state = ncsi_dev_state_probe_dp;
			schedule_work(&ndp->work);
			break;
		}
		nd->state = ncsi_dev_state_probe_cis;
		schedule_work(&ndp->work);
		break;
	case ncsi_dev_state_probe_cis:
		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* Advance GVI -> GC -> GLS -> DP */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the current package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Probe next package */
		ndp->package_probe_id++;
		if (ndp->package_probe_id >= 8) {
			/* Probe finished */
			ndp->flags |= NCSI_DEV_PROBED;
			break;
		}
		nd->state = ncsi_dev_state_probe_package;
		ndp->active_package = NULL;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	if (ndp->flags & NCSI_DEV_PROBED) {
		/* Check if all packages have HWA support */
		ncsi_check_hwa(ndp);
		ncsi_choose_active_channel(ndp);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}
1401 
1402 static void ncsi_dev_work(struct work_struct *work)
1403 {
1404 	struct ncsi_dev_priv *ndp = container_of(work,
1405 			struct ncsi_dev_priv, work);
1406 	struct ncsi_dev *nd = &ndp->ndev;
1407 
1408 	switch (nd->state & ncsi_dev_state_major) {
1409 	case ncsi_dev_state_probe:
1410 		ncsi_probe_channel(ndp);
1411 		break;
1412 	case ncsi_dev_state_suspend:
1413 		ncsi_suspend_channel(ndp);
1414 		break;
1415 	case ncsi_dev_state_config:
1416 		ncsi_configure_channel(ndp);
1417 		break;
1418 	default:
1419 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1420 			    nd->state);
1421 	}
1422 }
1423 
1424 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1425 {
1426 	struct ncsi_channel *nc;
1427 	int old_state;
1428 	unsigned long flags;
1429 
1430 	spin_lock_irqsave(&ndp->lock, flags);
1431 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1432 				    struct ncsi_channel, link);
1433 	if (!nc) {
1434 		spin_unlock_irqrestore(&ndp->lock, flags);
1435 		goto out;
1436 	}
1437 
1438 	list_del_init(&nc->link);
1439 	spin_unlock_irqrestore(&ndp->lock, flags);
1440 
1441 	spin_lock_irqsave(&nc->lock, flags);
1442 	old_state = nc->state;
1443 	nc->state = NCSI_CHANNEL_INVISIBLE;
1444 	spin_unlock_irqrestore(&nc->lock, flags);
1445 
1446 	ndp->active_channel = nc;
1447 	ndp->active_package = nc->package;
1448 
1449 	switch (old_state) {
1450 	case NCSI_CHANNEL_INACTIVE:
1451 		ndp->ndev.state = ncsi_dev_state_config;
1452 		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1453 	                   nc->id);
1454 		ncsi_configure_channel(ndp);
1455 		break;
1456 	case NCSI_CHANNEL_ACTIVE:
1457 		ndp->ndev.state = ncsi_dev_state_suspend;
1458 		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1459 			   nc->id);
1460 		ncsi_suspend_channel(ndp);
1461 		break;
1462 	default:
1463 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1464 			   old_state, nc->package->id, nc->id);
1465 		ncsi_report_link(ndp, false);
1466 		return -EINVAL;
1467 	}
1468 
1469 	return 0;
1470 
1471 out:
1472 	ndp->active_channel = NULL;
1473 	ndp->active_package = NULL;
1474 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1475 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1476 		return ncsi_choose_active_channel(ndp);
1477 	}
1478 
1479 	ncsi_report_link(ndp, false);
1480 	return -ENODEV;
1481 }
1482 
/* Force every ACTIVE channel through a reconfiguration cycle (used
 * after the VLAN filter list changes).  Busy channels are only
 * marked dirty (reconfigure_needed) so that the in-flight operation
 * restarts them itself when it completes.
 *
 * Returns the number of channels actually queued for reconfig.
 */
static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if:
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) its ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
						ncsi_dev_state_config ||
						!list_empty(&nc->link)) {
					netdev_dbg(nd->dev,
						   "NCSI: channel %p marked dirty\n",
						   nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			/* Stop the monitor before taking the channel down to
			 * reconfigure it.
			 */
			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}
1532 
/* ndo_vlan_rx_add_vid handler: record @vid in the driver's VLAN id
 * list and kick all active channels so the new filter gets
 * programmed into the NCSI controller.
 *
 * Returns 0 on success (including when vid is 0, already known, or
 * no NCSI device is attached), -ENOSPC when the NCSI VLAN filter
 * table is already full, -ENOMEM on allocation failure, or the
 * result of ncsi_process_next_channel() when channels were kicked.
 */
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ncsi_dev_priv *ndp;
	unsigned int n_vids = 0;
	struct vlan_vid *vlan;
	struct ncsi_dev *nd;
	bool found = false;

	/* VLAN id 0 needs no filter entry */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: No net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Add the VLAN id to our internal list */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		n_vids++;
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u already registered\n",
				   vid);
			return 0;
		}
	}
	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
		netdev_warn(dev,
			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
			    vid, NCSI_MAX_VLAN_VIDS);
		return -ENOSPC;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->proto = proto;
	vlan->vid = vid;
	list_add_rcu(&vlan->list, &ndp->vlan_vids);

	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);

	/* Reconfigure active channels so they pick up the new filter */
	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1583 
/* ndo_vlan_rx_kill_vid handler: remove @vid from the driver's VLAN
 * id list and kick all active channels so the filter is dropped
 * from the NCSI controller.
 *
 * Returns 0 on success (including when vid is 0 or no NCSI device
 * is attached), -EINVAL when the vid was never registered, or the
 * result of ncsi_process_next_channel() when channels were kicked.
 */
int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	/* VLAN id 0 never had a filter entry */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	/* Reconfigure active channels so the filter is dropped */
	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1621 
/* Register @dev as an NCSI-managed device with @handler called on
 * state changes.  Idempotent: if the device is already registered,
 * the existing handle is returned.  Otherwise the private state is
 * allocated and initialized, the device is added to the global
 * list, the NCSI ethertype Rx handler is installed, and the generic
 * netlink interface is set up.
 *
 * Returns the ncsi_dev handle, or NULL on allocation failure.
 */
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device
	 * (GFP_ATOMIC — presumably callers may hold locks; confirm)
	 */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);
	/* All packages allowed until configured otherwise */
	ndp->package_whitelist = UINT_MAX;

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	/* Set up generic netlink interface */
	ncsi_init_netlink(dev);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);
1676 
1677 int ncsi_start_dev(struct ncsi_dev *nd)
1678 {
1679 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1680 
1681 	if (nd->state != ncsi_dev_state_registered &&
1682 	    nd->state != ncsi_dev_state_functional)
1683 		return -ENOTTY;
1684 
1685 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1686 		ndp->package_probe_id = 0;
1687 		nd->state = ncsi_dev_state_probe;
1688 		schedule_work(&ndp->work);
1689 		return 0;
1690 	}
1691 
1692 	return ncsi_reset_dev(nd);
1693 }
1694 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1695 
/* Stop the NCSI device: stop the per-channel monitors but leave the
 * channel states untouched so ncsi_start_dev() can tell which
 * channels were active, then report the link as down.
 */
void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor on any active channels. Don't reset the
	 * channel state so we know which were active when ncsi_start_dev()
	 * is next called.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			spin_unlock_irqrestore(&nc->lock, flags);

			/* Nothing should still be queued or mid-transition
			 * at stop time.
			 */
			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1727 
/* Reset the NCSI device state machine.
 *
 * If a suspend/config operation is in flight, only NCSI_DEV_RESET
 * is set — that operation will notice the flag on completion and
 * re-enter here.  Otherwise the pending channel queue is flushed,
 * the first ACTIVE channel found is marked INVISIBLE and its
 * suspend state machine is scheduled; once no active channel
 * remains, the reset is complete and a new active channel is chosen
 * immediately.
 *
 * Returns 0 when the reset was started or deferred, or the result
 * of ncsi_choose_active_channel() when the reset finishes here.
 */
int ncsi_reset_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_channel *nc, *active, *tmp;
	struct ncsi_package *np;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);

	if (!(ndp->flags & NCSI_DEV_RESET)) {
		/* Haven't been called yet, check states */
		switch (nd->state & ncsi_dev_state_major) {
		case ncsi_dev_state_registered:
		case ncsi_dev_state_probe:
			/* Not even probed yet - do nothing */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		case ncsi_dev_state_suspend:
		case ncsi_dev_state_config:
			/* Wait for the channel to finish its suspend/config
			 * operation; once it finishes it will check for
			 * NCSI_DEV_RESET and reset the state.
			 */
			ndp->flags |= NCSI_DEV_RESET;
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	} else {
		switch (nd->state) {
		case ncsi_dev_state_suspend_done:
		case ncsi_dev_state_config_done:
		case ncsi_dev_state_functional:
			/* Ok */
			break;
		default:
			/* Current reset operation happening */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	}

	if (!list_empty(&ndp->channel_queue)) {
		/* Clear any channel queue we may have interrupted */
		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
			list_del_init(&nc->link);
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Find one active channel to suspend; remaining ones are
	 * handled on subsequent re-entries.
	 */
	active = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (nc->state == NCSI_CHANNEL_ACTIVE) {
				active = nc;
				nc->state = NCSI_CHANNEL_INVISIBLE;
				spin_unlock_irqrestore(&nc->lock, flags);
				ncsi_stop_channel_monitor(nc);
				break;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
		if (active)
			break;
	}

	if (!active) {
		/* Done */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->flags &= ~NCSI_DEV_RESET;
		spin_unlock_irqrestore(&ndp->lock, flags);
		return ncsi_choose_active_channel(ndp);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	ndp->flags |= NCSI_DEV_RESET;
	ndp->active_channel = active;
	ndp->active_package = active->package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	nd->state = ncsi_dev_state_suspend;
	schedule_work(&ndp->work);
	return 0;
}
1813 
1814 void ncsi_unregister_dev(struct ncsi_dev *nd)
1815 {
1816 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1817 	struct ncsi_package *np, *tmp;
1818 	unsigned long flags;
1819 
1820 	dev_remove_pack(&ndp->ptype);
1821 
1822 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1823 		ncsi_remove_package(np);
1824 
1825 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1826 	list_del_rcu(&ndp->node);
1827 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1828 
1829 	ncsi_unregister_netlink(nd->dev);
1830 
1831 	kfree(ndp);
1832 }
1833 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1834