xref: /linux/net/dcb/dcbnl.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * Copyright (c) 2008-2011, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Author: Lucy Liu <lucy.liu@intel.com>
18  */
19 
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/module.h>
29 #include <net/sock.h>
30 
31 /*
32  * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33  * intended to allow network traffic with differing requirements
34  * (highly reliable, no drops vs. best effort vs. low latency) to operate
35  * and co-exist on Ethernet.  Current DCB features are:
36  *
37  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
38  *   framework for assigning bandwidth guarantees to traffic classes.
39  *
40  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
41  *   can work independently for each 802.1p priority.
42  *
43  * Congestion Notification - provides a mechanism for end-to-end congestion
44  *   control for protocols which do not have built-in congestion management.
45  *
46  * More information about the emerging standards for these Ethernet features
47  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
48  *
49  * This file implements an rtnetlink interface to allow configuration of DCB
50  * features for capable devices.
51  */
52 
53 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
54 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
55 MODULE_LICENSE("GPL");
56 
57 /**************** DCB attribute policies *************************************/
58 
59 /* Top-level DCB netlink attributes policy (keyed by DCB_ATTR_*).
 * Nested containers below are validated by their own per-nest policies. */
60 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
61 	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
62 	[DCB_ATTR_STATE]       = {.type = NLA_U8},
63 	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
64 	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
65 	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
66 	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
67 	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
68 	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
69 	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
70 	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
71 	[DCB_ATTR_IEEE]	       = {.type = NLA_NESTED},
72 	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
73 	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
74 };
75 
76 /* DCB priority flow control to User Priority nested attributes */
77 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
78 	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
79 	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
80 	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
81 	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
82 	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
83 	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
84 	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
85 	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
86 	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
87 };
88 
89 /* DCB priority grouping nested attributes */
90 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
91 	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
92 	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
93 	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
94 	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
95 	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
96 	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
97 	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
98 	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
99 	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
100 	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
101 	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
102 	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
103 	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
104 	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
105 	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
106 	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
107 	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
108 	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
109 };
110 
111 /* DCB traffic class nested attributes. */
112 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
113 	[DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
114 	[DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
115 	[DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
116 	[DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
117 	[DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
118 };
119 
120 /* DCB capabilities nested attributes. */
121 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
122 	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
123 	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
124 	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
125 	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
126 	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
127 	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
128 	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
129 	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
130 	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
131 };
132 
133 /* DCB number of traffic classes nested attributes. */
134 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
135 	[DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
136 	[DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
137 	[DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
138 };
139 
140 /* DCB BCN nested attributes. */
141 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
142 	[DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
143 	[DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
144 	[DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
145 	[DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
146 	[DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
147 	[DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
148 	[DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
149 	[DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
150 	[DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
151 	[DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
152 	[DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
153 	[DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
154 	[DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
155 	[DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
156 	[DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
157 	[DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
158 	[DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
159 	[DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
160 	[DCB_BCN_ATTR_W]            = {.type = NLA_U32},
161 	[DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
162 	[DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
163 	[DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
164 	[DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
165 	[DCB_BCN_ATTR_C]            = {.type = NLA_U32},
166 	[DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
167 };
168 
169 /* DCB APP nested attributes. */
170 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
171 	[DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
172 	[DCB_APP_ATTR_ID]           = {.type = NLA_U16},
173 	[DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
174 };
175 
176 /* IEEE 802.1Qaz nested attributes. */
177 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
178 	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
179 	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
180 	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
181 };
182 
183 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
184 	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
185 };
186 
187 /* DCB feature configuration nested attributes. */
188 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
189 	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
190 	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
191 	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
192 	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
193 };
194 
/* Global list of registered dcb_app entries, protected by dcb_lock. */
195 static LIST_HEAD(dcb_app_list);
196 static DEFINE_SPINLOCK(dcb_lock);
197 
198 /* standard netlink reply call */
/* Build and unicast a minimal DCB reply carrying a single u8 attribute
 * @attr with value @value.  Returns 0 on success, negative errno on
 * failure.  Note: NLMSG_NEW() jumps to the nlmsg_failure label when the
 * skb has no room for the header. */
199 static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
200                        u32 seq, u16 flags)
201 {
202 	struct sk_buff *dcbnl_skb;
203 	struct dcbmsg *dcb;
204 	struct nlmsghdr *nlh;
205 	int ret = -EINVAL;
206 
207 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
208 	if (!dcbnl_skb)
209 		return ret;
210 
211 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
212 
213 	dcb = NLMSG_DATA(nlh);
214 	dcb->dcb_family = AF_UNSPEC;
215 	dcb->cmd = cmd;
216 	dcb->dcb_pad = 0;
217 
218 	ret = nla_put_u8(dcbnl_skb, attr, value);
219 	if (ret)
220 		goto err;
221 
222 	/* end the message, assign the nlmsg_len. */
223 	nlmsg_end(dcbnl_skb, nlh);
	/* rtnl_unicast() consumes the skb even on failure, so do not free
	 * it on this path. */
224 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
225 	if (ret)
226 		return -EINVAL;
227 
228 	return 0;
229 nlmsg_failure:
230 err:
231 	kfree_skb(dcbnl_skb);
232 	return ret;
233 }
234 
235 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
236                           u32 pid, u32 seq, u16 flags)
237 {
238 	int ret = -EINVAL;
239 
240 	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
241 	if (!netdev->dcbnl_ops->getstate)
242 		return ret;
243 
244 	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
245 	                  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
246 
247 	return ret;
248 }
249 
/* RTM_GETDCB handler for DCB_CMD_PFC_GCFG: return the per-user-priority
 * PFC configuration the client asked for, nested under DCB_ATTR_PFC_CFG.
 * DCB_PFC_UP_ATTR_ALL in the request selects all eight priorities. */
250 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
251                            u32 pid, u32 seq, u16 flags)
252 {
253 	struct sk_buff *dcbnl_skb;
254 	struct nlmsghdr *nlh;
255 	struct dcbmsg *dcb;
256 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
257 	u8 value;
258 	int ret = -EINVAL;
259 	int i;
260 	int getall = 0;
261 
262 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
263 		return ret;
264 
265 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
266 	                       tb[DCB_ATTR_PFC_CFG],
267 	                       dcbnl_pfc_up_nest);
268 	if (ret)
269 		goto err_out;
270 
271 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
272 	if (!dcbnl_skb)
273 		goto err_out;
274 
275 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
276 
277 	dcb = NLMSG_DATA(nlh);
278 	dcb->dcb_family = AF_UNSPEC;
279 	dcb->cmd = DCB_CMD_PFC_GCFG;
280 
281 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
282 	if (!nest)
283 		goto err;
284 
285 	if (data[DCB_PFC_UP_ATTR_ALL])
286 		getall = 1;
287 
288 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
289 		if (!getall && !data[i])
290 			continue;
291 
		/* Attribute ids are contiguous, so i - DCB_PFC_UP_ATTR_0
		 * is the 0-based user priority. */
292 		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
293 		                             &value);
294 		ret = nla_put_u8(dcbnl_skb, i, value);
295 
296 		if (ret) {
297 			nla_nest_cancel(dcbnl_skb, nest);
298 			goto err;
299 		}
300 	}
301 	nla_nest_end(dcbnl_skb, nest);
302 
303 	nlmsg_end(dcbnl_skb, nlh);
304 
	/* rtnl_unicast() consumes the skb; do not free it after this. */
305 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
306 	if (ret)
307 		goto err_out;
308 
309 	return 0;
310 nlmsg_failure:
311 err:
312 	kfree_skb(dcbnl_skb);
313 err_out:
314 	return -EINVAL;
315 }
316 
317 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
318                                 u32 pid, u32 seq, u16 flags)
319 {
320 	struct sk_buff *dcbnl_skb;
321 	struct nlmsghdr *nlh;
322 	struct dcbmsg *dcb;
323 	u8 perm_addr[MAX_ADDR_LEN];
324 	int ret = -EINVAL;
325 
326 	if (!netdev->dcbnl_ops->getpermhwaddr)
327 		return ret;
328 
329 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
330 	if (!dcbnl_skb)
331 		goto err_out;
332 
333 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
334 
335 	dcb = NLMSG_DATA(nlh);
336 	dcb->dcb_family = AF_UNSPEC;
337 	dcb->cmd = DCB_CMD_GPERM_HWADDR;
338 
339 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
340 
341 	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
342 	              perm_addr);
343 
344 	nlmsg_end(dcbnl_skb, nlh);
345 
346 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
347 	if (ret)
348 		goto err_out;
349 
350 	return 0;
351 
352 nlmsg_failure:
353 	kfree_skb(dcbnl_skb);
354 err_out:
355 	return -EINVAL;
356 }
357 
/* RTM_GETDCB handler for DCB_CMD_GCAP: report the requested capability
 * flags nested under DCB_ATTR_CAP.  DCB_CAP_ATTR_ALL selects every
 * capability; individual caps a driver rejects are simply omitted. */
358 static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
359                         u32 pid, u32 seq, u16 flags)
360 {
361 	struct sk_buff *dcbnl_skb;
362 	struct nlmsghdr *nlh;
363 	struct dcbmsg *dcb;
364 	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
365 	u8 value;
366 	int ret = -EINVAL;
367 	int i;
368 	int getall = 0;
369 
370 	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
371 		return ret;
372 
373 	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
374 	                       dcbnl_cap_nest);
375 	if (ret)
376 		goto err_out;
377 
378 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
379 	if (!dcbnl_skb)
380 		goto err_out;
381 
382 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
383 
384 	dcb = NLMSG_DATA(nlh);
385 	dcb->dcb_family = AF_UNSPEC;
386 	dcb->cmd = DCB_CMD_GCAP;
387 
388 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
389 	if (!nest)
390 		goto err;
391 
392 	if (data[DCB_CAP_ATTR_ALL])
393 		getall = 1;
394 
395 	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
396 		if (!getall && !data[i])
397 			continue;
398 
		/* getcap() returning non-zero means the driver does not
		 * report this capability; skip it rather than fail. */
399 		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
400 			ret = nla_put_u8(dcbnl_skb, i, value);
401 
402 			if (ret) {
403 				nla_nest_cancel(dcbnl_skb, nest);
404 				goto err;
405 			}
406 		}
407 	}
408 	nla_nest_end(dcbnl_skb, nest);
409 
410 	nlmsg_end(dcbnl_skb, nlh);
411 
412 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
413 	if (ret)
414 		goto err_out;
415 
416 	return 0;
417 nlmsg_failure:
418 err:
419 	kfree_skb(dcbnl_skb);
420 err_out:
421 	return -EINVAL;
422 }
423 
/* RTM_GETDCB handler for DCB_CMD_GNUMTCS: report the number of traffic
 * classes supported per feature (PG, PFC) nested under DCB_ATTR_NUMTCS.
 * Unlike dcbnl_getcap(), a driver error for any requested entry aborts
 * the whole reply. */
424 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
425                            u32 pid, u32 seq, u16 flags)
426 {
427 	struct sk_buff *dcbnl_skb;
428 	struct nlmsghdr *nlh;
429 	struct dcbmsg *dcb;
430 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
431 	u8 value;
432 	int ret = -EINVAL;
433 	int i;
434 	int getall = 0;
435 
436 	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
437 		return ret;
438 
439 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
440 	                       dcbnl_numtcs_nest);
441 	if (ret) {
442 		ret = -EINVAL;
443 		goto err_out;
444 	}
445 
446 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
447 	if (!dcbnl_skb) {
448 		ret = -EINVAL;
449 		goto err_out;
450 	}
451 
452 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
453 
454 	dcb = NLMSG_DATA(nlh);
455 	dcb->dcb_family = AF_UNSPEC;
456 	dcb->cmd = DCB_CMD_GNUMTCS;
457 
458 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
459 	if (!nest) {
460 		ret = -EINVAL;
461 		goto err;
462 	}
463 
464 	if (data[DCB_NUMTCS_ATTR_ALL])
465 		getall = 1;
466 
467 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
468 		if (!getall && !data[i])
469 			continue;
470 
471 		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
472 		if (!ret) {
473 			ret = nla_put_u8(dcbnl_skb, i, value);
474 
475 			if (ret) {
476 				nla_nest_cancel(dcbnl_skb, nest);
477 				ret = -EINVAL;
478 				goto err;
479 			}
480 		} else {
			/* driver error: propagate its return code */
481 			goto err;
482 		}
483 	}
484 	nla_nest_end(dcbnl_skb, nest);
485 
486 	nlmsg_end(dcbnl_skb, nlh);
487 
488 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
489 	if (ret) {
490 		ret = -EINVAL;
491 		goto err_out;
492 	}
493 
494 	return 0;
495 nlmsg_failure:
496 err:
497 	kfree_skb(dcbnl_skb);
498 err_out:
499 	return ret;
500 }
501 
/* RTM_SETDCB handler for DCB_CMD_SNUMTCS: push per-feature traffic
 * class counts to the driver.  The reply carries !!ret, i.e. 0 on
 * success and 1 if any driver call failed (the loop stops at the first
 * failure and still sends the status reply). */
502 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
503                            u32 pid, u32 seq, u16 flags)
504 {
505 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
506 	int ret = -EINVAL;
507 	u8 value;
508 	int i;
509 
510 	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
511 		return ret;
512 
513 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
514 	                       dcbnl_numtcs_nest);
515 
516 	if (ret) {
517 		ret = -EINVAL;
518 		goto err;
519 	}
520 
521 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
522 		if (data[i] == NULL)
523 			continue;
524 
525 		value = nla_get_u8(data[i]);
526 
527 		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
528 
529 		if (ret)
			/* deliberate: fall through to the reply below with
			 * the driver's error captured in ret */
530 			goto operr;
531 	}
532 
533 operr:
534 	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
535 	                  DCB_ATTR_NUMTCS, pid, seq, flags);
536 
537 err:
538 	return ret;
539 }
540 
541 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
542                              u32 pid, u32 seq, u16 flags)
543 {
544 	int ret = -EINVAL;
545 
546 	if (!netdev->dcbnl_ops->getpfcstate)
547 		return ret;
548 
549 	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
550 	                  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
551 	                  pid, seq, flags);
552 
553 	return ret;
554 }
555 
556 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
557                              u32 pid, u32 seq, u16 flags)
558 {
559 	int ret = -EINVAL;
560 	u8 value;
561 
562 	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
563 		return ret;
564 
565 	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
566 
567 	netdev->dcbnl_ops->setpfcstate(netdev, value);
568 
569 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
570 	                  pid, seq, flags);
571 
572 	return ret;
573 }
574 
/* RTM_GETDCB handler for DCB_CMD_GAPP: look up the user priority for an
 * application identified by (idtype, id) and echo the triple back under
 * DCB_ATTR_APP.  Falls back to the generic dcb_getapp() table when the
 * driver has no getapp op. */
575 static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
576                         u32 pid, u32 seq, u16 flags)
577 {
578 	struct sk_buff *dcbnl_skb;
579 	struct nlmsghdr *nlh;
580 	struct dcbmsg *dcb;
581 	struct nlattr *app_nest;
582 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
583 	u16 id;
584 	u8 up, idtype;
585 	int ret = -EINVAL;
586 
587 	if (!tb[DCB_ATTR_APP])
588 		goto out;
589 
590 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
591 	                       dcbnl_app_nest);
592 	if (ret)
593 		goto out;
594 
595 	ret = -EINVAL;
596 	/* all must be non-null */
597 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
598 	    (!app_tb[DCB_APP_ATTR_ID]))
599 		goto out;
600 
601 	/* either by eth type or by socket number */
602 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
603 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
604 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
605 		goto out;
606 
607 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
608 
609 	if (netdev->dcbnl_ops->getapp) {
610 		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
611 	} else {
		/* no driver op: consult the shared dcb_app_list table */
612 		struct dcb_app app = {
613 					.selector = idtype,
614 					.protocol = id,
615 				     };
616 		up = dcb_getapp(netdev, &app);
617 	}
618 
619 	/* send this back */
620 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
621 	if (!dcbnl_skb)
622 		goto out;
623 
624 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
625 	dcb = NLMSG_DATA(nlh);
626 	dcb->dcb_family = AF_UNSPEC;
627 	dcb->cmd = DCB_CMD_GAPP;
628 
629 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
630 	if (!app_nest)
631 		goto out_cancel;
632 
633 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
634 	if (ret)
635 		goto out_cancel;
636 
637 	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
638 	if (ret)
639 		goto out_cancel;
640 
641 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
642 	if (ret)
643 		goto out_cancel;
644 
645 	nla_nest_end(dcbnl_skb, app_nest);
646 	nlmsg_end(dcbnl_skb, nlh);
647 
648 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
649 	if (ret)
650 		goto nlmsg_failure;
651 
652 	goto out;
653 
654 out_cancel:
655 	nla_nest_cancel(dcbnl_skb, app_nest);
656 nlmsg_failure:
657 	kfree_skb(dcbnl_skb);
658 out:
659 	return ret;
660 }
661 
/* RTM_SETDCB handler for DCB_CMD_SAPP: set the user priority for an
 * application identified by (idtype, id).  Uses the driver's setapp op
 * when present, otherwise the generic dcb_setapp() table.  The driver
 * status is reported back in the reply value. */
662 static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
663                         u32 pid, u32 seq, u16 flags)
664 {
665 	int err, ret = -EINVAL;
666 	u16 id;
667 	u8 up, idtype;
668 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
669 
670 	if (!tb[DCB_ATTR_APP])
671 		goto out;
672 
673 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
674 	                       dcbnl_app_nest);
675 	if (ret)
676 		goto out;
677 
678 	ret = -EINVAL;
679 	/* all must be non-null */
680 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
681 	    (!app_tb[DCB_APP_ATTR_ID]) ||
682 	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
683 		goto out;
684 
685 	/* either by eth type or by socket number */
686 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
687 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
688 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
689 		goto out;
690 
691 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
692 	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
693 
694 	if (netdev->dcbnl_ops->setapp) {
695 		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
696 	} else {
		/* no driver op: store in the shared dcb_app_list table */
697 		struct dcb_app app;
698 		app.selector = idtype;
699 		app.protocol = id;
700 		app.priority = up;
701 		err = dcb_setapp(netdev, &app);
702 	}
703 
704 	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
705 			  pid, seq, flags);
706 out:
707 	return ret;
708 }
709 
/* Common worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.  @dir selects
 * the direction: 0 = Tx, 1 = Rx.  Builds a DCB_ATTR_PG_CFG reply with
 * one nested parameter set per requested traffic class plus the
 * requested bandwidth-group percentages.  The _ALL attributes in the
 * request select every TC / bandwidth group. */
710 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
711                              u32 pid, u32 seq, u16 flags, int dir)
712 {
713 	struct sk_buff *dcbnl_skb;
714 	struct nlmsghdr *nlh;
715 	struct dcbmsg *dcb;
716 	struct nlattr *pg_nest, *param_nest, *data;
717 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
718 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
719 	u8 prio, pgid, tc_pct, up_map;
720 	int ret  = -EINVAL;
721 	int getall = 0;
722 	int i;
723 
724 	if (!tb[DCB_ATTR_PG_CFG] ||
725 	    !netdev->dcbnl_ops->getpgtccfgtx ||
726 	    !netdev->dcbnl_ops->getpgtccfgrx ||
727 	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
728 	    !netdev->dcbnl_ops->getpgbwgcfgrx)
729 		return ret;
730 
731 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
732 	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
733 
734 	if (ret)
735 		goto err_out;
736 
737 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
738 	if (!dcbnl_skb)
739 		goto err_out;
740 
741 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
742 
743 	dcb = NLMSG_DATA(nlh);
744 	dcb->dcb_family = AF_UNSPEC;
745 	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
746 
747 	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
748 	if (!pg_nest)
749 		goto err;
750 
751 	if (pg_tb[DCB_PG_ATTR_TC_ALL])
752 		getall = 1;
753 
754 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
755 		if (!getall && !pg_tb[i])
756 			continue;
757 
		/* with TC_ALL, the one TC_ALL nest supplies the parameter
		 * filter for every traffic class */
758 		if (pg_tb[DCB_PG_ATTR_TC_ALL])
759 			data = pg_tb[DCB_PG_ATTR_TC_ALL];
760 		else
761 			data = pg_tb[i];
762 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
763 				       data, dcbnl_tc_param_nest);
764 		if (ret)
765 			goto err_pg;
766 
767 		param_nest = nla_nest_start(dcbnl_skb, i);
768 		if (!param_nest)
769 			goto err_pg;
770 
		/* defaults reported when the driver leaves a field untouched */
771 		pgid = DCB_ATTR_VALUE_UNDEFINED;
772 		prio = DCB_ATTR_VALUE_UNDEFINED;
773 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
774 		up_map = DCB_ATTR_VALUE_UNDEFINED;
775 
776 		if (dir) {
777 			/* Rx */
778 			netdev->dcbnl_ops->getpgtccfgrx(netdev,
779 						i - DCB_PG_ATTR_TC_0, &prio,
780 						&pgid, &tc_pct, &up_map);
781 		} else {
782 			/* Tx */
783 			netdev->dcbnl_ops->getpgtccfgtx(netdev,
784 						i - DCB_PG_ATTR_TC_0, &prio,
785 						&pgid, &tc_pct, &up_map);
786 		}
787 
788 		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
789 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
790 			ret = nla_put_u8(dcbnl_skb,
791 			                 DCB_TC_ATTR_PARAM_PGID, pgid);
792 			if (ret)
793 				goto err_param;
794 		}
795 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
796 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
797 			ret = nla_put_u8(dcbnl_skb,
798 			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
799 			if (ret)
800 				goto err_param;
801 		}
802 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
803 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
804 			ret = nla_put_u8(dcbnl_skb,
805 			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
806 			if (ret)
807 				goto err_param;
808 		}
809 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
810 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
811 			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
812 			                 tc_pct);
813 			if (ret)
814 				goto err_param;
815 		}
816 		nla_nest_end(dcbnl_skb, param_nest);
817 	}
818 
	/* getall is re-evaluated for the bandwidth-group attribute range */
819 	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
820 		getall = 1;
821 	else
822 		getall = 0;
823 
824 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
825 		if (!getall && !pg_tb[i])
826 			continue;
827 
828 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
829 
830 		if (dir) {
831 			/* Rx */
832 			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
833 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
834 		} else {
835 			/* Tx */
836 			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
837 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
838 		}
839 		ret = nla_put_u8(dcbnl_skb, i, tc_pct);
840 
841 		if (ret)
842 			goto err_pg;
843 	}
844 
845 	nla_nest_end(dcbnl_skb, pg_nest);
846 
847 	nlmsg_end(dcbnl_skb, nlh);
848 
849 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
850 	if (ret)
851 		goto err_out;
852 
853 	return 0;
854 
855 err_param:
856 	nla_nest_cancel(dcbnl_skb, param_nest);
857 err_pg:
858 	nla_nest_cancel(dcbnl_skb, pg_nest);
859 nlmsg_failure:
860 err:
861 	kfree_skb(dcbnl_skb);
862 err_out:
	/* all failure paths collapse to -EINVAL toward the caller */
863 	ret  = -EINVAL;
864 	return ret;
865 }
866 
867 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
868                              u32 pid, u32 seq, u16 flags)
869 {
870 	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
871 }
872 
873 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
874                              u32 pid, u32 seq, u16 flags)
875 {
876 	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
877 }
878 
879 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
880                           u32 pid, u32 seq, u16 flags)
881 {
882 	int ret = -EINVAL;
883 	u8 value;
884 
885 	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
886 		return ret;
887 
888 	value = nla_get_u8(tb[DCB_ATTR_STATE]);
889 
890 	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
891 	                  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
892 	                  pid, seq, flags);
893 
894 	return ret;
895 }
896 
897 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
898                            u32 pid, u32 seq, u16 flags)
899 {
900 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
901 	int i;
902 	int ret = -EINVAL;
903 	u8 value;
904 
905 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
906 		return ret;
907 
908 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
909 	                       tb[DCB_ATTR_PFC_CFG],
910 	                       dcbnl_pfc_up_nest);
911 	if (ret)
912 		goto err;
913 
914 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
915 		if (data[i] == NULL)
916 			continue;
917 		value = nla_get_u8(data[i]);
918 		netdev->dcbnl_ops->setpfccfg(netdev,
919 			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
920 	}
921 
922 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
923 	                  pid, seq, flags);
924 err:
925 	return ret;
926 }
927 
928 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
929                         u32 pid, u32 seq, u16 flags)
930 {
931 	int ret = -EINVAL;
932 
933 	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
934 		return ret;
935 
936 	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
937 	                  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
938 
939 	return ret;
940 }
941 
/* Common worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.  @dir selects
 * the direction: 0 = Tx, 1 = Rx.  Pushes per-traffic-class parameters
 * and per-bandwidth-group percentages to the driver; fields absent from
 * the request are passed as DCB_ATTR_VALUE_UNDEFINED. */
942 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
943                              u32 pid, u32 seq, u16 flags, int dir)
944 {
945 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
946 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
947 	int ret = -EINVAL;
948 	int i;
949 	u8 pgid;
950 	u8 up_map;
951 	u8 prio;
952 	u8 tc_pct;
953 
954 	if (!tb[DCB_ATTR_PG_CFG] ||
955 	    !netdev->dcbnl_ops->setpgtccfgtx ||
956 	    !netdev->dcbnl_ops->setpgtccfgrx ||
957 	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
958 	    !netdev->dcbnl_ops->setpgbwgcfgrx)
959 		return ret;
960 
961 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
962 	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
963 	if (ret)
964 		goto err;
965 
966 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
967 		if (!pg_tb[i])
968 			continue;
969 
970 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
971 		                       pg_tb[i], dcbnl_tc_param_nest);
972 		if (ret)
973 			goto err;
974 
		/* unspecified fields are forwarded as "undefined" so the
		 * driver can leave them unchanged */
975 		pgid = DCB_ATTR_VALUE_UNDEFINED;
976 		prio = DCB_ATTR_VALUE_UNDEFINED;
977 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
978 		up_map = DCB_ATTR_VALUE_UNDEFINED;
979 
980 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
981 			prio =
982 			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
983 
984 		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
985 			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
986 
987 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
988 			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
989 
990 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
991 			up_map =
992 			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
993 
994 		/* dir: Tx = 0, Rx = 1 */
995 		if (dir) {
996 			/* Rx */
997 			netdev->dcbnl_ops->setpgtccfgrx(netdev,
998 				i - DCB_PG_ATTR_TC_0,
999 				prio, pgid, tc_pct, up_map);
1000 		} else {
1001 			/* Tx */
1002 			netdev->dcbnl_ops->setpgtccfgtx(netdev,
1003 				i - DCB_PG_ATTR_TC_0,
1004 				prio, pgid, tc_pct, up_map);
1005 		}
1006 	}
1007 
1008 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1009 		if (!pg_tb[i])
1010 			continue;
1011 
1012 		tc_pct = nla_get_u8(pg_tb[i]);
1013 
1014 		/* dir: Tx = 0, Rx = 1 */
1015 		if (dir) {
1016 			/* Rx */
1017 			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
1018 					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1019 		} else {
1020 			/* Tx */
1021 			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
1022 					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1023 		}
1024 	}
1025 
1026 	ret = dcbnl_reply(0, RTM_SETDCB,
1027 			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
1028 			  DCB_ATTR_PG_CFG, pid, seq, flags);
1029 
1030 err:
1031 	return ret;
1032 }
1033 
1034 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
1035                              u32 pid, u32 seq, u16 flags)
1036 {
1037 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
1038 }
1039 
1040 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
1041                              u32 pid, u32 seq, u16 flags)
1042 {
1043 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
1044 }
1045 
/*
 * dcbnl_bcn_getcfg - handle DCB_CMD_BCN_GCFG (RTM_GETDCB)
 *
 * Build and unicast a reply carrying the device's Backward Congestion
 * Notification (BCN) configuration.  Per-priority rate-limiter (RP)
 * enable state is read via ->getbcnrp() and emitted as u8 attributes;
 * the remaining BCN parameters are read via ->getbcncfg() and emitted
 * as u32 attributes.  DCB_BCN_ATTR_ALL in the request selects every
 * attribute.  Returns 0 on success, -EINVAL on any failure.
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret  = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to the nlmsg_failure label on failure. */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* Per-priority RP enable flags, one u8 per attribute id. */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
		                            &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* Remaining BCN parameters, one u32 per attribute id. */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
		                             &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	/* All failure modes are collapsed to -EINVAL for the caller. */
	ret  = -EINVAL;
	return ret;
}
1128 
1129 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1130                             u32 pid, u32 seq, u16 flags)
1131 {
1132 	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
1133 	int i;
1134 	int ret = -EINVAL;
1135 	u8 value_byte;
1136 	u32 value_int;
1137 
1138 	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
1139 	    !netdev->dcbnl_ops->setbcnrp)
1140 		return ret;
1141 
1142 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
1143 	                       tb[DCB_ATTR_BCN],
1144 	                       dcbnl_pfc_up_nest);
1145 	if (ret)
1146 		goto err;
1147 
1148 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1149 		if (data[i] == NULL)
1150 			continue;
1151 		value_byte = nla_get_u8(data[i]);
1152 		netdev->dcbnl_ops->setbcnrp(netdev,
1153 			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
1154 	}
1155 
1156 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1157 		if (data[i] == NULL)
1158 			continue;
1159 		value_int = nla_get_u32(data[i]);
1160 		netdev->dcbnl_ops->setbcncfg(netdev,
1161 	                                     i, value_int);
1162 	}
1163 
1164 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
1165 	                  pid, seq, flags);
1166 err:
1167 	return ret;
1168 }
1169 
/*
 * dcbnl_build_peer_app - append the peer's APP table to a message
 * @netdev: device whose driver is queried
 * @skb: message under construction
 * @app_nested_type: attribute id for the enclosing nest
 * @app_info_type: attribute id for the dcb_peer_app_info header
 *	(pass 0 to omit it)
 * @app_entry_type: attribute id used for each dcb_app entry
 *
 * Returns 0 on success, -ENOMEM if the temporary table cannot be
 * allocated, or -EMSGSIZE if the skb runs out of room.  Driver
 * failures are deliberately swallowed (see comment below): they just
 * leave the nest empty and the function still returns 0.
 */
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;


	/*
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/*
		 * build the message, from here on the only possible failure
		 * is due to the skb size: NLA_PUT jumps to nla_put_failure
		 * with err == -EMSGSIZE, skipping the "err = 0" below.
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type)
			NLA_PUT(skb, app_info_type, sizeof(info), &info);

		for (i = 0; i < app_count; i++)
			NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
				&table[i]);

		nla_nest_end(skb, app);
	}
	/* Mask any driver error: callers treat a missing nest as "no data". */
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
1223 
/*
 * dcbnl_ieee_fill - append the IEEE 802.1Qaz state of @netdev to @skb
 *
 * Handles the fill side of IEEE 802.1Qaz GET commands and IEEE
 * notifications: emits DCB_ATTR_IFNAME and a DCB_ATTR_IEEE nest with
 * the local ETS/PFC configuration and APP table, then - when the
 * driver provides the hooks - the peer's ETS/PFC/APP data, and finally
 * DCB_ATTR_DCBX when the driver reports a DCBX mode.
 * Returns 0 on success or -EMSGSIZE when the skb runs out of space.
 */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err = -EMSGSIZE;

	/* NLA_PUT* macros jump to nla_put_failure when the skb is full. */
	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	/* Local ETS/PFC: a driver error just omits the attribute. */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		goto nla_put_failure;

	/* dcb_lock covers the shared APP list walk and the dcbx read. */
	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				spin_unlock(&dcb_lock);
				goto nla_put_failure;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, ieee);
	/* DCBX mode goes outside the IEEE nest, only when supported. */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return err;
}
1313 
/*
 * dcbnl_cee_pg_fill - append a CEE priority-group nest to @skb
 * @dir: nonzero selects the Tx attribute and Tx callbacks, 0 selects
 *	Rx.  NOTE: this is the opposite of the "Tx = 0, Rx = 1"
 *	convention used by the PG set-config path in this file.
 *
 * Emits DCB_ATTR_CEE_TX_PG or DCB_ATTR_CEE_RX_PG containing the
 * per-traffic-class parameters and the per-bandwidth-group
 * percentages.  Returns 0 on success or -EMSGSIZE on skb overflow.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		goto nla_put_failure;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			goto nla_put_failure;

		/* Defaults in case the driver leaves an output untouched. */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		/* NLA_PUT_U8 jumps to nla_put_failure on skb overflow. */
		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
		nla_nest_end(skb, tc_nest);
	}

	/* Per-bandwidth-group link percentages. */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		NLA_PUT_U8(skb, i, tc_pct);
	}
	nla_nest_end(skb, pg);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1367 
/*
 * dcbnl_cee_fill - append the CEE DCBX state of @netdev to @skb
 *
 * Emits DCB_ATTR_IFNAME and a DCB_ATTR_CEE nest holding the local
 * Tx/Rx priority groups, PFC configuration, APP table and feature
 * flags, then - when the driver provides the hooks - the CEE peer's
 * PG/PFC/APP data, followed by DCB_ATTR_DCBX when available.  Shared
 * by the CEE GET path and CEE notifications.
 * Returns 0 on success or a negative errno (typically -EMSGSIZE).
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	/* NLA_PUT* macros jump to nla_put_failure when the skb is full. */
	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);

	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			NLA_PUT_U8(skb, i, value);
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app */
	/* dcb_lock covers the shared APP list walk and the dcbx read;
	 * failures inside the region must exit through dcb_unlock.
	 */
	spin_lock(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value))
				NLA_PUT_U8(skb, i, value);

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock(&dcb_lock);
nla_put_failure:
	return err;
}
1501 
1502 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1503 			u32 seq, u32 pid, int dcbx_ver)
1504 {
1505 	struct net *net = dev_net(dev);
1506 	struct sk_buff *skb;
1507 	struct nlmsghdr *nlh;
1508 	struct dcbmsg *dcb;
1509 	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1510 	int err;
1511 
1512 	if (!ops)
1513 		return -EOPNOTSUPP;
1514 
1515 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1516 	if (!skb)
1517 		return -ENOBUFS;
1518 
1519 	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
1520 	if (nlh == NULL) {
1521 		nlmsg_free(skb);
1522 		return -EMSGSIZE;
1523 	}
1524 
1525 	dcb = NLMSG_DATA(nlh);
1526 	dcb->dcb_family = AF_UNSPEC;
1527 	dcb->cmd = cmd;
1528 
1529 	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1530 		err = dcbnl_ieee_fill(skb, dev);
1531 	else
1532 		err = dcbnl_cee_fill(skb, dev);
1533 
1534 	if (err < 0) {
1535 		/* Report error to broadcast listeners */
1536 		nlmsg_cancel(skb, nlh);
1537 		kfree_skb(skb);
1538 		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1539 	} else {
1540 		/* End nlmsg and notify broadcast listeners */
1541 		nlmsg_end(skb, nlh);
1542 		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1543 	}
1544 
1545 	return err;
1546 }
1547 
1548 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1549 		      u32 seq, u32 pid)
1550 {
1551 	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
1552 }
1553 EXPORT_SYMBOL(dcbnl_ieee_notify);
1554 
1555 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1556 		     u32 seq, u32 pid)
1557 {
1558 	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
1559 }
1560 EXPORT_SYMBOL(dcbnl_cee_notify);
1561 
1562 /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
1563  * be completed the entire msg is aborted and error value is returned.
1564  * No attempt is made to reconcile the case where only part of the
1565  * cmd can be completed.
1566  */
/*
 * dcbnl_ieee_set - handle DCB_CMD_IEEE_SET (RTM_SETDCB)
 *
 * Applies the ETS, PFC and APP-table attributes from the
 * DCB_ATTR_IEEE nest through the driver's ieee_* hooks; APP entries
 * fall back to the shared dcb_ieee_setapp() list when the driver has
 * no ieee_setapp hook.  On the first failure the remaining attributes
 * are skipped.  A status reply is sent to the requester and an IEEE
 * notification is broadcast regardless of the outcome.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return err;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			/* Skip anything that is not an APP entry. */
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* Reply and notification carry the (possibly nonzero) status. */
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
1623 
1624 static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
1625 			  u32 pid, u32 seq, u16 flags)
1626 {
1627 	struct net *net = dev_net(netdev);
1628 	struct sk_buff *skb;
1629 	struct nlmsghdr *nlh;
1630 	struct dcbmsg *dcb;
1631 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1632 	int err;
1633 
1634 	if (!ops)
1635 		return -EOPNOTSUPP;
1636 
1637 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1638 	if (!skb)
1639 		return -ENOBUFS;
1640 
1641 	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1642 	if (nlh == NULL) {
1643 		nlmsg_free(skb);
1644 		return -EMSGSIZE;
1645 	}
1646 
1647 	dcb = NLMSG_DATA(nlh);
1648 	dcb->dcb_family = AF_UNSPEC;
1649 	dcb->cmd = DCB_CMD_IEEE_GET;
1650 
1651 	err = dcbnl_ieee_fill(skb, netdev);
1652 
1653 	if (err < 0) {
1654 		nlmsg_cancel(skb, nlh);
1655 		kfree_skb(skb);
1656 	} else {
1657 		nlmsg_end(skb, nlh);
1658 		err = rtnl_unicast(skb, net, pid);
1659 	}
1660 
1661 	return err;
1662 }
1663 
/*
 * dcbnl_ieee_del - handle DCB_CMD_IEEE_DEL (RTM_SETDCB)
 *
 * Removes each DCB_ATTR_IEEE_APP entry found in the DCB_ATTR_IEEE
 * nest, via the driver's ieee_delapp hook or the shared
 * dcb_ieee_delapp() list otherwise; stops at the first failure.  A
 * status reply is sent to the requester and an IEEE notification is
 * broadcast regardless of the outcome.
 */
static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			/* Skip anything that is not an APP entry. */
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* Reply and notification carry the (possibly nonzero) status. */
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}
1707 
1708 
1709 /* DCBX configuration */
1710 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
1711 			 u32 pid, u32 seq, u16 flags)
1712 {
1713 	int ret;
1714 
1715 	if (!netdev->dcbnl_ops->getdcbx)
1716 		return -EOPNOTSUPP;
1717 
1718 	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
1719 			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
1720 
1721 	return ret;
1722 }
1723 
1724 static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1725 			 u32 pid, u32 seq, u16 flags)
1726 {
1727 	int ret;
1728 	u8 value;
1729 
1730 	if (!netdev->dcbnl_ops->setdcbx)
1731 		return -EOPNOTSUPP;
1732 
1733 	if (!tb[DCB_ATTR_DCBX])
1734 		return -EINVAL;
1735 
1736 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1737 
1738 	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
1739 			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
1740 			  pid, seq, flags);
1741 
1742 	return ret;
1743 }
1744 
1745 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1746 			    u32 pid, u32 seq, u16 flags)
1747 {
1748 	struct sk_buff *dcbnl_skb;
1749 	struct nlmsghdr *nlh;
1750 	struct dcbmsg *dcb;
1751 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1752 	u8 value;
1753 	int ret, i;
1754 	int getall = 0;
1755 
1756 	if (!netdev->dcbnl_ops->getfeatcfg)
1757 		return -EOPNOTSUPP;
1758 
1759 	if (!tb[DCB_ATTR_FEATCFG])
1760 		return -EINVAL;
1761 
1762 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1763 			       dcbnl_featcfg_nest);
1764 	if (ret)
1765 		goto err_out;
1766 
1767 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1768 	if (!dcbnl_skb) {
1769 		ret = -ENOBUFS;
1770 		goto err_out;
1771 	}
1772 
1773 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1774 
1775 	dcb = NLMSG_DATA(nlh);
1776 	dcb->dcb_family = AF_UNSPEC;
1777 	dcb->cmd = DCB_CMD_GFEATCFG;
1778 
1779 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
1780 	if (!nest) {
1781 		ret = -EMSGSIZE;
1782 		goto nla_put_failure;
1783 	}
1784 
1785 	if (data[DCB_FEATCFG_ATTR_ALL])
1786 		getall = 1;
1787 
1788 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1789 		if (!getall && !data[i])
1790 			continue;
1791 
1792 		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1793 		if (!ret)
1794 			ret = nla_put_u8(dcbnl_skb, i, value);
1795 
1796 		if (ret) {
1797 			nla_nest_cancel(dcbnl_skb, nest);
1798 			goto nla_put_failure;
1799 		}
1800 	}
1801 	nla_nest_end(dcbnl_skb, nest);
1802 
1803 	nlmsg_end(dcbnl_skb, nlh);
1804 
1805 	return rtnl_unicast(dcbnl_skb, &init_net, pid);
1806 nla_put_failure:
1807 	nlmsg_cancel(dcbnl_skb, nlh);
1808 nlmsg_failure:
1809 	kfree_skb(dcbnl_skb);
1810 err_out:
1811 	return ret;
1812 }
1813 
1814 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1815 			    u32 pid, u32 seq, u16 flags)
1816 {
1817 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1818 	int ret, i;
1819 	u8 value;
1820 
1821 	if (!netdev->dcbnl_ops->setfeatcfg)
1822 		return -ENOTSUPP;
1823 
1824 	if (!tb[DCB_ATTR_FEATCFG])
1825 		return -EINVAL;
1826 
1827 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1828 			       dcbnl_featcfg_nest);
1829 
1830 	if (ret)
1831 		goto err;
1832 
1833 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1834 		if (data[i] == NULL)
1835 			continue;
1836 
1837 		value = nla_get_u8(data[i]);
1838 
1839 		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1840 
1841 		if (ret)
1842 			goto err;
1843 	}
1844 err:
1845 	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1846 		    pid, seq, flags);
1847 
1848 	return ret;
1849 }
1850 
1851 /* Handle CEE DCBX GET commands. */
1852 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
1853 			 u32 pid, u32 seq, u16 flags)
1854 {
1855 	struct net *net = dev_net(netdev);
1856 	struct sk_buff *skb;
1857 	struct nlmsghdr *nlh;
1858 	struct dcbmsg *dcb;
1859 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1860 	int err;
1861 
1862 	if (!ops)
1863 		return -EOPNOTSUPP;
1864 
1865 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1866 	if (!skb)
1867 		return -ENOBUFS;
1868 
1869 	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1870 	if (nlh == NULL) {
1871 		nlmsg_free(skb);
1872 		return -EMSGSIZE;
1873 	}
1874 
1875 	dcb = NLMSG_DATA(nlh);
1876 	dcb->dcb_family = AF_UNSPEC;
1877 	dcb->cmd = DCB_CMD_CEE_GET;
1878 
1879 	err = dcbnl_cee_fill(skb, netdev);
1880 
1881 	if (err < 0) {
1882 		nlmsg_cancel(skb, nlh);
1883 		nlmsg_free(skb);
1884 	} else {
1885 		nlmsg_end(skb, nlh);
1886 		err = rtnl_unicast(skb, net, pid);
1887 	}
1888 	return err;
1889 }
1890 
/*
 * dcb_doit - rtnetlink doit handler for RTM_GETDCB / RTM_SETDCB
 *
 * Validates the request (init_net only, DCB_ATTR_IFNAME required, the
 * named device must exist and provide dcbnl_ops), then dispatches on
 * dcb->cmd to the per-command handler.  A reference on the device is
 * held across the handler call and dropped at "out".  Unknown commands
 * and devices without dcbnl_ops yield -EINVAL.
 */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg  *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;

	/* DCB is only supported in the initial network namespace. */
	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	/* Takes a reference; released via dev_put() at "out". */
	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -EINVAL;

	if (!netdev->dcbnl_ops)
		goto errout;

	/* Each case jumps straight to "out" with the handler's status. */
	switch (dcb->cmd) {
	case DCB_CMD_GSTATE:
		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
		                     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GCFG:
		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GPERM_HWADDR:
		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
		                           nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_GCFG:
		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_GCFG:
		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_GCFG:
		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SSTATE:
		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
		                     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SCFG:
		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;

	case DCB_CMD_SET_ALL:
		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_SCFG:
		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_SCFG:
		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GCAP:
		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GNUMTCS:
		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SNUMTCS:
		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GSTATE:
		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SSTATE:
		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_SCFG:
		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GAPP:
		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SAPP:
		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_SET:
		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_GET:
		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_DEL:
		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GDCBX:
		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SDCBX:
		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GFEATCFG:
		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SFEATCFG:
		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_CEE_GET:
		ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	default:
		goto errout;
	}
errout:
	ret = -EINVAL;
out:
	dev_put(netdev);
	return ret;
}
2037 
2038 /**
2039  * dcb_getapp - retrieve the DCBX application user priority
2040  *
2041  * On success returns a non-zero 802.1p user priority bitmap
2042  * otherwise returns 0 as the invalid user priority bitmap to
2043  * indicate an error.
2044  */
2045 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2046 {
2047 	struct dcb_app_type *itr;
2048 	u8 prio = 0;
2049 
2050 	spin_lock(&dcb_lock);
2051 	list_for_each_entry(itr, &dcb_app_list, list) {
2052 		if (itr->app.selector == app->selector &&
2053 		    itr->app.protocol == app->protocol &&
2054 		    itr->ifindex == dev->ifindex) {
2055 			prio = itr->app.priority;
2056 			break;
2057 		}
2058 	}
2059 	spin_unlock(&dcb_lock);
2060 
2061 	return prio;
2062 }
2063 EXPORT_SYMBOL(dcb_getapp);
2064 
2065 /**
2066  * dcb_setapp - add CEE dcb application data to app list
2067  *
2068  * Priority 0 is an invalid priority in CEE spec. This routine
2069  * removes applications from the app list if the priority is
2070  * set to zero.
2071  */
2072 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2073 {
2074 	struct dcb_app_type *itr;
2075 	struct dcb_app_type event;
2076 
2077 	event.ifindex = dev->ifindex;
2078 	memcpy(&event.app, new, sizeof(event.app));
2079 	if (dev->dcbnl_ops->getdcbx)
2080 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2081 
2082 	spin_lock(&dcb_lock);
2083 	/* Search for existing match and replace */
2084 	list_for_each_entry(itr, &dcb_app_list, list) {
2085 		if (itr->app.selector == new->selector &&
2086 		    itr->app.protocol == new->protocol &&
2087 		    itr->ifindex == dev->ifindex) {
2088 			if (new->priority)
2089 				itr->app.priority = new->priority;
2090 			else {
2091 				list_del(&itr->list);
2092 				kfree(itr);
2093 			}
2094 			goto out;
2095 		}
2096 	}
2097 	/* App type does not exist add new application type */
2098 	if (new->priority) {
2099 		struct dcb_app_type *entry;
2100 		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2101 		if (!entry) {
2102 			spin_unlock(&dcb_lock);
2103 			return -ENOMEM;
2104 		}
2105 
2106 		memcpy(&entry->app, new, sizeof(*new));
2107 		entry->ifindex = dev->ifindex;
2108 		list_add(&entry->list, &dcb_app_list);
2109 	}
2110 out:
2111 	spin_unlock(&dcb_lock);
2112 	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2113 	return 0;
2114 }
2115 EXPORT_SYMBOL(dcb_setapp);
2116 
2117 /**
2118  * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
2119  *
2120  * Helper routine which on success returns a non-zero 802.1Qaz user
2121  * priority bitmap otherwise returns 0 to indicate the dcb_app was
2122  * not found in APP list.
2123  */
2124 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2125 {
2126 	struct dcb_app_type *itr;
2127 	u8 prio = 0;
2128 
2129 	spin_lock(&dcb_lock);
2130 	list_for_each_entry(itr, &dcb_app_list, list) {
2131 		if (itr->app.selector == app->selector &&
2132 		    itr->app.protocol == app->protocol &&
2133 		    itr->ifindex == dev->ifindex) {
2134 			prio |= 1 << itr->app.priority;
2135 		}
2136 	}
2137 	spin_unlock(&dcb_lock);
2138 
2139 	return prio;
2140 }
2141 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
2142 
2143 /**
2144  * dcb_ieee_setapp - add IEEE dcb application data to app list
2145  *
2146  * This adds Application data to the list. Multiple application
2147  * entries may exists for the same selector and protocol as long
2148  * as the priorities are different.
2149  */
2150 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2151 {
2152 	struct dcb_app_type *itr, *entry;
2153 	struct dcb_app_type event;
2154 	int err = 0;
2155 
2156 	event.ifindex = dev->ifindex;
2157 	memcpy(&event.app, new, sizeof(event.app));
2158 	if (dev->dcbnl_ops->getdcbx)
2159 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2160 
2161 	spin_lock(&dcb_lock);
2162 	/* Search for existing match and abort if found */
2163 	list_for_each_entry(itr, &dcb_app_list, list) {
2164 		if (itr->app.selector == new->selector &&
2165 		    itr->app.protocol == new->protocol &&
2166 		    itr->app.priority == new->priority &&
2167 		    itr->ifindex == dev->ifindex) {
2168 			err = -EEXIST;
2169 			goto out;
2170 		}
2171 	}
2172 
2173 	/* App entry does not exist add new entry */
2174 	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2175 	if (!entry) {
2176 		err = -ENOMEM;
2177 		goto out;
2178 	}
2179 
2180 	memcpy(&entry->app, new, sizeof(*new));
2181 	entry->ifindex = dev->ifindex;
2182 	list_add(&entry->list, &dcb_app_list);
2183 out:
2184 	spin_unlock(&dcb_lock);
2185 	if (!err)
2186 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2187 	return err;
2188 }
2189 EXPORT_SYMBOL(dcb_ieee_setapp);
2190 
2191 /**
2192  * dcb_ieee_delapp - delete IEEE dcb application data from list
2193  *
2194  * This removes a matching APP data from the APP list
2195  */
2196 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2197 {
2198 	struct dcb_app_type *itr;
2199 	struct dcb_app_type event;
2200 	int err = -ENOENT;
2201 
2202 	event.ifindex = dev->ifindex;
2203 	memcpy(&event.app, del, sizeof(event.app));
2204 	if (dev->dcbnl_ops->getdcbx)
2205 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2206 
2207 	spin_lock(&dcb_lock);
2208 	/* Search for existing match and remove it. */
2209 	list_for_each_entry(itr, &dcb_app_list, list) {
2210 		if (itr->app.selector == del->selector &&
2211 		    itr->app.protocol == del->protocol &&
2212 		    itr->app.priority == del->priority &&
2213 		    itr->ifindex == dev->ifindex) {
2214 			list_del(&itr->list);
2215 			kfree(itr);
2216 			err = 0;
2217 			goto out;
2218 		}
2219 	}
2220 
2221 out:
2222 	spin_unlock(&dcb_lock);
2223 	if (!err)
2224 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2225 	return err;
2226 }
2227 EXPORT_SYMBOL(dcb_ieee_delapp);
2228 
2229 static void dcb_flushapp(void)
2230 {
2231 	struct dcb_app_type *app;
2232 	struct dcb_app_type *tmp;
2233 
2234 	spin_lock(&dcb_lock);
2235 	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
2236 		list_del(&app->list);
2237 		kfree(app);
2238 	}
2239 	spin_unlock(&dcb_lock);
2240 }
2241 
/* Module init: prepare the APP table and hook the DCB netlink messages. */
static int __init dcbnl_init(void)
{
	/* Global APP table starts out empty. */
	INIT_LIST_HEAD(&dcb_app_list);

	/* Both GET and SET requests funnel through dcb_doit; no dump
	 * or calcit handlers are provided.
	 */
	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);
2252 
/* Module exit: tear down in the reverse order of dcbnl_init. */
static void __exit dcbnl_exit(void)
{
	/* Unregister the netlink handlers first so no new APP entries
	 * can be added while the list is being flushed below.
	 */
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);
2260