xref: /linux/net/dcb/dcbnl.c (revision c9895ed5a84dc3cbc86a9d6d5656d8c187f53380)
1 /*
2  * Copyright (c) 2008, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Author: Lucy Liu <lucy.liu@intel.com>
18  */
19 
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <net/sock.h>
29 
30 /*
31  * Data Center Bridging (DCB) is a collection of Ethernet enhancements
32  * intended to allow network traffic with differing requirements
33  * (highly reliable, no drops vs. best effort vs. low latency) to operate
34  * and co-exist on Ethernet.  Current DCB features are:
35  *
36  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
37  *   framework for assigning bandwidth guarantees to traffic classes.
38  *
39  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
40  *   can work independently for each 802.1p priority.
41  *
42  * Congestion Notification - provides a mechanism for end-to-end congestion
43  *   control for protocols which do not have built-in congestion management.
44  *
45  * More information about the emerging standards for these Ethernet features
46  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
47  *
48  * This file implements an rtnetlink interface to allow configuration of DCB
49  * features for capable devices.
50  */
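
/*
 * Illustrative userspace sketch (not part of this file): one way a
 * management tool could query a device's DCB state through this interface,
 * by sending an RTM_GETDCB request carrying DCB_CMD_GSTATE plus the
 * mandatory DCB_ATTR_IFNAME attribute (see dcb_doit() below).  The helper
 * name and the 64-byte attribute buffer are arbitrary choices for the
 * example; reply reception and error handling are omitted.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *	#include <linux/dcbnl.h>
 *
 *	int dcb_query_state(const char *ifname)
 *	{
 *		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct dcbmsg dcb;
 *			char attrbuf[64];
 *		} req;
 *		struct rtattr *rta;
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct dcbmsg));
 *		req.nlh.nlmsg_type = RTM_GETDCB;
 *		req.nlh.nlmsg_flags = NLM_F_REQUEST;
 *		req.dcb.dcb_family = AF_UNSPEC;
 *		req.dcb.cmd = DCB_CMD_GSTATE;
 *
 *		rta = (struct rtattr *)((char *)&req +
 *					NLMSG_ALIGN(req.nlh.nlmsg_len));
 *		rta->rta_type = DCB_ATTR_IFNAME;
 *		rta->rta_len = RTA_LENGTH(strlen(ifname) + 1);
 *		strcpy(RTA_DATA(rta), ifname);
 *		req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
 *				    rta->rta_len;
 *
 *		return sendto(fd, &req, req.nlh.nlmsg_len, 0,
 *			      (struct sockaddr *)&sa, sizeof(sa));
 *	}
 */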
51 
52 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
53 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
54 MODULE_LICENSE("GPL");
55 
56 /**************** DCB attribute policies *************************************/
57 
58 /* DCB netlink attributes policy */
59 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
60 	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
61 	[DCB_ATTR_STATE]       = {.type = NLA_U8},
62 	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
63 	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
64 	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
65 	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
66 	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
67 	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
68 	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
69 	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
70 	[DCB_ATTR_IEEE]	       = {.type = NLA_NESTED},
71 	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
72 	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
73 };
74 
75 /* DCB priority flow control to User Priority nested attributes */
76 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
77 	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
78 	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
79 	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
80 	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
81 	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
82 	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
83 	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
84 	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
85 	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
86 };
87 
88 /* DCB priority grouping nested attributes */
89 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
90 	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
91 	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
92 	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
93 	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
94 	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
95 	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
96 	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
97 	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
98 	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
99 	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
100 	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
101 	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
102 	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
103 	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
104 	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
105 	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
106 	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
107 	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
108 };
109 
110 /* DCB traffic class nested attributes. */
111 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
112 	[DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
113 	[DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
114 	[DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
115 	[DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
116 	[DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
117 };
118 
119 /* DCB capabilities nested attributes. */
120 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
121 	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
122 	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
123 	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
124 	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
125 	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
126 	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
127 	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
128 	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
129 	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
130 };
131 
132 /* DCB number of traffic classes nested attributes. */
133 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
134 	[DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
135 	[DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
136 	[DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
137 };
138 
139 /* DCB BCN nested attributes. */
140 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
141 	[DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
142 	[DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
143 	[DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
144 	[DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
145 	[DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
146 	[DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
147 	[DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
148 	[DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
149 	[DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
150 	[DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
151 	[DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
152 	[DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
153 	[DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
154 	[DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
155 	[DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
156 	[DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
157 	[DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
158 	[DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
159 	[DCB_BCN_ATTR_W]            = {.type = NLA_U32},
160 	[DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
161 	[DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
162 	[DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
163 	[DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
164 	[DCB_BCN_ATTR_C]            = {.type = NLA_U32},
165 	[DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
166 };
167 
168 /* DCB APP nested attributes. */
169 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
170 	[DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
171 	[DCB_APP_ATTR_ID]           = {.type = NLA_U16},
172 	[DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
173 };
174 
175 /* IEEE 802.1Qaz nested attributes. */
176 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
177 	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
178 	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
179 	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
180 };
181 
182 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
183 	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
184 };
185 
186 /* DCB feature configuration nested attributes. */
187 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
188 	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
189 	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
190 	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
191 	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
192 };
193 
194 static LIST_HEAD(dcb_app_list);
195 static DEFINE_SPINLOCK(dcb_lock);
196 
197 /* standard netlink reply call */
198 static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
199                        u32 seq, u16 flags)
200 {
201 	struct sk_buff *dcbnl_skb;
202 	struct dcbmsg *dcb;
203 	struct nlmsghdr *nlh;
204 	int ret = -EINVAL;
205 
206 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
207 	if (!dcbnl_skb)
208 		return ret;
209 
210 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
211 
212 	dcb = NLMSG_DATA(nlh);
213 	dcb->dcb_family = AF_UNSPEC;
214 	dcb->cmd = cmd;
215 	dcb->dcb_pad = 0;
216 
217 	ret = nla_put_u8(dcbnl_skb, attr, value);
218 	if (ret)
219 		goto err;
220 
221 	/* end the message, assign the nlmsg_len. */
222 	nlmsg_end(dcbnl_skb, nlh);
223 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
224 	if (ret)
225 		return -EINVAL;
226 
227 	return 0;
228 nlmsg_failure:
229 err:
230 	kfree_skb(dcbnl_skb);
231 	return ret;
232 }
233 
234 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
235                           u32 pid, u32 seq, u16 flags)
236 {
237 	int ret = -EINVAL;
238 
239 	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
240 	if (!netdev->dcbnl_ops->getstate)
241 		return ret;
242 
243 	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
244 	                  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
245 
246 	return ret;
247 }
248 
249 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
250                            u32 pid, u32 seq, u16 flags)
251 {
252 	struct sk_buff *dcbnl_skb;
253 	struct nlmsghdr *nlh;
254 	struct dcbmsg *dcb;
255 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
256 	u8 value;
257 	int ret = -EINVAL;
258 	int i;
259 	int getall = 0;
260 
261 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
262 		return ret;
263 
264 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
265 	                       tb[DCB_ATTR_PFC_CFG],
266 	                       dcbnl_pfc_up_nest);
267 	if (ret)
268 		goto err_out;
269 
270 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
271 	if (!dcbnl_skb)
272 		goto err_out;
273 
274 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
275 
276 	dcb = NLMSG_DATA(nlh);
277 	dcb->dcb_family = AF_UNSPEC;
278 	dcb->cmd = DCB_CMD_PFC_GCFG;
279 
280 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
281 	if (!nest)
282 		goto err;
283 
284 	if (data[DCB_PFC_UP_ATTR_ALL])
285 		getall = 1;
286 
287 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
288 		if (!getall && !data[i])
289 			continue;
290 
291 		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
292 		                             &value);
293 		ret = nla_put_u8(dcbnl_skb, i, value);
294 
295 		if (ret) {
296 			nla_nest_cancel(dcbnl_skb, nest);
297 			goto err;
298 		}
299 	}
300 	nla_nest_end(dcbnl_skb, nest);
301 
302 	nlmsg_end(dcbnl_skb, nlh);
303 
304 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
305 	if (ret)
306 		goto err_out;
307 
308 	return 0;
309 nlmsg_failure:
310 err:
311 	kfree_skb(dcbnl_skb);
312 err_out:
313 	return -EINVAL;
314 }
315 
316 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
317                                 u32 pid, u32 seq, u16 flags)
318 {
319 	struct sk_buff *dcbnl_skb;
320 	struct nlmsghdr *nlh;
321 	struct dcbmsg *dcb;
322 	u8 perm_addr[MAX_ADDR_LEN];
323 	int ret = -EINVAL;
324 
325 	if (!netdev->dcbnl_ops->getpermhwaddr)
326 		return ret;
327 
328 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
329 	if (!dcbnl_skb)
330 		goto err_out;
331 
332 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
333 
334 	dcb = NLMSG_DATA(nlh);
335 	dcb->dcb_family = AF_UNSPEC;
336 	dcb->cmd = DCB_CMD_GPERM_HWADDR;
337 
338 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
339 
340 	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
341 	              perm_addr);
	if (ret)
		goto nlmsg_failure;
342 
343 	nlmsg_end(dcbnl_skb, nlh);
344 
345 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
346 	if (ret)
347 		goto err_out;
348 
349 	return 0;
350 
351 nlmsg_failure:
352 	kfree_skb(dcbnl_skb);
353 err_out:
354 	return -EINVAL;
355 }
356 
357 static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
358                         u32 pid, u32 seq, u16 flags)
359 {
360 	struct sk_buff *dcbnl_skb;
361 	struct nlmsghdr *nlh;
362 	struct dcbmsg *dcb;
363 	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
364 	u8 value;
365 	int ret = -EINVAL;
366 	int i;
367 	int getall = 0;
368 
369 	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
370 		return ret;
371 
372 	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
373 	                       dcbnl_cap_nest);
374 	if (ret)
375 		goto err_out;
376 
377 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
378 	if (!dcbnl_skb)
379 		goto err_out;
380 
381 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
382 
383 	dcb = NLMSG_DATA(nlh);
384 	dcb->dcb_family = AF_UNSPEC;
385 	dcb->cmd = DCB_CMD_GCAP;
386 
387 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
388 	if (!nest)
389 		goto err;
390 
391 	if (data[DCB_CAP_ATTR_ALL])
392 		getall = 1;
393 
394 	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
395 		if (!getall && !data[i])
396 			continue;
397 
398 		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
399 			ret = nla_put_u8(dcbnl_skb, i, value);
400 
401 			if (ret) {
402 				nla_nest_cancel(dcbnl_skb, nest);
403 				goto err;
404 			}
405 		}
406 	}
407 	nla_nest_end(dcbnl_skb, nest);
408 
409 	nlmsg_end(dcbnl_skb, nlh);
410 
411 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
412 	if (ret)
413 		goto err_out;
414 
415 	return 0;
416 nlmsg_failure:
417 err:
418 	kfree_skb(dcbnl_skb);
419 err_out:
420 	return -EINVAL;
421 }
422 
423 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
424                            u32 pid, u32 seq, u16 flags)
425 {
426 	struct sk_buff *dcbnl_skb;
427 	struct nlmsghdr *nlh;
428 	struct dcbmsg *dcb;
429 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
430 	u8 value;
431 	int ret = -EINVAL;
432 	int i;
433 	int getall = 0;
434 
435 	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
436 		return ret;
437 
438 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
439 	                       dcbnl_numtcs_nest);
440 	if (ret) {
441 		ret = -EINVAL;
442 		goto err_out;
443 	}
444 
445 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
446 	if (!dcbnl_skb) {
447 		ret = -EINVAL;
448 		goto err_out;
449 	}
450 
451 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
452 
453 	dcb = NLMSG_DATA(nlh);
454 	dcb->dcb_family = AF_UNSPEC;
455 	dcb->cmd = DCB_CMD_GNUMTCS;
456 
457 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
458 	if (!nest) {
459 		ret = -EINVAL;
460 		goto err;
461 	}
462 
463 	if (data[DCB_NUMTCS_ATTR_ALL])
464 		getall = 1;
465 
466 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
467 		if (!getall && !data[i])
468 			continue;
469 
470 		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
471 		if (!ret) {
472 			ret = nla_put_u8(dcbnl_skb, i, value);
473 
474 			if (ret) {
475 				nla_nest_cancel(dcbnl_skb, nest);
476 				ret = -EINVAL;
477 				goto err;
478 			}
479 		} else {
480 			goto err;
481 		}
482 	}
483 	nla_nest_end(dcbnl_skb, nest);
484 
485 	nlmsg_end(dcbnl_skb, nlh);
486 
487 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
488 	if (ret) {
489 		ret = -EINVAL;
490 		goto err_out;
491 	}
492 
493 	return 0;
494 nlmsg_failure:
495 err:
496 	kfree_skb(dcbnl_skb);
497 err_out:
498 	return ret;
499 }
500 
501 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
502                            u32 pid, u32 seq, u16 flags)
503 {
504 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
505 	int ret = -EINVAL;
506 	u8 value;
507 	int i;
508 
509 	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
510 		return ret;
511 
512 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
513 	                       dcbnl_numtcs_nest);
514 
515 	if (ret) {
516 		ret = -EINVAL;
517 		goto err;
518 	}
519 
520 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
521 		if (data[i] == NULL)
522 			continue;
523 
524 		value = nla_get_u8(data[i]);
525 
526 		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
527 
528 		if (ret)
529 			goto operr;
530 	}
531 
532 operr:
533 	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
534 	                  DCB_ATTR_NUMTCS, pid, seq, flags);
535 
536 err:
537 	return ret;
538 }
539 
540 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
541                              u32 pid, u32 seq, u16 flags)
542 {
543 	int ret = -EINVAL;
544 
545 	if (!netdev->dcbnl_ops->getpfcstate)
546 		return ret;
547 
548 	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
549 	                  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
550 	                  pid, seq, flags);
551 
552 	return ret;
553 }
554 
555 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
556                              u32 pid, u32 seq, u16 flags)
557 {
558 	int ret = -EINVAL;
559 	u8 value;
560 
561 	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
562 		return ret;
563 
564 	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
565 
566 	netdev->dcbnl_ops->setpfcstate(netdev, value);
567 
568 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
569 	                  pid, seq, flags);
570 
571 	return ret;
572 }
573 
574 static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
575                         u32 pid, u32 seq, u16 flags)
576 {
577 	struct sk_buff *dcbnl_skb;
578 	struct nlmsghdr *nlh;
579 	struct dcbmsg *dcb;
580 	struct nlattr *app_nest;
581 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
582 	u16 id;
583 	u8 up, idtype;
584 	int ret = -EINVAL;
585 
586 	if (!tb[DCB_ATTR_APP])
587 		goto out;
588 
589 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
590 	                       dcbnl_app_nest);
591 	if (ret)
592 		goto out;
593 
594 	ret = -EINVAL;
595 	/* all must be non-null */
596 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
597 	    (!app_tb[DCB_APP_ATTR_ID]))
598 		goto out;
599 
600 	/* either by Ethertype or by TCP/UDP port number */
601 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
602 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
603 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
604 		goto out;
605 
606 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
607 
608 	if (netdev->dcbnl_ops->getapp) {
609 		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
610 	} else {
611 		struct dcb_app app = {
612 					.selector = idtype,
613 					.protocol = id,
614 				     };
615 		up = dcb_getapp(netdev, &app);
616 	}
617 
618 	/* send this back */
619 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
620 	if (!dcbnl_skb)
621 		goto out;
622 
623 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
624 	dcb = NLMSG_DATA(nlh);
625 	dcb->dcb_family = AF_UNSPEC;
626 	dcb->cmd = DCB_CMD_GAPP;
627 
628 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
629 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
630 	if (ret)
631 		goto out_cancel;
632 
633 	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
634 	if (ret)
635 		goto out_cancel;
636 
637 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
638 	if (ret)
639 		goto out_cancel;
640 
641 	nla_nest_end(dcbnl_skb, app_nest);
642 	nlmsg_end(dcbnl_skb, nlh);
643 
644 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
645 	if (ret)
646 		goto nlmsg_failure;
647 
648 	goto out;
649 
650 out_cancel:
651 	nla_nest_cancel(dcbnl_skb, app_nest);
652 nlmsg_failure:
653 	kfree_skb(dcbnl_skb);
654 out:
655 	return ret;
656 }
657 
658 static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
659                         u32 pid, u32 seq, u16 flags)
660 {
661 	int err, ret = -EINVAL;
662 	u16 id;
663 	u8 up, idtype;
664 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
665 
666 	if (!tb[DCB_ATTR_APP])
667 		goto out;
668 
669 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
670 	                       dcbnl_app_nest);
671 	if (ret)
672 		goto out;
673 
674 	ret = -EINVAL;
675 	/* all must be non-null */
676 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
677 	    (!app_tb[DCB_APP_ATTR_ID]) ||
678 	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
679 		goto out;
680 
681 	/* either by Ethertype or by TCP/UDP port number */
682 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
683 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
684 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
685 		goto out;
686 
687 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
688 	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
689 
690 	if (netdev->dcbnl_ops->setapp) {
691 		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
692 	} else {
693 		struct dcb_app app;
694 		app.selector = idtype;
695 		app.protocol = id;
696 		app.priority = up;
697 		err = dcb_setapp(netdev, &app);
698 	}
699 
700 	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
701 			  pid, seq, flags);
702 out:
703 	return ret;
704 }
705 
706 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
707                              u32 pid, u32 seq, u16 flags, int dir)
708 {
709 	struct sk_buff *dcbnl_skb;
710 	struct nlmsghdr *nlh;
711 	struct dcbmsg *dcb;
712 	struct nlattr *pg_nest, *param_nest, *data;
713 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
714 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
715 	u8 prio, pgid, tc_pct, up_map;
716 	int ret  = -EINVAL;
717 	int getall = 0;
718 	int i;
719 
720 	if (!tb[DCB_ATTR_PG_CFG] ||
721 	    !netdev->dcbnl_ops->getpgtccfgtx ||
722 	    !netdev->dcbnl_ops->getpgtccfgrx ||
723 	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
724 	    !netdev->dcbnl_ops->getpgbwgcfgrx)
725 		return ret;
726 
727 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
728 	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
729 
730 	if (ret)
731 		goto err_out;
732 
733 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
734 	if (!dcbnl_skb)
735 		goto err_out;
736 
737 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
738 
739 	dcb = NLMSG_DATA(nlh);
740 	dcb->dcb_family = AF_UNSPEC;
741 	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
742 
743 	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
744 	if (!pg_nest)
745 		goto err;
746 
747 	if (pg_tb[DCB_PG_ATTR_TC_ALL])
748 		getall = 1;
749 
750 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
751 		if (!getall && !pg_tb[i])
752 			continue;
753 
754 		if (pg_tb[DCB_PG_ATTR_TC_ALL])
755 			data = pg_tb[DCB_PG_ATTR_TC_ALL];
756 		else
757 			data = pg_tb[i];
758 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
759 				       data, dcbnl_tc_param_nest);
760 		if (ret)
761 			goto err_pg;
762 
763 		param_nest = nla_nest_start(dcbnl_skb, i);
764 		if (!param_nest)
765 			goto err_pg;
766 
767 		pgid = DCB_ATTR_VALUE_UNDEFINED;
768 		prio = DCB_ATTR_VALUE_UNDEFINED;
769 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
770 		up_map = DCB_ATTR_VALUE_UNDEFINED;
771 
772 		if (dir) {
773 			/* Rx */
774 			netdev->dcbnl_ops->getpgtccfgrx(netdev,
775 						i - DCB_PG_ATTR_TC_0, &prio,
776 						&pgid, &tc_pct, &up_map);
777 		} else {
778 			/* Tx */
779 			netdev->dcbnl_ops->getpgtccfgtx(netdev,
780 						i - DCB_PG_ATTR_TC_0, &prio,
781 						&pgid, &tc_pct, &up_map);
782 		}
783 
784 		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
785 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
786 			ret = nla_put_u8(dcbnl_skb,
787 			                 DCB_TC_ATTR_PARAM_PGID, pgid);
788 			if (ret)
789 				goto err_param;
790 		}
791 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
792 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
793 			ret = nla_put_u8(dcbnl_skb,
794 			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
795 			if (ret)
796 				goto err_param;
797 		}
798 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
799 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
800 			ret = nla_put_u8(dcbnl_skb,
801 			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
802 			if (ret)
803 				goto err_param;
804 		}
805 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
806 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
807 			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
808 			                 tc_pct);
809 			if (ret)
810 				goto err_param;
811 		}
812 		nla_nest_end(dcbnl_skb, param_nest);
813 	}
814 
815 	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
816 		getall = 1;
817 	else
818 		getall = 0;
819 
820 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
821 		if (!getall && !pg_tb[i])
822 			continue;
823 
824 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
825 
826 		if (dir) {
827 			/* Rx */
828 			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
829 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
830 		} else {
831 			/* Tx */
832 			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
833 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
834 		}
835 		ret = nla_put_u8(dcbnl_skb, i, tc_pct);
836 
837 		if (ret)
838 			goto err_pg;
839 	}
840 
841 	nla_nest_end(dcbnl_skb, pg_nest);
842 
843 	nlmsg_end(dcbnl_skb, nlh);
844 
845 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
846 	if (ret)
847 		goto err_out;
848 
849 	return 0;
850 
851 err_param:
852 	nla_nest_cancel(dcbnl_skb, param_nest);
853 err_pg:
854 	nla_nest_cancel(dcbnl_skb, pg_nest);
855 nlmsg_failure:
856 err:
857 	kfree_skb(dcbnl_skb);
858 err_out:
859 	ret  = -EINVAL;
860 	return ret;
861 }
862 
863 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
864                              u32 pid, u32 seq, u16 flags)
865 {
866 	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
867 }
868 
869 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
870                              u32 pid, u32 seq, u16 flags)
871 {
872 	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
873 }
874 
875 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
876                           u32 pid, u32 seq, u16 flags)
877 {
878 	int ret = -EINVAL;
879 	u8 value;
880 
881 	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
882 		return ret;
883 
884 	value = nla_get_u8(tb[DCB_ATTR_STATE]);
885 
886 	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
887 	                  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
888 	                  pid, seq, flags);
889 
890 	return ret;
891 }
892 
893 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
894                            u32 pid, u32 seq, u16 flags)
895 {
896 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
897 	int i;
898 	int ret = -EINVAL;
899 	u8 value;
900 
901 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
902 		return ret;
903 
904 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
905 	                       tb[DCB_ATTR_PFC_CFG],
906 	                       dcbnl_pfc_up_nest);
907 	if (ret)
908 		goto err;
909 
910 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
911 		if (data[i] == NULL)
912 			continue;
913 		value = nla_get_u8(data[i]);
914 		netdev->dcbnl_ops->setpfccfg(netdev,
915 			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
916 	}
917 
918 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
919 	                  pid, seq, flags);
920 err:
921 	return ret;
922 }
923 
924 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
925                         u32 pid, u32 seq, u16 flags)
926 {
927 	int ret = -EINVAL;
928 
929 	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
930 		return ret;
931 
932 	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
933 	                  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
934 
935 	return ret;
936 }
937 
938 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
939                              u32 pid, u32 seq, u16 flags, int dir)
940 {
941 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
942 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
943 	int ret = -EINVAL;
944 	int i;
945 	u8 pgid;
946 	u8 up_map;
947 	u8 prio;
948 	u8 tc_pct;
949 
950 	if (!tb[DCB_ATTR_PG_CFG] ||
951 	    !netdev->dcbnl_ops->setpgtccfgtx ||
952 	    !netdev->dcbnl_ops->setpgtccfgrx ||
953 	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
954 	    !netdev->dcbnl_ops->setpgbwgcfgrx)
955 		return ret;
956 
957 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
958 	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
959 	if (ret)
960 		goto err;
961 
962 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
963 		if (!pg_tb[i])
964 			continue;
965 
966 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
967 		                       pg_tb[i], dcbnl_tc_param_nest);
968 		if (ret)
969 			goto err;
970 
971 		pgid = DCB_ATTR_VALUE_UNDEFINED;
972 		prio = DCB_ATTR_VALUE_UNDEFINED;
973 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
974 		up_map = DCB_ATTR_VALUE_UNDEFINED;
975 
976 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
977 			prio =
978 			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
979 
980 		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
981 			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
982 
983 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
984 			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
985 
986 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
987 			up_map =
988 			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
989 
990 		/* dir: Tx = 0, Rx = 1 */
991 		if (dir) {
992 			/* Rx */
993 			netdev->dcbnl_ops->setpgtccfgrx(netdev,
994 				i - DCB_PG_ATTR_TC_0,
995 				prio, pgid, tc_pct, up_map);
996 		} else {
997 			/* Tx */
998 			netdev->dcbnl_ops->setpgtccfgtx(netdev,
999 				i - DCB_PG_ATTR_TC_0,
1000 				prio, pgid, tc_pct, up_map);
1001 		}
1002 	}
1003 
1004 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1005 		if (!pg_tb[i])
1006 			continue;
1007 
1008 		tc_pct = nla_get_u8(pg_tb[i]);
1009 
1010 		/* dir: Tx = 0, Rx = 1 */
1011 		if (dir) {
1012 			/* Rx */
1013 			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
1014 					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1015 		} else {
1016 			/* Tx */
1017 			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
1018 					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1019 		}
1020 	}
1021 
1022 	ret = dcbnl_reply(0, RTM_SETDCB,
1023 			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
1024 			  DCB_ATTR_PG_CFG, pid, seq, flags);
1025 
1026 err:
1027 	return ret;
1028 }
1029 
1030 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
1031                              u32 pid, u32 seq, u16 flags)
1032 {
1033 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
1034 }
1035 
1036 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
1037                              u32 pid, u32 seq, u16 flags)
1038 {
1039 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
1040 }
1041 
1042 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
1043                             u32 pid, u32 seq, u16 flags)
1044 {
1045 	struct sk_buff *dcbnl_skb;
1046 	struct nlmsghdr *nlh;
1047 	struct dcbmsg *dcb;
1048 	struct nlattr *bcn_nest;
1049 	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
1050 	u8 value_byte;
1051 	u32 value_integer;
1052 	int ret  = -EINVAL;
1053 	bool getall = false;
1054 	int i;
1055 
1056 	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
1057 	    !netdev->dcbnl_ops->getbcncfg)
1058 		return ret;
1059 
1060 	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
1061 	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
1062 
1063 	if (ret)
1064 		goto err_out;
1065 
1066 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1067 	if (!dcbnl_skb)
1068 		goto err_out;
1069 
1070 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1071 
1072 	dcb = NLMSG_DATA(nlh);
1073 	dcb->dcb_family = AF_UNSPEC;
1074 	dcb->cmd = DCB_CMD_BCN_GCFG;
1075 
1076 	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
1077 	if (!bcn_nest)
1078 		goto err;
1079 
1080 	if (bcn_tb[DCB_BCN_ATTR_ALL])
1081 		getall = true;
1082 
1083 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1084 		if (!getall && !bcn_tb[i])
1085 			continue;
1086 
1087 		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
1088 		                            &value_byte);
1089 		ret = nla_put_u8(dcbnl_skb, i, value_byte);
1090 		if (ret)
1091 			goto err_bcn;
1092 	}
1093 
1094 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1095 		if (!getall && !bcn_tb[i])
1096 			continue;
1097 
1098 		netdev->dcbnl_ops->getbcncfg(netdev, i,
1099 		                             &value_integer);
1100 		ret = nla_put_u32(dcbnl_skb, i, value_integer);
1101 		if (ret)
1102 			goto err_bcn;
1103 	}
1104 
1105 	nla_nest_end(dcbnl_skb, bcn_nest);
1106 
1107 	nlmsg_end(dcbnl_skb, nlh);
1108 
1109 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
1110 	if (ret)
1111 		goto err_out;
1112 
1113 	return 0;
1114 
1115 err_bcn:
1116 	nla_nest_cancel(dcbnl_skb, bcn_nest);
1117 nlmsg_failure:
1118 err:
1119 	kfree_skb(dcbnl_skb);
1120 err_out:
1121 	ret  = -EINVAL;
1122 	return ret;
1123 }
1124 
1125 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1126                             u32 pid, u32 seq, u16 flags)
1127 {
1128 	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
1129 	int i;
1130 	int ret = -EINVAL;
1131 	u8 value_byte;
1132 	u32 value_int;
1133 
1134 	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
1135 	    !netdev->dcbnl_ops->setbcnrp)
1136 		return ret;
1137 
1138 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
1139 	                       tb[DCB_ATTR_BCN],
1140 	                       dcbnl_bcn_nest);
1141 	if (ret)
1142 		goto err;
1143 
1144 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1145 		if (data[i] == NULL)
1146 			continue;
1147 		value_byte = nla_get_u8(data[i]);
1148 		netdev->dcbnl_ops->setbcnrp(netdev,
1149 			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
1150 	}
1151 
1152 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1153 		if (data[i] == NULL)
1154 			continue;
1155 		value_int = nla_get_u32(data[i]);
1156 		netdev->dcbnl_ops->setbcncfg(netdev,
1157 	                                     i, value_int);
1158 	}
1159 
1160 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
1161 	                  pid, seq, flags);
1162 err:
1163 	return ret;
1164 }
1165 
1166 /* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot be
1167  * completed, the entire message is aborted and an error value is returned.
1168  * No attempt is made to reconcile the case where only part of the command
1169  * has already been applied (see the attribute layout sketch after this
1170  * function).
 */
1171 static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1172 			  u32 pid, u32 seq, u16 flags)
1173 {
1174 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1175 	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1176 	int err = -EOPNOTSUPP;
1177 
1178 	if (!ops)
1179 		goto err;
1180 
1181 	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1182 			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1183 	if (err)
1184 		goto err;
1185 
1186 	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1187 		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1188 		err = ops->ieee_setets(netdev, ets);
1189 		if (err)
1190 			goto err;
1191 	}
1192 
1193 	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1194 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1195 		err = ops->ieee_setpfc(netdev, pfc);
1196 		if (err)
1197 			goto err;
1198 	}
1199 
1200 	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1201 		struct nlattr *attr;
1202 		int rem;
1203 
1204 		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1205 			struct dcb_app *app_data;
1206 			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1207 				continue;
1208 			app_data = nla_data(attr);
1209 			if (ops->ieee_setapp)
1210 				err = ops->ieee_setapp(netdev, app_data);
1211 			else
1212 				err = dcb_setapp(netdev, app_data);
1213 			if (err)
1214 				goto err;
1215 		}
1216 	}
1217 
1218 err:
1219 	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
1220 		    pid, seq, flags);
1221 	return err;
1222 }
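
/*
 * Illustrative sketch of the attribute nesting that dcbnl_ieee_set() above
 * parses (values are arbitrary examples, not requirements):
 *
 *	DCB_ATTR_IFNAME			"eth0"
 *	DCB_ATTR_IEEE (nested)
 *		DCB_ATTR_IEEE_ETS	struct ieee_ets
 *		DCB_ATTR_IEEE_PFC	struct ieee_pfc
 *		DCB_ATTR_IEEE_APP_TABLE (nested)
 *			DCB_ATTR_IEEE_APP	struct dcb_app
 *			DCB_ATTR_IEEE_APP	struct dcb_app
 *
 * Each DCB_ATTR_IEEE_APP entry maps a protocol identifier to an 802.1p
 * priority, e.g. (0x8906 is the FCoE Ethertype; selector 1 means
 * "Ethertype" in IEEE 802.1Qaz):
 *
 *	struct dcb_app app = {
 *		.selector = 1,
 *		.protocol = 0x8906,
 *		.priority = 3,
 *	};
 */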
1223 
1224 
1225 /* Handle IEEE 802.1Qaz GET commands. */
1226 static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
1227 			  u32 pid, u32 seq, u16 flags)
1228 {
1229 	struct sk_buff *skb;
1230 	struct nlmsghdr *nlh;
1231 	struct dcbmsg *dcb;
1232 	struct nlattr *ieee, *app;
1233 	struct dcb_app_type *itr;
1234 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1235 	int err;
1236 
1237 	if (!ops)
1238 		return -EOPNOTSUPP;
1239 
1240 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1241 	if (!skb)
1242 		return -ENOBUFS;
1243 
1244 	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1245 
1246 	dcb = NLMSG_DATA(nlh);
1247 	dcb->dcb_family = AF_UNSPEC;
1248 	dcb->cmd = DCB_CMD_IEEE_GET;
1249 
1250 	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
1251 
1252 	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1253 	if (!ieee)
1254 		goto nla_put_failure;
1255 
1256 	if (ops->ieee_getets) {
1257 		struct ieee_ets ets;
1258 		err = ops->ieee_getets(netdev, &ets);
1259 		if (!err)
1260 			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
1261 	}
1262 
1263 	if (ops->ieee_getpfc) {
1264 		struct ieee_pfc pfc;
1265 		err = ops->ieee_getpfc(netdev, &pfc);
1266 		if (!err)
1267 			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
1268 	}
1269 
1270 	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
1271 	if (!app)
1272 		goto nla_put_failure;
1273 
1274 	spin_lock(&dcb_lock);
1275 	list_for_each_entry(itr, &dcb_app_list, list) {
1276 		if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
1277 			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1278 					 &itr->app);
1279 			if (err) {
1280 				spin_unlock(&dcb_lock);
1281 				goto nla_put_failure;
1282 			}
1283 		}
1284 	}
1285 	spin_unlock(&dcb_lock);
1286 	nla_nest_end(skb, app);
1287 
1288 	nla_nest_end(skb, ieee);
1289 	nlmsg_end(skb, nlh);
1290 
1291 	return rtnl_unicast(skb, &init_net, pid);
1292 nla_put_failure:
1293 	nlmsg_cancel(skb, nlh);
1294 nlmsg_failure:
1295 	kfree_skb(skb);
1296 	return -1;
1297 }
1298 
1299 /* DCBX configuration */
1300 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
1301 			 u32 pid, u32 seq, u16 flags)
1302 {
1303 	int ret;
1304 
1305 	if (!netdev->dcbnl_ops->getdcbx)
1306 		return -EOPNOTSUPP;
1307 
1308 	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
1309 			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
1310 
1311 	return ret;
1312 }
1313 
1314 static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1315 			 u32 pid, u32 seq, u16 flags)
1316 {
1317 	int ret;
1318 	u8 value;
1319 
1320 	if (!netdev->dcbnl_ops->setdcbx)
1321 		return -EOPNOTSUPP;
1322 
1323 	if (!tb[DCB_ATTR_DCBX])
1324 		return -EINVAL;
1325 
1326 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1327 
1328 	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
1329 			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
1330 			  pid, seq, flags);
1331 
1332 	return ret;
1333 }
1334 
1335 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1336 			    u32 pid, u32 seq, u16 flags)
1337 {
1338 	struct sk_buff *dcbnl_skb;
1339 	struct nlmsghdr *nlh;
1340 	struct dcbmsg *dcb;
1341 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1342 	u8 value;
1343 	int ret, i;
1344 	int getall = 0;
1345 
1346 	if (!netdev->dcbnl_ops->getfeatcfg)
1347 		return -EOPNOTSUPP;
1348 
1349 	if (!tb[DCB_ATTR_FEATCFG])
1350 		return -EINVAL;
1351 
1352 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1353 			       dcbnl_featcfg_nest);
1354 	if (ret)
1355 		goto err_out;
1356 
1357 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1358 	if (!dcbnl_skb) {
1359 		ret = -ENOBUFS;
1360 		goto err_out;
1361 	}
1362 
1363 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1364 
1365 	dcb = NLMSG_DATA(nlh);
1366 	dcb->dcb_family = AF_UNSPEC;
1367 	dcb->cmd = DCB_CMD_GFEATCFG;
1368 
1369 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
1370 	if (!nest) {
1371 		ret = -EMSGSIZE;
1372 		goto nla_put_failure;
1373 	}
1374 
1375 	if (data[DCB_FEATCFG_ATTR_ALL])
1376 		getall = 1;
1377 
1378 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1379 		if (!getall && !data[i])
1380 			continue;
1381 
1382 		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1383 		if (!ret)
1384 			ret = nla_put_u8(dcbnl_skb, i, value);
1385 
1386 		if (ret) {
1387 			nla_nest_cancel(dcbnl_skb, nest);
1388 			goto nla_put_failure;
1389 		}
1390 	}
1391 	nla_nest_end(dcbnl_skb, nest);
1392 
1393 	nlmsg_end(dcbnl_skb, nlh);
1394 
1395 	return rtnl_unicast(dcbnl_skb, &init_net, pid);
1396 nla_put_failure:
1397 	nlmsg_cancel(dcbnl_skb, nlh);
1398 nlmsg_failure:
1399 	kfree_skb(dcbnl_skb);
1400 err_out:
1401 	return ret;
1402 }
1403 
1404 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1405 			    u32 pid, u32 seq, u16 flags)
1406 {
1407 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1408 	int ret, i;
1409 	u8 value;
1410 
1411 	if (!netdev->dcbnl_ops->setfeatcfg)
1412 		return -EOPNOTSUPP;
1413 
1414 	if (!tb[DCB_ATTR_FEATCFG])
1415 		return -EINVAL;
1416 
1417 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1418 			       dcbnl_featcfg_nest);
1419 
1420 	if (ret)
1421 		goto err;
1422 
1423 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1424 		if (data[i] == NULL)
1425 			continue;
1426 
1427 		value = nla_get_u8(data[i]);
1428 
1429 		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1430 
1431 		if (ret)
1432 			goto err;
1433 	}
1434 err:
1435 	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1436 		    pid, seq, flags);
1437 
1438 	return ret;
1439 }
1440 
1441 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1442 {
1443 	struct net *net = sock_net(skb->sk);
1444 	struct net_device *netdev;
1445 	struct dcbmsg  *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
1446 	struct nlattr *tb[DCB_ATTR_MAX + 1];
1447 	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1448 	int ret = -EINVAL;
1449 
1450 	if (!net_eq(net, &init_net))
1451 		return -EINVAL;
1452 
1453 	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1454 			  dcbnl_rtnl_policy);
1455 	if (ret < 0)
1456 		return ret;
1457 
1458 	if (!tb[DCB_ATTR_IFNAME])
1459 		return -EINVAL;
1460 
1461 	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1462 	if (!netdev)
1463 		return -EINVAL;
1464 
1465 	if (!netdev->dcbnl_ops)
1466 		goto errout;
1467 
1468 	switch (dcb->cmd) {
1469 	case DCB_CMD_GSTATE:
1470 		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1471 		                     nlh->nlmsg_flags);
1472 		goto out;
1473 	case DCB_CMD_PFC_GCFG:
1474 		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1475 		                      nlh->nlmsg_flags);
1476 		goto out;
1477 	case DCB_CMD_GPERM_HWADDR:
1478 		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1479 		                           nlh->nlmsg_flags);
1480 		goto out;
1481 	case DCB_CMD_PGTX_GCFG:
1482 		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1483 		                        nlh->nlmsg_flags);
1484 		goto out;
1485 	case DCB_CMD_PGRX_GCFG:
1486 		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1487 		                        nlh->nlmsg_flags);
1488 		goto out;
1489 	case DCB_CMD_BCN_GCFG:
1490 		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1491 		                       nlh->nlmsg_flags);
1492 		goto out;
1493 	case DCB_CMD_SSTATE:
1494 		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1495 		                     nlh->nlmsg_flags);
1496 		goto out;
1497 	case DCB_CMD_PFC_SCFG:
1498 		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1499 		                      nlh->nlmsg_flags);
1500 		goto out;
1501 
1502 	case DCB_CMD_SET_ALL:
1503 		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
1504 		                   nlh->nlmsg_flags);
1505 		goto out;
1506 	case DCB_CMD_PGTX_SCFG:
1507 		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1508 		                        nlh->nlmsg_flags);
1509 		goto out;
1510 	case DCB_CMD_PGRX_SCFG:
1511 		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1512 		                        nlh->nlmsg_flags);
1513 		goto out;
1514 	case DCB_CMD_GCAP:
1515 		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
1516 		                   nlh->nlmsg_flags);
1517 		goto out;
1518 	case DCB_CMD_GNUMTCS:
1519 		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1520 		                      nlh->nlmsg_flags);
1521 		goto out;
1522 	case DCB_CMD_SNUMTCS:
1523 		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1524 		                      nlh->nlmsg_flags);
1525 		goto out;
1526 	case DCB_CMD_PFC_GSTATE:
1527 		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1528 		                        nlh->nlmsg_flags);
1529 		goto out;
1530 	case DCB_CMD_PFC_SSTATE:
1531 		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1532 		                        nlh->nlmsg_flags);
1533 		goto out;
1534 	case DCB_CMD_BCN_SCFG:
1535 		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1536 		                       nlh->nlmsg_flags);
1537 		goto out;
1538 	case DCB_CMD_GAPP:
1539 		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
1540 		                   nlh->nlmsg_flags);
1541 		goto out;
1542 	case DCB_CMD_SAPP:
1543 		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
1544 		                   nlh->nlmsg_flags);
1545 		goto out;
1546 	case DCB_CMD_IEEE_SET:
1547 		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
1548 				 nlh->nlmsg_flags);
1549 		goto out;
1550 	case DCB_CMD_IEEE_GET:
1551 		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
1552 				 nlh->nlmsg_flags);
1553 		goto out;
1554 	case DCB_CMD_GDCBX:
1555 		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
1556 				    nlh->nlmsg_flags);
1557 		goto out;
1558 	case DCB_CMD_SDCBX:
1559 		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
1560 				    nlh->nlmsg_flags);
1561 		goto out;
1562 	case DCB_CMD_GFEATCFG:
1563 		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1564 				       nlh->nlmsg_flags);
1565 		goto out;
1566 	case DCB_CMD_SFEATCFG:
1567 		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1568 				       nlh->nlmsg_flags);
1569 		goto out;
1570 	default:
1571 		goto errout;
1572 	}
1573 errout:
1574 	ret = -EINVAL;
1575 out:
1576 	dev_put(netdev);
1577 	return ret;
1578 }
1579 
1580 /**
1581  * dcb_getapp - retrieve the DCBX application user priority
1582  *
1583  * On success, returns a non-zero 802.1p user priority bitmap; otherwise
1584  * returns 0, the invalid user priority bitmap, to indicate that no matching
1585  * entry was found.  (A usage sketch follows the function below.)
1586  */
1587 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1588 {
1589 	struct dcb_app_type *itr;
1590 	u8 prio = 0;
1591 
1592 	spin_lock(&dcb_lock);
1593 	list_for_each_entry(itr, &dcb_app_list, list) {
1594 		if (itr->app.selector == app->selector &&
1595 		    itr->app.protocol == app->protocol &&
1596 		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
1597 			prio = itr->app.priority;
1598 			break;
1599 		}
1600 	}
1601 	spin_unlock(&dcb_lock);
1602 
1603 	return prio;
1604 }
1605 EXPORT_SYMBOL(dcb_getapp);
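
/*
 * Illustrative use of dcb_getapp() (a sketch, not taken from any in-tree
 * caller): look up the priority currently mapped to the FCoE Ethertype
 * (0x8906) for a device.  Any selector/protocol pair previously added with
 * dcb_setapp() can be queried the same way; a return value of 0 means no
 * entry exists.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = 0x8906,
 *	};
 *	u8 prio = dcb_getapp(netdev, &app);
 */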
1606 
1607 /**
1608  * dcb_setapp - add DCB application data to the app list
1609  *
1610  * Priority 0 is the default priority; this removes an application from the
1611  * app list if its priority is set to zero.  (A usage sketch follows below.)
1612  */
1613 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
1614 {
1615 	struct dcb_app_type *itr;
1616 
1617 	spin_lock(&dcb_lock);
1618 	/* Search for existing match and replace */
1619 	list_for_each_entry(itr, &dcb_app_list, list) {
1620 		if (itr->app.selector == new->selector &&
1621 		    itr->app.protocol == new->protocol &&
1622 		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
1623 			if (new->priority)
1624 				itr->app.priority = new->priority;
1625 			else {
1626 				list_del(&itr->list);
1627 				kfree(itr);
1628 			}
1629 			goto out;
1630 		}
1631 	}
1632 	/* App type does not exist; add a new application entry */
1633 	if (new->priority) {
1634 		struct dcb_app_type *entry;
1635 		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
1636 		if (!entry) {
1637 			spin_unlock(&dcb_lock);
1638 			return -ENOMEM;
1639 		}
1640 
1641 		memcpy(&entry->app, new, sizeof(*new));
1642 		strncpy(entry->name, dev->name, IFNAMSIZ);
1643 		list_add(&entry->list, &dcb_app_list);
1644 	}
1645 out:
1646 	spin_unlock(&dcb_lock);
1647 	call_dcbevent_notifiers(DCB_APP_EVENT, new);
1648 	return 0;
1649 }
1650 EXPORT_SYMBOL(dcb_setapp);
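
/*
 * Illustrative use of dcb_setapp() (a sketch): map FCoE traffic (Ethertype
 * 0x8906) to priority 3 on a device, then remove the mapping again by
 * writing priority 0.  The entry is stored in the module-global
 * dcb_app_list, and DCB_APP_EVENT notifiers are called in both cases.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = 0x8906,
 *		.priority = 3,
 *	};
 *
 *	dcb_setapp(netdev, &app);	(add or update the entry)
 *	app.priority = 0;
 *	dcb_setapp(netdev, &app);	(delete the entry)
 */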
1651 
1652 static void dcb_flushapp(void)
1653 {
1654 	struct dcb_app_type *app;
1655 	struct dcb_app_type *tmp;
1656 
1657 	spin_lock(&dcb_lock);
1658 	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1659 		list_del(&app->list);
1660 		kfree(app);
1661 	}
1662 	spin_unlock(&dcb_lock);
1663 }
1664 
1665 static int __init dcbnl_init(void)
1666 {
1667 	INIT_LIST_HEAD(&dcb_app_list);
1668 
1669 	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
1670 	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
1671 
1672 	return 0;
1673 }
1674 module_init(dcbnl_init);
1675 
1676 static void __exit dcbnl_exit(void)
1677 {
1678 	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
1679 	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
1680 	dcb_flushapp();
1681 }
1682 module_exit(dcbnl_exit);
1683