/* xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c (revision 9c736ace0666efe68efd53fcdfa2c6653c3e0e72) */
1 /*
2  * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include "port_buffer.h"
33 
mlx5e_port_query_buffer(struct mlx5e_priv * priv,struct mlx5e_port_buffer * port_buffer)34 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
35 			    struct mlx5e_port_buffer *port_buffer)
36 {
37 	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
38 	struct mlx5_core_dev *mdev = priv->mdev;
39 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
40 	u32 total_used = 0;
41 	void *buffer;
42 	void *out;
43 	int err;
44 	int i;
45 
46 	out = kzalloc(sz, GFP_KERNEL);
47 	if (!out)
48 		return -ENOMEM;
49 
50 	err = mlx5e_port_query_pbmc(mdev, out);
51 	if (err)
52 		goto out;
53 
54 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
55 		buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
56 		port_buffer->buffer[i].lossy =
57 			MLX5_GET(bufferx_reg, buffer, lossy);
58 		port_buffer->buffer[i].epsb =
59 			MLX5_GET(bufferx_reg, buffer, epsb);
60 		port_buffer->buffer[i].size =
61 			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
62 		port_buffer->buffer[i].xon =
63 			MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
64 		port_buffer->buffer[i].xoff =
65 			MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
66 		total_used += port_buffer->buffer[i].size;
67 
68 		netdev_dbg(priv->netdev, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n",
69 			   i,
70 			   port_buffer->buffer[i].size,
71 			   port_buffer->buffer[i].xon,
72 			   port_buffer->buffer[i].xoff,
73 			   port_buffer->buffer[i].epsb,
74 			   port_buffer->buffer[i].lossy);
75 	}
76 
77 	port_buffer->internal_buffers_size = 0;
78 	for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) {
79 		buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
80 		port_buffer->internal_buffers_size +=
81 			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
82 	}
83 
84 	port_buffer->port_buffer_size =
85 		MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
86 	port_buffer->headroom_size = total_used;
87 	port_buffer->spare_buffer_size = port_buffer->port_buffer_size -
88 					 port_buffer->internal_buffers_size -
89 					 port_buffer->headroom_size;
90 
91 	netdev_dbg(priv->netdev,
92 		   "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
93 		   port_buffer->port_buffer_size, port_buffer->headroom_size,
94 		   port_buffer->internal_buffers_size,
95 		   port_buffer->spare_buffer_size);
96 out:
97 	kfree(out);
98 	return err;
99 }
100 
/* Snapshot of one shared-buffer pool as read from the SBPR register */
struct mlx5e_buffer_pool {
	u32 infi_size;      /* non-zero when the pool is configured as "infinite" */
	u32 size;           /* configured pool size (SBPR 'size' field) */
	u32 buff_occupancy; /* occupancy as reported by the device */
};
106 
/* Query the SBPR register for a single shared-buffer pool and decode
 * its size/occupancy fields into @buffer_pool.
 */
static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev,
				 struct mlx5e_buffer_pool *buffer_pool,
				 u32 desc, u8 dir, u8 pool_idx)
{
	u32 sbpr_out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
	int ret;

	ret = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, sbpr_out,
				    sizeof(sbpr_out));
	if (ret)
		return ret;

	buffer_pool->infi_size = MLX5_GET(sbpr_reg, sbpr_out, infi_size);
	buffer_pool->size = MLX5_GET(sbpr_reg, sbpr_out, size);
	buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, sbpr_out, buff_occupancy);

	return 0;
}
125 
/* Direction selector used with the shared-buffer registers (SBPR/SBCM) */
enum {
	MLX5_INGRESS_DIR = 0,
	MLX5_EGRESS_DIR = 1,
};

/* Shared-buffer pool indices used with SBPR/SBCM */
enum {
	MLX5_LOSSY_POOL = 0,
	MLX5_LOSSLESS_POOL = 1,
};

/* No limit on usage of shared buffer pool (max_buff=0) */
#define MLX5_SB_POOL_NO_THRESHOLD  0
/* Shared buffer pool usage threshold when calculated
 * dynamically in alpha units. alpha=13 is equivalent to
 * HW_alpha of  [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha
 * equates to the following portion of the shared buffer pool:
 * [32 / (1 + n * 32)] While *n* is the number of buffers
 * that are using the shared buffer pool.
 */
#define MLX5_SB_POOL_THRESHOLD 13
146 
/* Shared buffer class management parameters */
struct mlx5_sbcm_params {
	u8 pool_idx;  /* shared pool the buffer class is bound to */
	u8 max_buff;  /* usage threshold in alpha units; 0 = no threshold */
	u8 infi_size; /* NOTE(review): presumably the SBCM "infinite size"
		       * flag — confirm against the device PRM
		       */
};

/* Unused buffer (size == 0): lossy pool, no shared-buffer quota */
static const struct mlx5_sbcm_params sbcm_default = {
	.pool_idx = MLX5_LOSSY_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};

/* Lossy buffer: lossy pool, no threshold */
static const struct mlx5_sbcm_params sbcm_lossy = {
	.pool_idx = MLX5_LOSSY_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};

/* Lossless buffer sharing the pool with other lossless buffers:
 * dynamic (alpha) threshold
 */
static const struct mlx5_sbcm_params sbcm_lossless = {
	.pool_idx = MLX5_LOSSLESS_POOL,
	.max_buff = MLX5_SB_POOL_THRESHOLD,
	.infi_size = 0,
};

/* Sole lossless buffer: lossless pool, no threshold */
static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = {
	.pool_idx = MLX5_LOSSLESS_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};
177 
178 /**
179  * select_sbcm_params() - selects the shared buffer pool configuration
180  *
181  * @buffer: <input> port buffer to retrieve params of
182  * @lossless_buff_count: <input> number of lossless buffers in total
183  *
184  * The selection is based on the following rules:
185  * 1. If buffer size is 0, no shared buffer pool is used.
186  * 2. If buffer is lossy, use lossy shared buffer pool.
187  * 3. If there are more than 1 lossless buffers, use lossless shared buffer pool
188  *    with threshold.
189  * 4. If there is only 1 lossless buffer, use lossless shared buffer pool
190  *    without threshold.
191  *
192  * @return const struct mlx5_sbcm_params* selected values
193  */
194 static const struct mlx5_sbcm_params *
select_sbcm_params(struct mlx5e_bufferx_reg * buffer,u8 lossless_buff_count)195 select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count)
196 {
197 	if (buffer->size == 0)
198 		return &sbcm_default;
199 
200 	if (buffer->lossy)
201 		return &sbcm_lossy;
202 
203 	if (lossless_buff_count > 1)
204 		return &sbcm_lossless;
205 
206 	return &sbcm_lossless_no_threshold;
207 }
208 
port_update_pool_cfg(struct mlx5_core_dev * mdev,struct mlx5e_port_buffer * port_buffer)209 static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
210 				struct mlx5e_port_buffer *port_buffer)
211 {
212 	const struct mlx5_sbcm_params *p;
213 	u8 lossless_buff_count = 0;
214 	int err;
215 	int i;
216 
217 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
218 		return 0;
219 
220 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
221 		lossless_buff_count += ((port_buffer->buffer[i].size) &&
222 				       (!(port_buffer->buffer[i].lossy)));
223 
224 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
225 		p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
226 		err = mlx5e_port_set_sbcm(mdev, 0, i,
227 					  MLX5_INGRESS_DIR,
228 					  p->infi_size,
229 					  p->max_buff,
230 					  p->pool_idx);
231 		if (err)
232 			return err;
233 	}
234 
235 	return 0;
236 }
237 
/* Resize the shared-buffer pools after a headroom change: whatever the
 * headroom gives up (or takes) is redistributed between the lossy egress
 * and lossless ingress pools. Sizes here are in buffer cells.
 *
 * Fixes vs. original:
 *  - guard the u32 subtraction against underflow when the requested
 *    headroom exceeds the total buffer (the old "< 4" check could be
 *    bypassed by wrap-around);
 *  - propagate mlx5e_port_set_sbpr() failures instead of ignoring them.
 */
static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
				     u32 current_headroom_size,
				     u32 new_headroom_size)
{
	struct mlx5e_buffer_pool lossless_ipool;
	struct mlx5e_buffer_pool lossy_epool;
	u32 lossless_ipool_size;
	u32 shared_buffer_size;
	u32 total_buffer_size;
	u32 lossy_epool_size;
	int err;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR,
				    MLX5_LOSSY_POOL);
	if (err)
		return err;

	err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR,
				    MLX5_LOSSLESS_POOL);
	if (err)
		return err;

	total_buffer_size = current_headroom_size + lossy_epool.size +
			    lossless_ipool.size;

	/* Reject oversized headroom up front: the subtraction below is
	 * unsigned, so a wrapped result would defeat the minimum-size check.
	 */
	if (new_headroom_size > total_buffer_size ||
	    total_buffer_size - new_headroom_size < 4) {
		pr_err("Requested port buffer is too large, not enough space left for shared buffer\n");
		return -EINVAL;
	}
	shared_buffer_size = total_buffer_size - new_headroom_size;

	/* Total shared buffer size is split in a ratio of 3:1 between
	 * lossy and lossless pools respectively.
	 */
	lossless_ipool_size = shared_buffer_size / 4;
	lossy_epool_size    = shared_buffer_size - lossless_ipool_size;

	/* Propagate SBPR write failures instead of silently ignoring them */
	err = mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
				  lossy_epool_size);
	if (err)
		return err;

	return mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR,
				   MLX5_LOSSLESS_POOL, 0,
				   lossless_ipool_size);
}
284 
port_set_buffer(struct mlx5e_priv * priv,struct mlx5e_port_buffer * port_buffer)285 static int port_set_buffer(struct mlx5e_priv *priv,
286 			   struct mlx5e_port_buffer *port_buffer)
287 {
288 	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
289 	struct mlx5_core_dev *mdev = priv->mdev;
290 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
291 	u32 current_headroom_cells = 0;
292 	u32 new_headroom_cells = 0;
293 	void *in;
294 	int err;
295 	int i;
296 
297 	in = kzalloc(sz, GFP_KERNEL);
298 	if (!in)
299 		return -ENOMEM;
300 
301 	err = mlx5e_port_query_pbmc(mdev, in);
302 	if (err)
303 		goto out;
304 
305 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
306 		void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
307 		current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
308 
309 		u64 size = port_buffer->buffer[i].size;
310 		u64 xoff = port_buffer->buffer[i].xoff;
311 		u64 xon = port_buffer->buffer[i].xon;
312 
313 		do_div(size, port_buff_cell_sz);
314 		new_headroom_cells += size;
315 		do_div(xoff, port_buff_cell_sz);
316 		do_div(xon, port_buff_cell_sz);
317 		MLX5_SET(bufferx_reg, buffer, size, size);
318 		MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
319 		MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
320 		MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
321 	}
322 
323 	err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
324 					new_headroom_cells);
325 	if (err)
326 		goto out;
327 
328 	err = port_update_pool_cfg(priv->mdev, port_buffer);
329 	if (err)
330 		goto out;
331 
332 	/* RO bits should be set to 0 on write */
333 	MLX5_SET(pbmc_reg, in, port_buffer_size, 0);
334 
335 	err = mlx5e_port_set_pbmc(mdev, in);
336 out:
337 	kfree(in);
338 	return err;
339 }
340 
341 /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
342  * minimum speed value is 40Gbps
343  */
calculate_xoff(struct mlx5e_priv * priv,unsigned int mtu)344 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
345 {
346 	u32 speed;
347 	u32 xoff;
348 	int err;
349 
350 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
351 	if (err)
352 		speed = SPEED_40000;
353 	speed = max_t(u32, speed, SPEED_40000);
354 
355 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
356 
357 	netdev_dbg(priv->netdev, "%s: xoff=%d\n", __func__, xoff);
358 	return xoff;
359 }
360 
/* Derive xoff/xon thresholds for every network buffer from its size.
 * Lossy buffers never pause, so their thresholds are cleared; a lossless
 * buffer must be large enough to absorb the in-flight data (xoff) plus
 * one max MTU and one cell of slack, otherwise -ENOMEM is returned.
 */
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
				 u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
		struct mlx5e_bufferx_reg *b = &port_buffer->buffer[i];

		if (b->lossy) {
			/* Lossy buffers do not generate pause frames */
			b->xoff = 0;
			b->xon  = 0;
			continue;
		}

		if (b->size < xoff + max_mtu + port_buff_cell_sz) {
			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
			       i, b->size);
			return -ENOMEM;
		}

		/* Stop sending with xoff-worth of room left; resume once a
		 * full MTU below that mark.
		 */
		b->xoff = b->size - xoff;
		b->xon  = b->xoff - max_mtu;
	}

	return 0;
}
387 
388 /**
389  *	update_buffer_lossy	- Update buffer configuration based on pfc
390  *	@mdev: port function core device
391  *	@max_mtu: netdev's max_mtu
392  *	@pfc_en: <input> current pfc configuration
393  *	@buffer: <input> current prio to buffer mapping
394  *	@xoff:   <input> xoff value
395  *	@port_buff_cell_sz: <input> port buffer cell_size
396  *	@port_buffer: <output> port receive buffer configuration
397  *	@change: <output>
398  *
399  *	Update buffer configuration based on pfc configuration and
400  *	priority to buffer mapping.
401  *	Buffer's lossy bit is changed to:
402  *		lossless if there is at least one PFC enabled priority
403  *		mapped to this buffer lossy if all priorities mapped to
404  *		this buffer are PFC disabled
405  *
406  *	@return: 0 if no error,
407  *	sets change to true if buffer configuration was modified.
408  */
update_buffer_lossy(struct mlx5_core_dev * mdev,unsigned int max_mtu,u8 pfc_en,u8 * buffer,u32 xoff,u16 port_buff_cell_sz,struct mlx5e_port_buffer * port_buffer,bool * change)409 static int update_buffer_lossy(struct mlx5_core_dev *mdev,
410 			       unsigned int max_mtu,
411 			       u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
412 			       struct mlx5e_port_buffer *port_buffer,
413 			       bool *change)
414 {
415 	bool changed = false;
416 	u8 lossy_count;
417 	u8 prio_count;
418 	u8 lossy;
419 	int prio;
420 	int err;
421 	int i;
422 
423 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
424 		prio_count = 0;
425 		lossy_count = 0;
426 
427 		for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) {
428 			if (buffer[prio] != i)
429 				continue;
430 
431 			prio_count++;
432 			lossy_count += !(pfc_en & (1 << prio));
433 		}
434 
435 		if (lossy_count == prio_count)
436 			lossy = 1;
437 		else /* lossy_count < prio_count */
438 			lossy = 0;
439 
440 		if (lossy != port_buffer->buffer[i].lossy) {
441 			port_buffer->buffer[i].lossy = lossy;
442 			changed = true;
443 		}
444 	}
445 
446 	if (changed) {
447 		err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
448 		if (err)
449 			return err;
450 
451 		err = port_update_pool_cfg(mdev, port_buffer);
452 		if (err)
453 			return err;
454 
455 		*change = true;
456 	}
457 
458 	return 0;
459 }
460 
/* Report the effective per-priority flow-control mask: all-ones when
 * global pause is on in either direction, otherwise the PFC mask as
 * queried from the device.
 */
static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
{
	u32 g_rx_pause, g_tx_pause;
	int err;

	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
	if (err)
		return err;

	/* If global pause enabled, set all active buffers to lossless.
	 * Otherwise, check PFC setting.
	 */
	if (g_rx_pause || g_tx_pause) {
		*pfc_en = 0xff;
		return 0;
	}

	return mlx5_query_port_pfc(mdev, pfc_en, NULL);
}
480 
/* Floor for the MTU used in threshold sizing (jumbo frame) */
#define MINIMUM_MAX_MTU 9216
/**
 * mlx5e_port_manual_buffer_config - apply user-requested buffer settings
 * @priv: mlx5e driver private data
 * @change: bitmask of MLX5E_PORT_BUFFER_* fields the caller modified
 * @mtu: current MTU, feeds the xoff calculation
 * @pfc: requested PFC configuration (consulted when MLX5E_PORT_BUFFER_PFC set)
 * @buffer_size: requested per-buffer sizes in bytes
 *               (consulted when MLX5E_PORT_BUFFER_SIZE set)
 * @prio2buffer: requested priority-to-buffer mapping
 *               (consulted when MLX5E_PORT_BUFFER_PRIO2BUFFER set)
 *
 * Re-derives buffer lossiness and xoff/xon thresholds from whichever
 * inputs changed, then commits the result via port_set_buffer() and,
 * if the mapping changed, mlx5e_port_set_priority2buffer().
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
				    u32 change, unsigned int mtu,
				    struct ieee_pfc *pfc,
				    u32 *buffer_size,
				    u8 *prio2buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct net_device *netdev = priv->netdev;
	struct mlx5e_port_buffer port_buffer;
	u32 xoff = calculate_xoff(priv, mtu);
	bool update_prio2buffer = false;
	u8 buffer[MLX5E_MAX_PRIORITY];
	bool update_buffer = false;
	unsigned int max_mtu;
	u32 total_used = 0;
	u8 curr_pfc_en;
	int err;
	int i;

	netdev_dbg(netdev, "%s: change=%x\n", __func__, change);
	/* Size thresholds against at least a jumbo-frame MTU */
	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);

	/* Start from the configuration currently programmed in hardware */
	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
		/* Cable length feeds calculate_xoff(); recompute thresholds */
		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	if (change & MLX5E_PORT_BUFFER_PFC) {
		netdev_dbg(netdev, "%s: requested PFC per priority bitmask: 0x%x\n",
			   __func__, pfc->pfc_en);
		/* New PFC mask applied against the current prio->buffer map */
		err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
		if (err)
			return err;

		err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff,
					  port_buff_cell_sz, &port_buffer,
					  &update_buffer);
		if (err)
			return err;
	}

	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
		update_prio2buffer = true;
		for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
			netdev_dbg(priv->netdev, "%s: requested to map prio[%d] to buffer %d\n",
				   __func__, i, prio2buffer[i]);

		/* New prio->buffer map applied against the current PFC state */
		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
		if (err)
			return err;

		err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff,
					  port_buff_cell_sz, &port_buffer, &update_buffer);
		if (err)
			return err;
	}

	if (change & MLX5E_PORT_BUFFER_SIZE) {
		for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
			netdev_dbg(priv->netdev, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
			/* A lossless buffer needs room for xoff headroom */
			if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
				netdev_dbg(priv->netdev, "%s: lossless buffer[%d] size cannot be zero\n",
					   __func__, i);
				return -EINVAL;
			}

			port_buffer.buffer[i].size = buffer_size[i];
			total_used += buffer_size[i];
		}

		netdev_dbg(priv->netdev, "%s: total buffer requested=%d\n", __func__, total_used);

		/* Any headroom growth must fit within the spare buffer */
		if (total_used > port_buffer.headroom_size &&
		    (total_used - port_buffer.headroom_size) >
			    port_buffer.spare_buffer_size)
			return -EINVAL;

		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	/* Need to update buffer configuration if xoff value is changed */
	if (!update_buffer && xoff != priv->dcbx.xoff) {
		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	/* Apply the settings */
	if (update_buffer) {
		err = port_set_buffer(priv, &port_buffer);
		if (err)
			return err;
	}

	priv->dcbx.xoff = xoff;

	if (update_prio2buffer)
		err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);

	return err;
}
593