// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include "qos.h"

#define MLX5_QOS_DEFAULT_DWRR_UID 0

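/* Illustrative call sequence (a sketch, not part of the original source):
 *
 *	u32 root, group;
 *	int err;
 *
 *	if (!mlx5_qos_is_supported(mdev))
 *		return -EOPNOTSUPP;
 *	err = mlx5_qos_create_root_node(mdev, &root);
 *	if (!err)
 *		err = mlx5_qos_create_leaf_node(mdev, root, 0, 0, &group);
 *	...
 *	mlx5_qos_destroy_node(mdev, group);
 *	mlx5_qos_destroy_node(mdev, root);
 */

/* Return true only if the device advertises every NIC QoS capability this
 * API relies on: SQ scheduling, bandwidth share and rate limiting.
 */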
bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev)
{
	if (!MLX5_CAP_GEN(mdev, qos))
		return false;
	if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling))
		return false;
	if (!MLX5_CAP_QOS(mdev, nic_bw_share))
		return false;
	if (!MLX5_CAP_QOS(mdev, nic_rate_limit))
		return false;
	return true;
}

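/* Maximum number of leaf scheduling elements (queue groups) supported by
 * the NIC scheduling hierarchy.
 */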
int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
	return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group);
}

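/* Create a queue-group scheduling element under @parent_id with the given
 * bandwidth share and maximum average bandwidth. On success the ID of the
 * new element is returned through @id.
 */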
int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
			      u32 bw_share, u32 max_avg_bw, u32 *id)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};

	if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
		return -EOPNOTSUPP;

	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);

	return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
						  sched_ctx, id);
}

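/* Create an inner node: a DWRR TSAR scheduling element under @parent_id.
 * Fails with -EOPNOTSUPP if the device does not support DWRR TSAR elements
 * in the NIC hierarchy.
 */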
int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
			       u32 bw_share, u32 max_avg_bw, u32 *id)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	void *attr;

	if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
	    !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
		return -EOPNOTSUPP;

	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);

	attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
	MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);

	return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
						  sched_ctx, id);
}

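/* Create the root of the hierarchy: a DWRR inner node attached to the
 * default DWRR UID, with bw_share and max_average_bw left at zero.
 */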
int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
{
	return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
}

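/* Update the bandwidth share and maximum average bandwidth of an existing
 * scheduling element @id; the modify bitmask selects exactly these two
 * fields.
 */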
int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
			 u32 bw_share, u32 max_avg_bw, u32 id)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	u32 bitmask = 0;

	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);

	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;

	return mlx5_modify_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
						  sched_ctx, id, bitmask);
}

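/* Destroy scheduling element @id previously created in the NIC hierarchy. */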
int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id)
{
	return mlx5_destroy_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, id);
}