/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "en.h"
#include "lib/crypto.h"

/* mlx5e global resources should be placed in this file.
 * Global resources are common to all the netdevices created on the same NIC.
 */

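/* Set the relaxed ordering bits in an mkey context from the device
 * capabilities: RO write follows the relaxed_ordering_write cap, while RO
 * read is allowed either natively or, when PCIe relaxed ordering is enabled
 * on the function, via the relaxed_ordering_read_pci_enabled cap.
 */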
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read) ||
		       (pcie_relaxed_ordering_enabled(mdev->pdev) &&
			MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled));

	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
}

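/* Create a physical-address (PA) mkey on the given protection domain that
 * covers the whole address space (length64), with local read/write access
 * and relaxed ordering taken from the device capabilities.
 */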
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);

	kvfree(in);
	return err;
}

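/* Create a TIS in the shared mlx5e transport domain. When this device is
 * the LACP owner of a LAG, strict TX port affinity is also requested for
 * the TIS.
 */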
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
{
	int tc, i;

	for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
		for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
			mlx5e_destroy_tis(mdev, tisn[i][tc]);
}

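/* TX port affinity is only assigned when the device supports
 * lag_tx_port_affinity and more than one LAG port is in use.
 */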
static bool mlx5_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
}

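/* Create one TIS per (LAG port, traffic class) pair. Each TIS carries its
 * traffic class in the prio field and, when port affinity should be
 * assigned, is mapped to its LAG port. On failure, all TISes created so
 * far are destroyed before returning the error.
 */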
static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
{
	int tc, i;
	int err;

	for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
		for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
			u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
			void *tisc;

			tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

			MLX5_SET(tisc, tisc, prio, tc << 1);

			if (mlx5_lag_should_assign_affinity(mdev))
				MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);

			err = mlx5e_create_tis(mdev, in, &tisn[i][tc]);
			if (err)
				goto err_close_tises;
		}
	}

	return 0;

err_close_tises:
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			mlx5e_destroy_tis(mdev, tisn[i][tc]);
		tc = MLX5_MAX_NUM_TC;
	}

	return err;
}

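/* Allocate the HW objects shared by all netdevices on this device:
 * protection domain, transport domain, a whole-memory PA mkey, a bfreg and,
 * when requested, the per-port/per-TC TISes. Crypto DEK support is also
 * initialized; a failure there is logged but is not fatal. On error,
 * everything allocated so far is released in reverse order.
 *
 * Illustrative pairing (a sketch, not lifted from an actual caller):
 *
 *	err = mlx5e_create_mdev_resources(mdev, true);
 *	if (err)
 *		return err;
 *	...
 *	mlx5e_destroy_mdev_resources(mdev);
 */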
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
	struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
	int err;

	err = mlx5_core_alloc_pd(mdev, &res->pdn);
	if (err) {
		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
		return err;
	}

	err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
	if (err) {
		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
		goto err_dealloc_pd;
	}

	err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
	if (err) {
		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
		goto err_dealloc_transport_domain;
	}

	err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
	if (err) {
		mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
		goto err_destroy_mkey;
	}

	if (create_tises) {
		err = mlx5e_create_tises(mdev, res->tisn);
		if (err) {
			mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
			goto err_destroy_bfreg;
		}
		res->tisn_valid = true;
	}

	INIT_LIST_HEAD(&res->td.tirs_list);
	mutex_init(&res->td.list_lock);

	mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
	if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
		mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
			      PTR_ERR(mdev->mlx5e_res.dek_priv));
		mdev->mlx5e_res.dek_priv = NULL;
	}

	return 0;

err_destroy_bfreg:
	mlx5_free_bfreg(mdev, &res->bfreg);
err_destroy_mkey:
	mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, res->pdn);
	return err;
}

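/* Tear down everything set up by mlx5e_create_mdev_resources(), in reverse
 * order, and clear the shared HW objects structure.
 */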
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
	struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;

	mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
	mdev->mlx5e_res.dek_priv = NULL;
	if (res->tisn_valid)
		mlx5e_destroy_tises(mdev, res->tisn);
	mlx5_free_bfreg(mdev, &res->bfreg);
	mlx5_core_destroy_mkey(mdev, res->mkey);
	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
	mlx5_core_dealloc_pd(mdev, res->pdn);
	memset(res, 0, sizeof(*res));
}

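/* Walk the TIRs registered on the shared transport domain and re-apply the
 * self-loopback block configuration: unicast and/or multicast loopback
 * blocking as selected by the caller, pushed to each TIR with MODIFY_TIR.
 */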
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_tir *tir;
	u8 lb_flags = 0;
	int err = 0;
	u32 tirn = 0;
	int inlen;
	void *in;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	if (enable_uc_lb)
		lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;

	if (enable_mc_lb)
		lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;

	if (lb_flags)
		MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags);

	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);

	mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock);
	list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) {
		tirn = tir->tirn;
		err = mlx5_core_modify_tir(mdev, tirn, in);
		if (err)
			break;
	}
	mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);

	kvfree(in);
	if (err)
		netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);

	return err;
}