/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

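/*
 * SR-IOV virtual function (VF) administration callbacks for the mlx5
 * IB driver: link-state policy, node/port GUID assignment, and per-VF
 * traffic counters, all driven from the PF.
 *
 * Illustrative userspace triggers via iproute2 on the PF netdev (shown
 * for context only, not part of this file):
 *
 *	ip link set dev <pf> vf 0 state auto
 *	ip link set dev <pf> vf 0 node_guid 00:11:22:33:44:55:66:77
 */
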
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"

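/*
 * Map the device's port-state policy onto the rtnetlink
 * IFLA_VF_LINK_STATE_* encoding. __IFLA_VF_LINK_STATE_MAX is returned
 * as an "unknown policy" sentinel for the caller to reject.
 */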
static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
{
	switch (mlx_policy) {
	case MLX5_POLICY_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_POLICY_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	case MLX5_POLICY_FOLLOW:
		return IFLA_VF_LINK_STATE_AUTO;
	default:
		return __IFLA_VF_LINK_STATE_MAX;
	}
}

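/**
 * mlx5_ib_get_vf_config - report a VF's link-state policy to the RDMA core
 * @device: IB device the VF belongs to
 * @vf: 0-based VF index (vport @vf + 1 is queried; vport 0 is the PF)
 * @port: port number (unused here; the query always targets port 1)
 * @info: zeroed and filled with the VF's link state on success
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL if the
 * device reports a policy with no netlink equivalent, or the error from
 * querying the HCA vport context.
 */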
int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port,
			  struct ifla_vf_info *info)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
	if (err) {
		mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
			     vf, err);
		goto free;
	}
	memset(info, 0, sizeof(*info));
	info->linkstate = mlx_to_net_policy(rep->policy);
	if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
		err = -EINVAL;

free:
	kfree(rep);
	return err;
}

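/*
 * Inverse of mlx_to_net_policy(): translate an IFLA_VF_LINK_STATE_*
 * request into the device policy, or MLX5_POLICY_INVALID for values we
 * do not support.
 */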
static inline enum port_state_policy net_to_mlx_policy(int policy)
{
	switch (policy) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_POLICY_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_POLICY_UP;
	case IFLA_VF_LINK_STATE_AUTO:
		return MLX5_POLICY_FOLLOW;
	default:
		return MLX5_POLICY_INVALID;
	}
}

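/**
 * mlx5_ib_set_vf_link_state - apply an IFLA_VF_LINK_STATE_* policy to a VF
 * @device: IB device the VF belongs to
 * @vf: 0-based VF index (vport @vf + 1 is modified)
 * @port: port number (unused here; the modify always targets port 1)
 * @state: requested IFLA_VF_LINK_STATE_* value
 *
 * On success the new policy is also cached in the core driver's per-VF
 * context so later queries see it.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL for an unsupported @state, or
 * the error from modifying the HCA vport context.
 */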
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->policy = net_to_mlx_policy(state);
	if (in->policy == MLX5_POLICY_INVALID) {
		err = -EINVAL;
		goto out;
	}
	in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err)
		vfs_ctx[vf].policy = in->policy;

out:
	kfree(in);
	return err;
}

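/**
 * mlx5_ib_get_vf_stats - read a VF's IB traffic counters
 * @device: IB device the VF belongs to
 * @vf: 0-based VF index
 * @port: port number, passed through to the vport-counter query
 * @stats: filled with the VF's IB unicast packet/octet counters and
 *         received IB multicast packet count
 *
 * Return: 0 on success, -ENOMEM, or the error from the firmware query.
 */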
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *dev;
	void *out;
	int err;

	dev = to_mdev(device);
	mdev = dev->mdev;

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
	if (err)
		goto ex;

	stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
	stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
	stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
	stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
	stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);

ex:
	kfree(out);
	return err;
}

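/* Program a VF's node GUID and cache it for mlx5_ib_get_vf_guid(). */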
static int set_vf_node_guid(struct ib_device *device, int vf, u32 port,
			    u64 guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
	in->node_guid = guid;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err) {
		vfs_ctx[vf].node_guid = guid;
		vfs_ctx[vf].node_guid_valid = 1;
	}
	kfree(in);
	return err;
}

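/* Program a VF's port GUID and cache it for mlx5_ib_get_vf_guid(). */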
static int set_vf_port_guid(struct ib_device *device, int vf, u32 port,
			    u64 guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
	in->port_guid = guid;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err) {
		vfs_ctx[vf].port_guid = guid;
		vfs_ctx[vf].port_guid_valid = 1;
	}
	kfree(in);
	return err;
}

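/**
 * mlx5_ib_set_vf_guid - set a VF's node or port GUID
 * @device: IB device the VF belongs to
 * @vf: 0-based VF index
 * @port: port number (unused here; the modify always targets port 1)
 * @guid: GUID value to program
 * @type: IFLA_VF_IB_NODE_GUID or IFLA_VF_IB_PORT_GUID
 *
 * Return: 0 on success, -EINVAL for any other @type, or the error from
 * the corresponding helper.
 */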
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type)
{
	if (type == IFLA_VF_IB_NODE_GUID)
		return set_vf_node_guid(device, vf, port, guid);
	else if (type == IFLA_VF_IB_PORT_GUID)
		return set_vf_port_guid(device, vf, port, guid);

	return -EINVAL;
}

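/**
 * mlx5_ib_get_vf_guid - read back the GUIDs cached by mlx5_ib_set_vf_guid()
 * @device: IB device the VF belongs to
 * @vf: 0-based VF index
 * @port: port number (unused; the cached values are not per-port)
 * @node_guid: set to the cached node GUID, or 0 if never assigned
 * @port_guid: set to the cached port GUID, or 0 if never assigned
 *
 * Return: always 0.
 */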
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;

	node_guid->guid =
		vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
	port_guid->guid =
		vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;

	return 0;
}
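
/*
 * A minimal sketch of how these callbacks reach the RDMA core. The
 * mlx5_ib driver registers them from its main.c; the struct and call
 * below are shown as an illustration, not compiled in this file:
 *
 *	static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
 *		.get_vf_config = mlx5_ib_get_vf_config,
 *		.get_vf_guid = mlx5_ib_get_vf_guid,
 *		.get_vf_stats = mlx5_ib_get_vf_stats,
 *		.set_vf_guid = mlx5_ib_set_vf_guid,
 *		.set_vf_link_state = mlx5_ib_set_vf_link_state,
 *	};
 *
 *	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
 */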