xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 
4 #include <linux/mlx5/driver.h>
5 #include "mlx5_ifc_vhca_event.h"
6 #include "mlx5_core.h"
7 #include "vhca_event.h"
8 #include "ecpf.h"
9 #define CREATE_TRACE_POINTS
10 #include "diag/vhca_tracepoint.h"
11 
/* Per-device VHCA state event dispatcher: receives VHCA_STATE_CHANGE
 * EQEs through @nb and fans them out to subscribers via @n_head.
 */
struct mlx5_vhca_state_notifier {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;	/* registered on the device EQ for VHCA_STATE_CHANGE */
	struct blocking_notifier_head n_head;	/* listener chain, called from work context */
};
17 
/* Deferred handling context for one VHCA state change event.
 * Allocated (GFP_ATOMIC) in the EQ notifier, freed by the work handler.
 */
struct mlx5_vhca_event_work {
	struct work_struct work;
	struct mlx5_vhca_state_notifier *notifier;
	struct mlx5_vhca_state_event event;	/* function_id filled from the EQE; rest queried later */
};
23 
/* One event-processing lane: a dedicated single-threaded workqueue. */
struct mlx5_vhca_event_handler {
	struct workqueue_struct *wq;
};
27 
/* Device-wide event processing context. Events are sharded across the
 * handlers by function_id % MLX5_DEV_MAX_WQS, keeping per-function
 * ordering while allowing parallelism across functions.
 */
struct mlx5_vhca_events {
	struct mlx5_core_dev *dev;
	struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS];
};
32 
/* Query the firmware VHCA state context of @function_id.
 * The raw QUERY_VHCA_STATE output is written to @out (@outlen bytes).
 * Returns 0 on success or a negative errno from the command interface.
 */
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
{
	u32 query_in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};

	MLX5_SET(query_vhca_state_in, query_in, embedded_cpu_function, 0);
	MLX5_SET(query_vhca_state_in, query_in, function_id, function_id);
	MLX5_SET(query_vhca_state_in, query_in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);

	return mlx5_cmd_exec(dev, query_in, sizeof(query_in), out, outlen);
}
43 
/* Execute MODIFY_VHCA_STATE for @function_id. @in is the caller-built
 * command payload of @inlen bytes; opcode and function addressing
 * fields are filled in here before execution.
 */
static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
				      u32 *in, u32 inlen)
{
	u32 cmd_out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};

	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
	MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
	MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);

	return mlx5_cmd_exec(dev, in, inlen, cmd_out, sizeof(cmd_out));
}
55 
/* Assign the software function id @sw_fn_id to the VHCA identified by
 * @function_id via MODIFY_VHCA_STATE.
 */
int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
	u32 cmd_out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};

	MLX5_SET(modify_vhca_state_in, cmd_in, embedded_cpu_function, 0);
	MLX5_SET(modify_vhca_state_in, cmd_in, function_id, function_id);
	MLX5_SET(modify_vhca_state_in, cmd_in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
	/* Select and set only the sw_function_id field of the state context. */
	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_field_select.sw_function_id, 1);
	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_context.sw_function_id, sw_fn_id);

	return mlx5_cmd_exec_inout(dev, modify_vhca_state, cmd_in, cmd_out);
}
69 
/* Re-arm the VHCA change event for @function_id so the next state
 * transition raises another event.
 */
int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};

	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_field_select.arm_change_event, 1);
	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_context.arm_change_event, 1);

	return mlx5_cmd_modify_vhca_state(dev, function_id, cmd_in, sizeof(cmd_in));
}
79 
80 static void
mlx5_vhca_event_notify(struct mlx5_core_dev * dev,struct mlx5_vhca_state_event * event)81 mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event)
82 {
83 	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
84 	int err;
85 
86 	err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out));
87 	if (err)
88 		return;
89 
90 	event->sw_function_id = MLX5_GET(query_vhca_state_out, out,
91 					 vhca_state_context.sw_function_id);
92 	event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
93 					 vhca_state_context.vhca_state);
94 
95 	mlx5_vhca_event_arm(dev, event->function_id);
96 	trace_mlx5_sf_vhca_event(dev, event);
97 
98 	blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
99 }
100 
mlx5_vhca_state_work_handler(struct work_struct * _work)101 static void mlx5_vhca_state_work_handler(struct work_struct *_work)
102 {
103 	struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
104 	struct mlx5_vhca_state_notifier *notifier = work->notifier;
105 	struct mlx5_core_dev *dev = notifier->dev;
106 
107 	mlx5_vhca_event_notify(dev, &work->event);
108 	kfree(work);
109 }
110 
mlx5_vhca_events_work_enqueue(struct mlx5_core_dev * dev,int idx,struct work_struct * work)111 void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)
112 {
113 	queue_work(dev->priv.vhca_events->handler[idx].wq, work);
114 }
115 
116 static int
mlx5_vhca_state_change_notifier(struct notifier_block * nb,unsigned long type,void * data)117 mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
118 {
119 	struct mlx5_vhca_state_notifier *notifier =
120 				mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
121 	struct mlx5_vhca_event_work *work;
122 	struct mlx5_eqe *eqe = data;
123 	int wq_idx;
124 
125 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
126 	if (!work)
127 		return NOTIFY_DONE;
128 	INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
129 	work->notifier = notifier;
130 	work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
131 	wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
132 	mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
133 	return NOTIFY_OK;
134 }
135 
mlx5_vhca_state_cap_handle(struct mlx5_core_dev * dev,void * set_hca_cap)136 void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
137 {
138 	if (!mlx5_vhca_event_supported(dev))
139 		return;
140 
141 	MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1);
142 	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1);
143 	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1);
144 	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1);
145 	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
146 }
147 
mlx5_vhca_event_init(struct mlx5_core_dev * dev)148 int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
149 {
150 	struct mlx5_vhca_state_notifier *notifier;
151 	char wq_name[MLX5_CMD_WQ_MAX_NAME];
152 	struct mlx5_vhca_events *events;
153 	int err, i;
154 
155 	if (!mlx5_vhca_event_supported(dev))
156 		return 0;
157 
158 	events = kzalloc(sizeof(*events), GFP_KERNEL);
159 	if (!events)
160 		return -ENOMEM;
161 
162 	events->dev = dev;
163 	dev->priv.vhca_events = events;
164 	for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
165 		snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
166 		events->handler[i].wq = create_singlethread_workqueue(wq_name);
167 		if (!events->handler[i].wq) {
168 			err = -ENOMEM;
169 			goto err_create_wq;
170 		}
171 	}
172 
173 	notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
174 	if (!notifier) {
175 		err = -ENOMEM;
176 		goto err_notifier;
177 	}
178 
179 	dev->priv.vhca_state_notifier = notifier;
180 	notifier->dev = dev;
181 	BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
182 	MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
183 	return 0;
184 
185 err_notifier:
186 err_create_wq:
187 	for (--i; i >= 0; i--)
188 		destroy_workqueue(events->handler[i].wq);
189 	kfree(events);
190 	return err;
191 }
192 
mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev * dev)193 void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev)
194 {
195 	struct mlx5_vhca_events *vhca_events;
196 	int i;
197 
198 	if (!mlx5_vhca_event_supported(dev))
199 		return;
200 
201 	vhca_events = dev->priv.vhca_events;
202 	for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
203 		flush_workqueue(vhca_events->handler[i].wq);
204 }
205 
mlx5_vhca_event_cleanup(struct mlx5_core_dev * dev)206 void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
207 {
208 	struct mlx5_vhca_events *vhca_events;
209 	int i;
210 
211 	if (!mlx5_vhca_event_supported(dev))
212 		return;
213 
214 	kfree(dev->priv.vhca_state_notifier);
215 	dev->priv.vhca_state_notifier = NULL;
216 	vhca_events = dev->priv.vhca_events;
217 	for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
218 		destroy_workqueue(vhca_events->handler[i].wq);
219 	kvfree(vhca_events);
220 }
221 
mlx5_vhca_event_start(struct mlx5_core_dev * dev)222 void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
223 {
224 	struct mlx5_vhca_state_notifier *notifier;
225 
226 	if (!dev->priv.vhca_state_notifier)
227 		return;
228 
229 	notifier = dev->priv.vhca_state_notifier;
230 	mlx5_eq_notifier_register(dev, &notifier->nb);
231 }
232 
mlx5_vhca_event_stop(struct mlx5_core_dev * dev)233 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
234 {
235 	struct mlx5_vhca_state_notifier *notifier;
236 
237 	if (!dev->priv.vhca_state_notifier)
238 		return;
239 
240 	notifier = dev->priv.vhca_state_notifier;
241 	mlx5_eq_notifier_unregister(dev, &notifier->nb);
242 }
243 
mlx5_vhca_event_notifier_register(struct mlx5_core_dev * dev,struct notifier_block * nb)244 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
245 {
246 	if (!dev->priv.vhca_state_notifier)
247 		return -EOPNOTSUPP;
248 	return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
249 }
250 
mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev * dev,struct notifier_block * nb)251 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
252 {
253 	blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
254 }
255