xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2024 Intel Corporation */
3 
4 #include "idpf.h"
5 #include "idpf_ptp.h"
6 #include "idpf_virtchnl.h"
7 
8 /**
9  * idpf_ptp_get_caps - Send virtchnl get PTP capabilities message
10  * @adapter: Driver specific private structure
11  *
12  * Send virtchnl get PTP capabilities message.
13  *
14  * Return: 0 on success, -errno on failure.
15  */
16 int idpf_ptp_get_caps(struct idpf_adapter *adapter)
17 {
18 	struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
19 	struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
20 		.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
21 				    VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
22 				    VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
23 				    VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
24 				    VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
25 				    VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
26 	};
27 	struct idpf_vc_xn_params xn_params = {
28 		.vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
29 		.send_buf.iov_base = &send_ptp_caps_msg,
30 		.send_buf.iov_len = sizeof(send_ptp_caps_msg),
31 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
32 	};
33 	struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
34 	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
35 	struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
36 	struct idpf_ptp_secondary_mbx *scnd_mbx;
37 	struct idpf_ptp *ptp = adapter->ptp;
38 	enum idpf_ptp_access access_type;
39 	u32 temp_offset;
40 	int reply_sz;
41 
42 	recv_ptp_caps_msg = kzalloc(sizeof(*recv_ptp_caps_msg),
43 				    GFP_KERNEL);
44 	if (!recv_ptp_caps_msg)
45 		return -ENOMEM;
46 
47 	xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
48 	xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
49 
50 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
51 	if (reply_sz < 0)
52 		return reply_sz;
53 	if (reply_sz != sizeof(*recv_ptp_caps_msg))
54 		return -EIO;
55 
56 	ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
57 	ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
58 	ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
59 
60 	scnd_mbx = &ptp->secondary_mbx;
61 	scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
62 
63 	/* If peer_mbx_q_id holds the invalid value (0xffff), the secondary
64 	 * mailbox is not supported.
65 	 */
66 	scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
67 	if (scnd_mbx->valid)
68 		scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;
69 
70 	/* Determine the access type for the PTP features */
71 	idpf_ptp_get_features_access(adapter);
72 
73 	access_type = ptp->get_dev_clk_time_access;
74 	if (access_type != IDPF_PTP_DIRECT)
75 		goto cross_tstamp;
76 
77 	clock_offsets = recv_ptp_caps_msg->clk_offsets;
78 
79 	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
80 	ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
81 							   temp_offset);
82 	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
83 	ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
84 							   temp_offset);
85 	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
86 	ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
87 							   temp_offset);
88 	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
89 	ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
90 							   temp_offset);
91 	temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
92 	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
93 
94 cross_tstamp:
95 	access_type = ptp->get_cross_tstamp_access;
96 	if (access_type != IDPF_PTP_DIRECT)
97 		goto discipline_clock;
98 
99 	cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;
100 
101 	temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
102 	ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
103 							    temp_offset);
104 	temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
105 	ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
106 							    temp_offset);
107 	temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
108 	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
109 
110 discipline_clock:
111 	access_type = ptp->adj_dev_clk_time_access;
112 	if (access_type != IDPF_PTP_DIRECT)
113 		return 0;
114 
115 	clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;
116 
117 	/* Device clock offsets */
118 	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
119 	ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
120 	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
121 	ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
122 	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
123 	ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
124 	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
125 	ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
126 	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
127 	ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);
128 
129 	/* PHY clock offsets */
130 	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
131 	ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
132 	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
133 	ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
134 							   temp_offset);
135 	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
136 	ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
137 							   temp_offset);
138 	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
139 	ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
140 	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
141 	ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);
142 
143 	return 0;
144 }
145 
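/*
 * Usage sketch (illustrative only, not part of the driver): PTP init code is
 * expected to negotiate capabilities once the virtchnl mailbox is up and then
 * pick, per feature, between direct register access and the mailbox fallback.
 * The surrounding caller and the dev_clk_time local are hypothetical; the
 * functions, fields and enum values are the ones used in this file.
 *
 *	err = idpf_ptp_get_caps(adapter);
 *	if (err)
 *		return err;
 *
 *	if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_DIRECT)
 *		err = idpf_ptp_get_dev_clk_time(adapter, &dev_clk_time);
 */
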
146 /**
147  * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
148  * @adapter: Driver specific private structure
149  * @dev_clk_time: Pointer to the device clock structure where the value is set
150  *
151  * Send virtchnl get time message to get the time of the clock.
152  *
153  * Return: 0 on success, -errno otherwise.
154  */
155 int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
156 			      struct idpf_ptp_dev_timers *dev_clk_time)
157 {
158 	struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
159 	struct idpf_vc_xn_params xn_params = {
160 		.vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
161 		.send_buf.iov_base = &get_dev_clk_time_msg,
162 		.send_buf.iov_len = sizeof(get_dev_clk_time_msg),
163 		.recv_buf.iov_base = &get_dev_clk_time_msg,
164 		.recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
165 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
166 	};
167 	int reply_sz;
168 	u64 dev_time;
169 
170 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
171 	if (reply_sz < 0)
172 		return reply_sz;
173 	if (reply_sz != sizeof(get_dev_clk_time_msg))
174 		return -EIO;
175 
176 	dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
177 	dev_clk_time->dev_clk_time_ns = dev_time;
178 
179 	return 0;
180 }
181 
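/*
 * Consumption sketch (illustrative): a mailbox-based gettime callback would
 * presumably wrap this helper and convert the result with ns_to_timespec64().
 * The callback itself lives outside this file and may differ; only
 * idpf_ptp_get_dev_clk_time() and struct idpf_ptp_dev_timers are taken from
 * the driver.
 *
 *	struct idpf_ptp_dev_timers clk_time;
 *	int err;
 *
 *	err = idpf_ptp_get_dev_clk_time(adapter, &clk_time);
 *	if (err)
 *		return err;
 *	*ts = ns_to_timespec64(clk_time.dev_clk_time_ns);
 */
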
182 /**
183  * idpf_ptp_get_cross_time - Send virtchnl get cross time message
184  * @adapter: Driver specific private structure
185  * @cross_time: Pointer to the structure where device and system times are set
186  *
187  * Send virtchnl get cross time message to get the time of the clock and the
188  * system time.
189  *
190  * Return: 0 on success, -errno otherwise.
191  */
192 int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
193 			    struct idpf_ptp_dev_timers *cross_time)
194 {
195 	struct virtchnl2_ptp_get_cross_time cross_time_msg;
196 	struct idpf_vc_xn_params xn_params = {
197 		.vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
198 		.send_buf.iov_base = &cross_time_msg,
199 		.send_buf.iov_len = sizeof(cross_time_msg),
200 		.recv_buf.iov_base = &cross_time_msg,
201 		.recv_buf.iov_len = sizeof(cross_time_msg),
202 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
203 	};
204 	int reply_sz;
205 
206 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
207 	if (reply_sz < 0)
208 		return reply_sz;
209 	if (reply_sz != sizeof(cross_time_msg))
210 		return -EIO;
211 
212 	cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
213 	cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);
214 
215 	return 0;
216 }
217 
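/*
 * Consumption sketch (illustrative): a cross-timestamp consumer such as a
 * getcrosststamp callback is expected to pair the two returned values, here
 * assuming sys_time_ns is already expressed as CLOCK_REALTIME nanoseconds
 * (the actual conversion in the driver may take a different path):
 *
 *	err = idpf_ptp_get_cross_time(adapter, &cross_time);
 *	if (err)
 *		return err;
 *	cts->device = ns_to_ktime(cross_time.dev_clk_time_ns);
 *	cts->sys_realtime = ns_to_ktime(cross_time.sys_time_ns);
 */
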
218 /**
219  * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
220  * @adapter: Driver specific private structure
221  * @time: New time value in nanoseconds
222  *
223  * Send virtchnl set time message to set the time of the clock.
224  *
225  * Return: 0 on success, -errno otherwise.
226  */
227 int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
228 {
229 	struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
230 		.dev_time_ns = cpu_to_le64(time),
231 	};
232 	struct idpf_vc_xn_params xn_params = {
233 		.vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
234 		.send_buf.iov_base = &set_dev_clk_time_msg,
235 		.send_buf.iov_len = sizeof(set_dev_clk_time_msg),
236 		.recv_buf.iov_base = &set_dev_clk_time_msg,
237 		.recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
238 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
239 	};
240 	int reply_sz;
241 
242 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
243 	if (reply_sz < 0)
244 		return reply_sz;
245 	if (reply_sz != sizeof(set_dev_clk_time_msg))
246 		return -EIO;
247 
248 	return 0;
249 }
250 
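/*
 * Consumption sketch (illustrative): a mailbox-based settime callback would
 * typically convert the requested timespec with timespec64_to_ns() before
 * calling this helper; the caller and the ts pointer are hypothetical.
 *
 *	err = idpf_ptp_set_dev_clk_time(adapter, timespec64_to_ns(ts));
 */
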
251 /**
252  * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
253  * @adapter: Driver specific private structure
254  * @delta: Offset in nanoseconds to adjust the time by
255  *
256  * Send virtchnl adj time message to adjust the clock by the indicated delta.
257  *
258  * Return: 0 on success, -errno otherwise.
259  */
260 int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
261 {
262 	struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
263 		.delta = cpu_to_le64(delta),
264 	};
265 	struct idpf_vc_xn_params xn_params = {
266 		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
267 		.send_buf.iov_base = &adj_dev_clk_time_msg,
268 		.send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
269 		.recv_buf.iov_base = &adj_dev_clk_time_msg,
270 		.recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
271 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
272 	};
273 	int reply_sz;
274 
275 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
276 	if (reply_sz < 0)
277 		return reply_sz;
278 	if (reply_sz != sizeof(adj_dev_clk_time_msg))
279 		return -EIO;
280 
281 	return 0;
282 }
283 
284 /**
285  * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj device clock fine message
286  * @adapter: Driver specific private structure
287  * @incval: Source timer increment value per clock cycle
288  *
289  * Send virtchnl adj fine message to adjust the frequency of the clock by
290  * incval.
291  *
292  * Return: 0 on success, -errno otherwise.
293  */
294 int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
295 {
296 	struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
297 		.incval = cpu_to_le64(incval),
298 	};
299 	struct idpf_vc_xn_params xn_params = {
300 		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
301 		.send_buf.iov_base = &adj_dev_clk_fine_msg,
302 		.send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
303 		.recv_buf.iov_base = &adj_dev_clk_fine_msg,
304 		.recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
305 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
306 	};
307 	int reply_sz;
308 
309 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
310 	if (reply_sz < 0)
311 		return reply_sz;
312 	if (reply_sz != sizeof(adj_dev_clk_fine_msg))
313 		return -EIO;
314 
315 	return 0;
316 }
317 
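/*
 * Consumption sketch (illustrative): an adjfine-style caller would normally
 * derive the new increment value from the base increment negotiated by
 * idpf_ptp_get_caps() and the scaled-ppm argument, using the
 * adjust_by_scaled_ppm() helper from <linux/ptp_clock_kernel.h>:
 *
 *	u64 incval = adjust_by_scaled_ppm(adapter->ptp->base_incval,
 *					  scaled_ppm);
 *
 *	err = idpf_ptp_adj_dev_clk_fine(adapter, incval);
 */
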
318 /**
319  * idpf_ptp_get_vport_tstamps_caps - Send virtchnl get vport tstamp caps message
320  * @vport: Virtual port structure
321  *
322  * Send virtchnl get vport tstamps caps message to receive the set of tstamp
323  * capabilities per vport.
324  *
325  * Return: 0 on success, -errno otherwise.
326  */
327 int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
328 {
329 	struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
330 	struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
331 	struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
332 	struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
333 	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
334 	struct idpf_vc_xn_params xn_params = {
335 		.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
336 		.send_buf.iov_base = &send_tx_tstamp_caps,
337 		.send_buf.iov_len = sizeof(send_tx_tstamp_caps),
338 		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
339 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
340 	};
341 	enum idpf_ptp_access tstamp_access, get_dev_clk_access;
342 	struct idpf_ptp *ptp = vport->adapter->ptp;
343 	struct list_head *head;
344 	int err = 0, reply_sz;
345 	u16 num_latches;
346 	u32 size;
347 
348 	if (!ptp)
349 		return -EOPNOTSUPP;
350 
351 	tstamp_access = ptp->tx_tstamp_access;
352 	get_dev_clk_access = ptp->get_dev_clk_time_access;
353 	if (tstamp_access == IDPF_PTP_NONE ||
354 	    get_dev_clk_access == IDPF_PTP_NONE)
355 		return -EOPNOTSUPP;
356 
357 	rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
358 	if (!rcv_tx_tstamp_caps)
359 		return -ENOMEM;
360 
361 	send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
362 	xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
363 
364 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
365 	if (reply_sz < 0) {
366 		err = reply_sz;
367 		goto get_tstamp_caps_out;
368 	}
369 
370 	num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
371 	size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
372 	if (reply_sz != size) {
373 		err = -EIO;
374 		goto get_tstamp_caps_out;
375 	}
376 
377 	size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
378 	tstamp_caps = kzalloc(size, GFP_KERNEL);
379 	if (!tstamp_caps) {
380 		err = -ENOMEM;
381 		goto get_tstamp_caps_out;
382 	}
383 
384 	tstamp_caps->access = true;
385 	tstamp_caps->num_entries = num_latches;
386 
387 	INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
388 	INIT_LIST_HEAD(&tstamp_caps->latches_free);
389 
390 	spin_lock_init(&tstamp_caps->latches_lock);
391 	spin_lock_init(&tstamp_caps->status_lock);
392 
393 	tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
394 
395 	for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
396 		__le32 offset_l, offset_h;
397 
398 		ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
399 		if (!ptp_tx_tstamp) {
400 			err = -ENOMEM;
401 			goto err_free_ptp_tx_tstamp_list;
402 		}
403 
404 		tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
405 
406 		if (tstamp_access != IDPF_PTP_DIRECT)
407 			goto skip_offsets;
408 
409 		offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
410 		offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
411 		ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
412 		ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
413 
414 skip_offsets:
415 		ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
416 
417 		list_add(&ptp_tx_tstamp->list_member,
418 			 &tstamp_caps->latches_free);
419 
420 		tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
421 	}
422 
423 	vport->tx_tstamp_caps = tstamp_caps;
424 	kfree(rcv_tx_tstamp_caps);
425 
426 	return 0;
427 
428 err_free_ptp_tx_tstamp_list:
429 	head = &tstamp_caps->latches_free;
430 	list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
431 		list_del(&ptp_tx_tstamp->list_member);
432 		kfree(ptp_tx_tstamp);
433 	}
434 
435 	kfree(tstamp_caps);
436 get_tstamp_caps_out:
437 	kfree(rcv_tx_tstamp_caps);
438 
439 	return err;
440 }
441 
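/*
 * Note on the bookkeeping set up above: every latch index reported by the
 * Control Plane gets one struct idpf_ptp_tx_tstamp node. All nodes start on
 * latches_free; the timestamp request path (outside this file) is expected
 * to move a node to latches_in_use for its skb, and
 * idpf_ptp_get_tstamp_value() below returns it to latches_free once the
 * value has been delivered. The lists are protected by latches_lock, the
 * per-skb tx_tstamp_status[] entries by status_lock.
 */
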
442 /**
443  * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker state
444  *				    for a given skb
445  * @caps: Tx timestamp capabilities that monitor the latch status
446  * @skb: skb for which the tstamp value is returned through virtchnl message
447  * @current_state: Current state of the Tx timestamp latch
448  * @expected_state: Expected state of the Tx timestamp latch
449  *
450  * Find the tracker entry that belongs to the given skb and is in
451  * @current_state, then move it to @expected_state.
452  *
453  * Return: true if the tracker has been found and updated, false otherwise.
454  */
455 static bool
456 idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
457 			       struct sk_buff *skb,
458 			       enum idpf_ptp_tx_tstamp_state current_state,
459 			       enum idpf_ptp_tx_tstamp_state expected_state)
460 {
461 	bool updated = false;
462 
463 	spin_lock(&caps->status_lock);
464 	for (u16 i = 0; i < caps->num_entries; i++) {
465 		struct idpf_ptp_tx_tstamp_status *status;
466 
467 		status = &caps->tx_tstamp_status[i];
468 
469 		if (skb == status->skb && status->state == current_state) {
470 			status->state = expected_state;
471 			updated = true;
472 			break;
473 		}
474 	}
475 	spin_unlock(&caps->status_lock);
476 
477 	return updated;
478 }
479 
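/*
 * Latch state transitions driven through idpf_ptp_update_tstamp_tracker();
 * the initial FREE -> REQUEST step is presumably performed by the timestamp
 * request path outside this file:
 *
 *	IDPF_PTP_FREE       -> IDPF_PTP_REQUEST     timestamp requested for skb
 *	IDPF_PTP_REQUEST    -> IDPF_PTP_READ_VALUE  idpf_ptp_get_tx_tstamp()
 *	IDPF_PTP_READ_VALUE -> IDPF_PTP_FREE        idpf_ptp_get_tstamp_value()
 */
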
480 /**
481  * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
482  *			       back to the skb.
483  * @vport: Virtual port structure
484  * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
485  * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
486  *
487  * Read the value of the Tx timestamp for a given latch received from the
488  * Control Plane, extend it to 64 bits and provide it back to the skb.
489  *
490  * Return: 0 on success, -errno otherwise.
491  */
492 static int
493 idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
494 			  struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
495 			  struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
496 {
497 	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
498 	struct skb_shared_hwtstamps shhwtstamps;
499 	bool state_upd = false;
500 	u8 tstamp_ns_lo_bit;
501 	u64 tstamp;
502 
503 	tx_tstamp_caps = vport->tx_tstamp_caps;
504 	tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
505 
506 	ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
507 	ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
508 
509 	state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
510 						   ptp_tx_tstamp->skb,
511 						   IDPF_PTP_READ_VALUE,
512 						   IDPF_PTP_FREE);
513 	if (!state_upd)
514 		return -EINVAL;
515 
516 	tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
517 	shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
518 	skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
519 	consume_skb(ptp_tx_tstamp->skb);
520 
521 	list_add(&ptp_tx_tstamp->list_member,
522 		 &tx_tstamp_caps->latches_free);
523 
524 	return 0;
525 }
526 
527 /**
528  * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
529  * @adapter: Driver specific private structure
530  * @xn: transaction for message
531  * @ctlq_msg: received message
532  *
533  * Read the Tx timestamp values from the received message and report them
534  * directly through the matching skbs. The number of timestamps to read is
535  * specified by the virtchnl message.
536  *
537  * Return: 0 on success, -errno otherwise.
538  */
539 static int
540 idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
541 				     struct idpf_vc_xn *xn,
542 				     const struct idpf_ctlq_msg *ctlq_msg)
543 {
544 	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
545 	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
546 	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
547 	struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
548 	struct idpf_vport *tstamp_vport = NULL;
549 	struct list_head *head;
550 	u16 num_latches;
551 	u32 vport_id;
552 	int err = 0;
553 
554 	recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
555 	vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);
556 
557 	idpf_for_each_vport(adapter, vport) {
558 		if (!vport)
559 			continue;
560 
561 		if (vport->vport_id == vport_id) {
562 			tstamp_vport = vport;
563 			break;
564 		}
565 	}
566 
567 	if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
568 		return -EINVAL;
569 
570 	tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
571 	num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);
572 
573 	spin_lock_bh(&tx_tstamp_caps->latches_lock);
574 	head = &tx_tstamp_caps->latches_in_use;
575 
576 	for (u16 i = 0; i < num_latches; i++) {
577 		tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];
578 
579 		if (!tstamp_latch.valid)
580 			continue;
581 
582 		if (list_empty(head)) {
583 			err = -ENOBUFS;
584 			goto unlock;
585 		}
586 
587 		list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
588 			if (tstamp_latch.index == tx_tstamp->idx) {
589 				list_del(&tx_tstamp->list_member);
590 				err = idpf_ptp_get_tstamp_value(tstamp_vport,
591 								&tstamp_latch,
592 								tx_tstamp);
593 				if (err)
594 					goto unlock;
595 
596 				break;
597 			}
598 		}
599 	}
600 
601 unlock:
602 	spin_unlock_bh(&tx_tstamp_caps->latches_lock);
603 
604 	return err;
605 }
606 
607 /**
608  * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
609  * @vport: Virtual port structure
610  *
611  * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
612  * The message contains a list of the latch indexes set in the Tx descriptors.
613  *
614  * Return: 0 on success, -errno otherwise.
615  */
616 int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
617 {
618 	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
619 	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
620 	struct idpf_vc_xn_params xn_params = {
621 		.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
622 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
623 		.async = true,
624 		.async_handler = idpf_ptp_get_tx_tstamp_async_handler,
625 	};
626 	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
627 	int reply_sz, size, msg_size;
628 	struct list_head *head;
629 	bool state_upd;
630 	u16 id = 0;
631 
632 	tx_tstamp_caps = vport->tx_tstamp_caps;
633 	head = &tx_tstamp_caps->latches_in_use;
634 
635 	size = struct_size(send_tx_tstamp_msg, tstamp_latches,
636 			   tx_tstamp_caps->num_entries);
637 	send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
638 	if (!send_tx_tstamp_msg)
639 		return -ENOMEM;
640 
641 	spin_lock_bh(&tx_tstamp_caps->latches_lock);
642 	list_for_each_entry(ptp_tx_tstamp, head, list_member) {
643 		u8 idx;
644 
645 		state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
646 							   ptp_tx_tstamp->skb,
647 							   IDPF_PTP_REQUEST,
648 							   IDPF_PTP_READ_VALUE);
649 		if (!state_upd)
650 			continue;
651 
652 		idx = ptp_tx_tstamp->idx;
653 		send_tx_tstamp_msg->tstamp_latches[id].index = idx;
654 		id++;
655 	}
656 	spin_unlock_bh(&tx_tstamp_caps->latches_lock);
657 
658 	msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
659 	send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
660 	send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
661 	xn_params.send_buf.iov_base = send_tx_tstamp_msg;
662 	xn_params.send_buf.iov_len = msg_size;
663 
664 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
665 	kfree(send_tx_tstamp_msg);
666 
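	/*
	 * The transaction is asynchronous: a negative reply_sz is the errno
	 * from posting the message, while the latches themselves are consumed
	 * later by idpf_ptp_get_tx_tstamp_async_handler(), so a non-negative
	 * value only means the request was sent.
	 */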
667 	return min(reply_sz, 0);
668 }
669