1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2024 Intel Corporation */
3
4 #include "idpf.h"
5 #include "idpf_ptp.h"
6 #include "idpf_virtchnl.h"
7
8 /**
9 * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
10 * @adapter: Driver specific private structure
11 *
12 * Send virtchnl get PTP capabilities message.
13 *
14 * Return: 0 on success, -errno on failure.
15 */
int idpf_ptp_get_caps(struct idpf_adapter *adapter)
{
	/* Reply buffer; automatically kfree'd on every return path */
	struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
	/* Advertise every PTP capability this driver can make use of */
	struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
		.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
				    VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
				    VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
				    VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
				    VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
				    VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
		.send_buf.iov_base = &send_ptp_caps_msg,
		.send_buf.iov_len = sizeof(send_ptp_caps_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
	struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
	struct idpf_ptp_secondary_mbx *scnd_mbx;
	struct idpf_ptp *ptp = adapter->ptp;
	enum idpf_ptp_access access_type;
	u32 temp_offset;
	int reply_sz;

	recv_ptp_caps_msg = kzalloc_obj(struct virtchnl2_ptp_get_caps);
	if (!recv_ptp_caps_msg)
		return -ENOMEM;

	xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
	xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);

	/* A reply of any other size than the full caps structure is treated
	 * as a malformed answer from the Control Plane.
	 */
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	else if (reply_sz != sizeof(*recv_ptp_caps_msg))
		return -EIO;

	/* Cache the negotiated capabilities and base clock parameters */
	ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
	ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
	ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);

	scnd_mbx = &ptp->secondary_mbx;
	scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);

	/* if the ptp_mb_q_id holds invalid value (0xffff), the secondary
	 * mailbox is not supported.
	 */
	scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
	if (scnd_mbx->valid)
		scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;

	/* Determine the access type for the PTP features */
	idpf_ptp_get_features_access(adapter);

	/* The register offsets from the reply are only consumed for
	 * features with direct access; mailbox-accessed features skip
	 * their respective section below.
	 */
	access_type = ptp->get_dev_clk_time_access;
	if (access_type != IDPF_PTP_DIRECT)
		goto cross_tstamp;

	/* Map device/PHY clock read registers for direct access */
	clock_offsets = recv_ptp_caps_msg->clk_offsets;

	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
	ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
	ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
	ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
	ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);

cross_tstamp:
	access_type = ptp->get_cross_tstamp_access;
	if (access_type != IDPF_PTP_DIRECT)
		goto discipline_clock;

	/* Map cross-timestamp (device/system time) registers */
	cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;

	temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
	ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
							    temp_offset);
	temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
	ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
							    temp_offset);
	temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);

discipline_clock:
	access_type = ptp->adj_dev_clk_time_access;
	if (access_type != IDPF_PTP_DIRECT)
		return 0;

	clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;

	/* Device clock offsets */
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
	ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
	ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
	ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
	ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
	ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);

	/* PHY clock offsets */
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
	ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
	ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
	ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
	ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
	ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);

	return 0;
}
144
145 /**
146 * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
147 * @adapter: Driver specific private structure
148 * @dev_clk_time: Pointer to the device clock structure where the value is set
149 *
150 * Send virtchnl get time message to get the time of the clock.
151 *
152 * Return: 0 on success, -errno otherwise.
153 */
idpf_ptp_get_dev_clk_time(struct idpf_adapter * adapter,struct idpf_ptp_dev_timers * dev_clk_time)154 int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
155 struct idpf_ptp_dev_timers *dev_clk_time)
156 {
157 struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
158 struct idpf_vc_xn_params xn_params = {
159 .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
160 .send_buf.iov_base = &get_dev_clk_time_msg,
161 .send_buf.iov_len = sizeof(get_dev_clk_time_msg),
162 .recv_buf.iov_base = &get_dev_clk_time_msg,
163 .recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
164 .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
165 };
166 int reply_sz;
167 u64 dev_time;
168
169 reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
170 if (reply_sz < 0)
171 return reply_sz;
172 if (reply_sz != sizeof(get_dev_clk_time_msg))
173 return -EIO;
174
175 dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
176 dev_clk_time->dev_clk_time_ns = dev_time;
177
178 return 0;
179 }
180
181 /**
182 * idpf_ptp_get_cross_time - Send virtchnl get cross time message
183 * @adapter: Driver specific private structure
184 * @cross_time: Pointer to the device clock structure where the value is set
185 *
186 * Send virtchnl get cross time message to get the time of the clock and the
187 * system time.
188 *
189 * Return: 0 on success, -errno otherwise.
190 */
idpf_ptp_get_cross_time(struct idpf_adapter * adapter,struct idpf_ptp_dev_timers * cross_time)191 int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
192 struct idpf_ptp_dev_timers *cross_time)
193 {
194 struct virtchnl2_ptp_get_cross_time cross_time_msg;
195 struct idpf_vc_xn_params xn_params = {
196 .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
197 .send_buf.iov_base = &cross_time_msg,
198 .send_buf.iov_len = sizeof(cross_time_msg),
199 .recv_buf.iov_base = &cross_time_msg,
200 .recv_buf.iov_len = sizeof(cross_time_msg),
201 .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
202 };
203 int reply_sz;
204
205 reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
206 if (reply_sz < 0)
207 return reply_sz;
208 if (reply_sz != sizeof(cross_time_msg))
209 return -EIO;
210
211 cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
212 cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);
213
214 return 0;
215 }
216
217 /**
218 * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
219 * @adapter: Driver specific private structure
220 * @time: New time value
221 *
222 * Send virtchnl set time message to set the time of the clock.
223 *
224 * Return: 0 on success, -errno otherwise.
225 */
int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
{
	/* Request carries the new time; reply reuses the same buffer */
	struct virtchnl2_ptp_set_dev_clk_time msg = {
		.dev_time_ns = cpu_to_le64(time),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
		.send_buf.iov_base = &msg,
		.send_buf.iov_len = sizeof(msg),
		.recv_buf.iov_base = &msg,
		.recv_buf.iov_len = sizeof(msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int ret;

	ret = idpf_vc_xn_exec(adapter, &xn_params);
	if (ret < 0)
		return ret;

	/* Only a full-sized reply counts as success */
	return ret == sizeof(msg) ? 0 : -EIO;
}
249
250 /**
251 * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
252 * @adapter: Driver specific private structure
253 * @delta: Offset in nanoseconds to adjust the time by
254 *
255 * Send virtchnl adj time message to adjust the clock by the indicated delta.
256 *
257 * Return: 0 on success, -errno otherwise.
258 */
int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
{
	/* Request carries the signed delta; reply reuses the same buffer */
	struct virtchnl2_ptp_adj_dev_clk_time msg = {
		.delta = cpu_to_le64(delta),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
		.send_buf.iov_base = &msg,
		.send_buf.iov_len = sizeof(msg),
		.recv_buf.iov_base = &msg,
		.recv_buf.iov_len = sizeof(msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int ret;

	ret = idpf_vc_xn_exec(adapter, &xn_params);
	if (ret < 0)
		return ret;

	/* Only a full-sized reply counts as success */
	return ret == sizeof(msg) ? 0 : -EIO;
}
282
283 /**
284 * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
285 * @adapter: Driver specific private structure
286 * @incval: Source timer increment value per clock cycle
287 *
288 * Send virtchnl adj fine message to adjust the frequency of the clock by
289 * incval.
290 *
291 * Return: 0 on success, -errno otherwise.
292 */
int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
{
	/* Request carries the new increment value; reply reuses the buffer */
	struct virtchnl2_ptp_adj_dev_clk_fine msg = {
		.incval = cpu_to_le64(incval),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
		.send_buf.iov_base = &msg,
		.send_buf.iov_len = sizeof(msg),
		.recv_buf.iov_base = &msg,
		.recv_buf.iov_len = sizeof(msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int ret;

	ret = idpf_vc_xn_exec(adapter, &xn_params);
	if (ret < 0)
		return ret;

	/* Only a full-sized reply counts as success */
	return ret == sizeof(msg) ? 0 : -EIO;
}
316
317 /**
318 * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
319 * @vport: Virtual port structure
320 *
321 * Send virtchnl get vport tstamps caps message to receive the set of tstamp
322 * capabilities per vport.
323 *
324 * Return: 0 on success, -errno otherwise.
325 */
idpf_ptp_get_vport_tstamps_caps(struct idpf_vport * vport)326 int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
327 {
328 struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
329 struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
330 struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
331 struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
332 struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
333 struct idpf_vc_xn_params xn_params = {
334 .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
335 .send_buf.iov_base = &send_tx_tstamp_caps,
336 .send_buf.iov_len = sizeof(send_tx_tstamp_caps),
337 .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
338 .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
339 };
340 enum idpf_ptp_access tstamp_access, get_dev_clk_access;
341 struct idpf_ptp *ptp = vport->adapter->ptp;
342 struct list_head *head;
343 int err = 0, reply_sz;
344 u16 num_latches;
345 u32 size;
346
347 if (!ptp)
348 return -EOPNOTSUPP;
349
350 tstamp_access = ptp->tx_tstamp_access;
351 get_dev_clk_access = ptp->get_dev_clk_time_access;
352 if (tstamp_access == IDPF_PTP_NONE ||
353 get_dev_clk_access == IDPF_PTP_NONE)
354 return -EOPNOTSUPP;
355
356 rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
357 if (!rcv_tx_tstamp_caps)
358 return -ENOMEM;
359
360 send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
361 xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
362
363 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
364 if (reply_sz < 0) {
365 err = reply_sz;
366 goto get_tstamp_caps_out;
367 }
368
369 num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
370 size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
371 if (reply_sz != size) {
372 err = -EIO;
373 goto get_tstamp_caps_out;
374 }
375
376 size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
377 tstamp_caps = kzalloc(size, GFP_KERNEL);
378 if (!tstamp_caps) {
379 err = -ENOMEM;
380 goto get_tstamp_caps_out;
381 }
382
383 tstamp_caps->access = true;
384 tstamp_caps->num_entries = num_latches;
385
386 INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
387 INIT_LIST_HEAD(&tstamp_caps->latches_free);
388
389 spin_lock_init(&tstamp_caps->latches_lock);
390 spin_lock_init(&tstamp_caps->status_lock);
391
392 tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
393
394 for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
395 __le32 offset_l, offset_h;
396
397 ptp_tx_tstamp = kzalloc_obj(*ptp_tx_tstamp);
398 if (!ptp_tx_tstamp) {
399 err = -ENOMEM;
400 goto err_free_ptp_tx_stamp_list;
401 }
402
403 tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
404
405 if (tstamp_access != IDPF_PTP_DIRECT)
406 goto skip_offsets;
407
408 offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
409 offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
410 ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
411 ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
412
413 skip_offsets:
414 ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
415
416 list_add(&ptp_tx_tstamp->list_member,
417 &tstamp_caps->latches_free);
418
419 tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
420 }
421
422 vport->tx_tstamp_caps = tstamp_caps;
423 kfree(rcv_tx_tstamp_caps);
424
425 return 0;
426
427 err_free_ptp_tx_stamp_list:
428 head = &tstamp_caps->latches_free;
429 list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
430 list_del(&ptp_tx_tstamp->list_member);
431 kfree(ptp_tx_tstamp);
432 }
433
434 kfree(tstamp_caps);
435 get_tstamp_caps_out:
436 kfree(rcv_tx_tstamp_caps);
437
438 return err;
439 }
440
441 /**
442 * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
443 * the skb compatibility.
444 * @caps: Tx timestamp capabilities that monitor the latch status
445 * @skb: skb for which the tstamp value is returned through virtchnl message
446 * @current_state: Current state of the Tx timestamp latch
447 * @expected_state: Expected state of the Tx timestamp latch
448 *
449 * Find a proper skb tracker for which the Tx timestamp is received and change
450 * the state to expected value.
451 *
452 * Return: true if the tracker has been found and updated, false otherwise.
453 */
454 static bool
idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps * caps,struct sk_buff * skb,enum idpf_ptp_tx_tstamp_state current_state,enum idpf_ptp_tx_tstamp_state expected_state)455 idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
456 struct sk_buff *skb,
457 enum idpf_ptp_tx_tstamp_state current_state,
458 enum idpf_ptp_tx_tstamp_state expected_state)
459 {
460 bool updated = false;
461
462 spin_lock(&caps->status_lock);
463 for (u16 i = 0; i < caps->num_entries; i++) {
464 struct idpf_ptp_tx_tstamp_status *status;
465
466 status = &caps->tx_tstamp_status[i];
467
468 if (skb == status->skb && status->state == current_state) {
469 status->state = expected_state;
470 updated = true;
471 break;
472 }
473 }
474 spin_unlock(&caps->status_lock);
475
476 return updated;
477 }
478
479 /**
480 * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
481 * back to the skb.
482 * @vport: Virtual port structure
483 * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
484 * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
485 *
486 * Read the value of the Tx timestamp for a given latch received from the
487 * Control Plane, extend it to 64 bit and provide back to the skb.
488 *
489 * Return: 0 on success, -errno otherwise.
490 */
491 static int
idpf_ptp_get_tstamp_value(struct idpf_vport * vport,struct virtchnl2_ptp_tx_tstamp_latch * tstamp_latch,struct idpf_ptp_tx_tstamp * ptp_tx_tstamp)492 idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
493 struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
494 struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
495 {
496 struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
497 struct skb_shared_hwtstamps shhwtstamps;
498 bool state_upd = false;
499 u8 tstamp_ns_lo_bit;
500 u64 tstamp;
501
502 tx_tstamp_caps = vport->tx_tstamp_caps;
503 tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
504
505 ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
506 ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
507
508 state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
509 ptp_tx_tstamp->skb,
510 IDPF_PTP_READ_VALUE,
511 IDPF_PTP_FREE);
512 if (!state_upd)
513 return -EINVAL;
514
515 tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
516 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
517 skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
518 consume_skb(ptp_tx_tstamp->skb);
519 ptp_tx_tstamp->skb = NULL;
520
521 list_add(&ptp_tx_tstamp->list_member,
522 &tx_tstamp_caps->latches_free);
523
524 u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
525 u64_stats_inc(&vport->tstamp_stats.packets);
526 u64_stats_update_end(&vport->tstamp_stats.stats_sync);
527
528 return 0;
529 }
530
531 /**
532 * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
533 * @adapter: Driver specific private structure
534 * @xn: transaction for message
535 * @ctlq_msg: received message
536 *
537 * Read the tstamps Tx tstamp values from a received message and put them
538 * directly to the skb. The number of timestamps to read is specified by
539 * the virtchnl message.
540 *
541 * Return: 0 on success, -errno otherwise.
542 */
static int
idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
				     struct idpf_vc_xn *xn,
				     const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
	struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
	struct idpf_vport *tstamp_vport = NULL;
	struct list_head *head;
	u16 num_latches;
	u32 vport_id;
	int err = 0;

	/* The latch payload arrives as an indirect control queue buffer */
	recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
	vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);

	/* Resolve the vport the message belongs to by its vport_id */
	idpf_for_each_vport(adapter, vport) {
		if (!vport)
			continue;

		if (vport->vport_id == vport_id) {
			tstamp_vport = vport;
			break;
		}
	}

	if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
		return -EINVAL;

	tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
	num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);

	spin_lock_bh(&tx_tstamp_caps->latches_lock);
	head = &tx_tstamp_caps->latches_in_use;

	for (u16 i = 0; i < num_latches; i++) {
		tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];

		/* Only latches flagged valid carry a usable timestamp */
		if (!tstamp_latch.valid)
			continue;

		/* A valid latch with nothing left in use means the driver
		 * and the Control Plane disagree on outstanding latches.
		 */
		if (list_empty(head)) {
			err = -ENOBUFS;
			goto unlock;
		}

		/* Match the reported latch index against the in-use list;
		 * on success the entry is moved back to the free list by
		 * idpf_ptp_get_tstamp_value().
		 */
		list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
			if (tstamp_latch.index == tx_tstamp->idx) {
				list_del(&tx_tstamp->list_member);
				err = idpf_ptp_get_tstamp_value(tstamp_vport,
								&tstamp_latch,
								tx_tstamp);
				if (err)
					goto unlock;

				break;
			}
		}
	}

unlock:
	spin_unlock_bh(&tx_tstamp_caps->latches_lock);

	return err;
}
610
611 /**
612 * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
613 * @vport: Virtual port structure
614 *
615 * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
616 * The message contains a list of indexes set in the Tx descriptors.
617 *
618 * Return: 0 on success, -errno otherwise.
619 */
idpf_ptp_get_tx_tstamp(struct idpf_vport * vport)620 int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
621 {
622 struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
623 struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
624 struct idpf_vc_xn_params xn_params = {
625 .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
626 .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
627 .async = true,
628 .async_handler = idpf_ptp_get_tx_tstamp_async_handler,
629 };
630 struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
631 int reply_sz, size, msg_size;
632 struct list_head *head;
633 bool state_upd;
634 u16 id = 0;
635
636 tx_tstamp_caps = vport->tx_tstamp_caps;
637 head = &tx_tstamp_caps->latches_in_use;
638
639 size = struct_size(send_tx_tstamp_msg, tstamp_latches,
640 tx_tstamp_caps->num_entries);
641 send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
642 if (!send_tx_tstamp_msg)
643 return -ENOMEM;
644
645 spin_lock_bh(&tx_tstamp_caps->latches_lock);
646 list_for_each_entry(ptp_tx_tstamp, head, list_member) {
647 u8 idx;
648
649 state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
650 ptp_tx_tstamp->skb,
651 IDPF_PTP_REQUEST,
652 IDPF_PTP_READ_VALUE);
653 if (!state_upd)
654 continue;
655
656 idx = ptp_tx_tstamp->idx;
657 send_tx_tstamp_msg->tstamp_latches[id].index = idx;
658 id++;
659 }
660 spin_unlock_bh(&tx_tstamp_caps->latches_lock);
661
662 msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
663 send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
664 send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
665 xn_params.send_buf.iov_base = send_tx_tstamp_msg;
666 xn_params.send_buf.iov_len = msg_size;
667
668 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
669 kfree(send_tx_tstamp_msg);
670
671 return min(reply_sz, 0);
672 }
673