// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */

#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"

/**
 * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get PTP capabilities message.
 *
 * Return: 0 on success, -errno on failure.
 */
int idpf_ptp_get_caps(struct idpf_adapter *adapter)
{
	struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
	struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
		.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
				    VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
				    VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
				    VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
				    VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
				    VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
		.send_buf.iov_base = &send_ptp_caps_msg,
		.send_buf.iov_len = sizeof(send_ptp_caps_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
	struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
	struct idpf_ptp_secondary_mbx *scnd_mbx;
	struct idpf_ptp *ptp = adapter->ptp;
	enum idpf_ptp_access access_type;
	u32 temp_offset;
	int reply_sz;

	recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
				    GFP_KERNEL);
	if (!recv_ptp_caps_msg)
		return -ENOMEM;

	xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
	xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	else if (reply_sz != sizeof(*recv_ptp_caps_msg))
		return -EIO;

	ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
	ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
	ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);

	scnd_mbx = &ptp->secondary_mbx;
	scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);

	/* If peer_mbx_q_id holds an invalid value (0xffff), the secondary
	 * mailbox is not supported.
	 */
	scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
	if (scnd_mbx->valid)
		scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;

	/* Determine the access type for the PTP features */
	idpf_ptp_get_features_access(adapter);

	access_type = ptp->get_dev_clk_time_access;
	if (access_type != IDPF_PTP_DIRECT)
		goto cross_tstamp;

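	/* Direct access: translate the device clock register offsets
	 * provided by the Control Plane into MMIO register addresses.
	 */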
	clock_offsets = recv_ptp_caps_msg->clk_offsets;

	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
	ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
	ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
	ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
	ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);

cross_tstamp:
	access_type = ptp->get_cross_tstamp_access;
	if (access_type != IDPF_PTP_DIRECT)
		goto discipline_clock;

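	/* Direct access to the cross timestamp (device and system time)
	 * registers.
	 */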
	cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;

	temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
	ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
							    temp_offset);
	temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
	ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
							    temp_offset);
	temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);

discipline_clock:
	access_type = ptp->adj_dev_clk_time_access;
	if (access_type != IDPF_PTP_DIRECT)
		return 0;

	clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;

	/* Device clock offsets */
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
	ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
	ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
	ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
	ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
	ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);

	/* PHY clock offsets */
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
	ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
	ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
	ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
	ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
	ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);

	return 0;
}

/**
 * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
 * @adapter: Driver specific private structure
 * @dev_clk_time: Pointer to the device clock structure where the value is set
 *
 * Send virtchnl get time message to get the time of the clock.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
			      struct idpf_ptp_dev_timers *dev_clk_time)
{
	struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
		.send_buf.iov_base = &get_dev_clk_time_msg,
		.send_buf.iov_len = sizeof(get_dev_clk_time_msg),
		.recv_buf.iov_base = &get_dev_clk_time_msg,
		.recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;
	u64 dev_time;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(get_dev_clk_time_msg))
		return -EIO;

	dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
	dev_clk_time->dev_clk_time_ns = dev_time;

	return 0;
}

/**
 * idpf_ptp_get_cross_time - Send virtchnl get cross time message
 * @adapter: Driver specific private structure
 * @cross_time: Pointer to the device clock structure where the value is set
 *
 * Send virtchnl get cross time message to get the time of the clock and the
 * system time.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
			    struct idpf_ptp_dev_timers *cross_time)
{
	struct virtchnl2_ptp_get_cross_time cross_time_msg;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
		.send_buf.iov_base = &cross_time_msg,
		.send_buf.iov_len = sizeof(cross_time_msg),
		.recv_buf.iov_base = &cross_time_msg,
		.recv_buf.iov_len = sizeof(cross_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(cross_time_msg))
		return -EIO;

	cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
	cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);

	return 0;
}

/**
 * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
 * @adapter: Driver specific private structure
 * @time: New time value
 *
 * Send virtchnl set time message to set the time of the clock.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
{
	struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
		.dev_time_ns = cpu_to_le64(time),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
		.send_buf.iov_base = &set_dev_clk_time_msg,
		.send_buf.iov_len = sizeof(set_dev_clk_time_msg),
		.recv_buf.iov_base = &set_dev_clk_time_msg,
		.recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(set_dev_clk_time_msg))
		return -EIO;

	return 0;
}

/**
 * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
 * @adapter: Driver specific private structure
 * @delta: Offset in nanoseconds to adjust the time by
 *
 * Send virtchnl adj time message to adjust the clock by the indicated delta.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
{
	struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
		.delta = cpu_to_le64(delta),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
		.send_buf.iov_base = &adj_dev_clk_time_msg,
		.send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
		.recv_buf.iov_base = &adj_dev_clk_time_msg,
		.recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(adj_dev_clk_time_msg))
		return -EIO;

	return 0;
}

/**
 * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
 * @adapter: Driver specific private structure
 * @incval: Source timer increment value per clock cycle
 *
 * Send virtchnl adj fine message to adjust the frequency of the clock by
 * incval.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
{
	struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
		.incval = cpu_to_le64(incval),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
		.send_buf.iov_base = &adj_dev_clk_fine_msg,
		.send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
		.recv_buf.iov_base = &adj_dev_clk_fine_msg,
		.recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(adj_dev_clk_fine_msg))
		return -EIO;

	return 0;
}

/**
 * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
 * @vport: Virtual port structure
 *
 * Send virtchnl get vport tstamps caps message to receive the set of tstamp
 * capabilities per vport.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
	struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
	struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
	struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
		.send_buf.iov_base = &send_tx_tstamp_caps,
		.send_buf.iov_len = sizeof(send_tx_tstamp_caps),
		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	enum idpf_ptp_access tstamp_access, get_dev_clk_access;
	struct idpf_ptp *ptp = vport->adapter->ptp;
	struct list_head *head;
	int err = 0, reply_sz;
	u16 num_latches;
	u32 size;

	if (!ptp)
		return -EOPNOTSUPP;

	tstamp_access = ptp->tx_tstamp_access;
	get_dev_clk_access = ptp->get_dev_clk_time_access;
	if (tstamp_access == IDPF_PTP_NONE ||
	    get_dev_clk_access == IDPF_PTP_NONE)
		return -EOPNOTSUPP;

	rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!rcv_tx_tstamp_caps)
		return -ENOMEM;

	send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
	xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto get_tstamp_caps_out;
	}

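	/* The reply carries a flexible array of per-latch capabilities;
	 * validate the reply size against the reported number of latches.
	 */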
	num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
	size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
	if (reply_sz != size) {
		err = -EIO;
		goto get_tstamp_caps_out;
	}

	size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
	tstamp_caps = kzalloc(size, GFP_KERNEL);
	if (!tstamp_caps) {
		err = -ENOMEM;
		goto get_tstamp_caps_out;
	}

	tstamp_caps->access = true;
	tstamp_caps->num_entries = num_latches;

	INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
	INIT_LIST_HEAD(&tstamp_caps->latches_free);

	spin_lock_init(&tstamp_caps->latches_lock);
	spin_lock_init(&tstamp_caps->status_lock);

	tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;

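	/* Set up a free-list entry for each Tx timestamp latch reported by
	 * the Control Plane; latch register offsets are only filled in for
	 * direct register access.
	 */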
	for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
		__le32 offset_l, offset_h;

		ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
		if (!ptp_tx_tstamp) {
			err = -ENOMEM;
			goto err_free_ptp_tx_stamp_list;
		}

		tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];

		if (tstamp_access != IDPF_PTP_DIRECT)
			goto skip_offsets;

		offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
		offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
		ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
		ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);

skip_offsets:
		ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;

		list_add(&ptp_tx_tstamp->list_member,
			 &tstamp_caps->latches_free);

		tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
	}

	vport->tx_tstamp_caps = tstamp_caps;
	kfree(rcv_tx_tstamp_caps);

	return 0;

err_free_ptp_tx_stamp_list:
	head = &tstamp_caps->latches_free;
	list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
		list_del(&ptp_tx_tstamp->list_member);
		kfree(ptp_tx_tstamp);
	}

	kfree(tstamp_caps);
get_tstamp_caps_out:
	kfree(rcv_tx_tstamp_caps);

	return err;
}

/**
 * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
 *				    the skb compatibility.
 * @caps: Tx timestamp capabilities that monitor the latch status
 * @skb: skb for which the tstamp value is returned through virtchnl message
 * @current_state: Current state of the Tx timestamp latch
 * @expected_state: Expected state of the Tx timestamp latch
 *
 * Find a proper skb tracker for which the Tx timestamp is received and change
 * the state to expected value.
 *
 * Return: true if the tracker has been found and updated, false otherwise.
 */
static bool
idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
			       struct sk_buff *skb,
			       enum idpf_ptp_tx_tstamp_state current_state,
			       enum idpf_ptp_tx_tstamp_state expected_state)
{
	bool updated = false;

	spin_lock(&caps->status_lock);
	for (u16 i = 0; i < caps->num_entries; i++) {
		struct idpf_ptp_tx_tstamp_status *status;

		status = &caps->tx_tstamp_status[i];

		if (skb == status->skb && status->state == current_state) {
			status->state = expected_state;
			updated = true;
			break;
		}
	}
	spin_unlock(&caps->status_lock);

	return updated;
}

/**
 * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
 *			       back to the skb.
 * @vport: Virtual port structure
 * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
 * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
 *
 * Read the value of the Tx timestamp for a given latch received from the
 * Control Plane, extend it to 64 bit and provide back to the skb.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
			  struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
			  struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
{
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct skb_shared_hwtstamps shhwtstamps;
	bool state_upd = false;
	u8 tstamp_ns_lo_bit;
	u64 tstamp;

	tx_tstamp_caps = vport->tx_tstamp_caps;
	tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;

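	/* The nanosecond part of the latched value starts at the bit
	 * reported by the Control Plane; shift out the lower bits.
	 */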
	ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
	ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;

	state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
						   ptp_tx_tstamp->skb,
						   IDPF_PTP_READ_VALUE,
						   IDPF_PTP_FREE);
	if (!state_upd)
		return -EINVAL;

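	/* Extend the truncated HW timestamp to a full 64-bit nanosecond
	 * value, report it on the originating skb and release the reference.
	 */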
	tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
	shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
	skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
	consume_skb(ptp_tx_tstamp->skb);
	ptp_tx_tstamp->skb = NULL;

	list_add(&ptp_tx_tstamp->list_member,
		 &tx_tstamp_caps->latches_free);

	u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
	u64_stats_inc(&vport->tstamp_stats.packets);
	u64_stats_update_end(&vport->tstamp_stats.stats_sync);

	return 0;
}

/**
 * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
 * @adapter: Driver specific private structure
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * Read the Tx timestamp values from the received message and provide them
 * directly to the skbs. The number of timestamps to read is specified by
 * the virtchnl message.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
				     struct idpf_vc_xn *xn,
				     const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
	struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
	struct idpf_vport *tstamp_vport = NULL;
	struct list_head *head;
	u16 num_latches;
	u32 vport_id;
	int err = 0;

	recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
	vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);

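	/* Find the vport the latches in this message belong to */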
	idpf_for_each_vport(adapter, vport) {
		if (!vport)
			continue;

		if (vport->vport_id == vport_id) {
			tstamp_vport = vport;
			break;
		}
	}

	if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
		return -EINVAL;

	tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
	num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);

	spin_lock_bh(&tx_tstamp_caps->latches_lock);
	head = &tx_tstamp_caps->latches_in_use;

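	/* Match each valid latch in the message against an outstanding
	 * request and deliver its timestamp to the associated skb.
	 */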
	for (u16 i = 0; i < num_latches; i++) {
		tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];

		if (!tstamp_latch.valid)
			continue;

		if (list_empty(head)) {
			err = -ENOBUFS;
			goto unlock;
		}

		list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
			if (tstamp_latch.index == tx_tstamp->idx) {
				list_del(&tx_tstamp->list_member);
				err = idpf_ptp_get_tstamp_value(tstamp_vport,
								&tstamp_latch,
								tx_tstamp);
				if (err)
					goto unlock;

				break;
			}
		}
	}

unlock:
	spin_unlock_bh(&tx_tstamp_caps->latches_lock);

	return err;
}

/**
 * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
 * @vport: Virtual port structure
 *
 * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
 * The message contains a list of indexes set in the Tx descriptors.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
		.async = true,
		.async_handler = idpf_ptp_get_tx_tstamp_async_handler,
	};
	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
	int reply_sz, size, msg_size;
	struct list_head *head;
	bool state_upd;
	u16 id = 0;

	tx_tstamp_caps = vport->tx_tstamp_caps;
	head = &tx_tstamp_caps->latches_in_use;

	size = struct_size(send_tx_tstamp_msg, tstamp_latches,
			   tx_tstamp_caps->num_entries);
	send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
	if (!send_tx_tstamp_msg)
		return -ENOMEM;

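	/* Collect the indexes of latches whose skbs are still waiting for a
	 * timestamp and mark them as being read.
	 */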
	spin_lock_bh(&tx_tstamp_caps->latches_lock);
	list_for_each_entry(ptp_tx_tstamp, head, list_member) {
		u8 idx;

		state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
							   ptp_tx_tstamp->skb,
							   IDPF_PTP_REQUEST,
							   IDPF_PTP_READ_VALUE);
		if (!state_upd)
			continue;

		idx = ptp_tx_tstamp->idx;
		send_tx_tstamp_msg->tstamp_latches[id].index = idx;
		id++;
	}
	spin_unlock_bh(&tx_tstamp_caps->latches_lock);

	msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
	send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
	send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
	xn_params.send_buf.iov_base = send_tx_tstamp_msg;
	xn_params.send_buf.iov_len = msg_size;

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	kfree(send_tx_tstamp_msg);

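	/* The reply is consumed by the async handler; success returns 0 here
	 * and only a negative error code from sending is propagated.
	 */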
	return min(reply_sz, 0);
}