// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */

#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"

/**
 * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get PTP capabilities message.
 *
 * Return: 0 on success, -errno on failure.
 */
int idpf_ptp_get_caps(struct idpf_adapter *adapter)
{
	struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
	struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
		.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
				    VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
				    VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
				    VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
				    VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
				    VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
		.send_buf.iov_base = &send_ptp_caps_msg,
		.send_buf.iov_len = sizeof(send_ptp_caps_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
	struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
	struct idpf_ptp_secondary_mbx *scnd_mbx;
	struct idpf_ptp *ptp = adapter->ptp;
	enum idpf_ptp_access access_type;
	u32 temp_offset;
	int reply_sz;

	recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
				    GFP_KERNEL);
	if (!recv_ptp_caps_msg)
		return -ENOMEM;

	xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
	xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	else if (reply_sz != sizeof(*recv_ptp_caps_msg))
		return -EIO;

	ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
	ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
	ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);

	scnd_mbx = &ptp->secondary_mbx;
	scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);

	/* if the peer_mbx_q_id holds an invalid value (0xffff), the secondary
	 * mailbox is not supported.
	 */
	scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
	if (scnd_mbx->valid)
		scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;

	/* Determine the access type for the PTP features */
	idpf_ptp_get_features_access(adapter);

	access_type = ptp->get_dev_clk_time_access;
	if (access_type != IDPF_PTP_DIRECT)
		goto discipline_clock;

	clock_offsets = recv_ptp_caps_msg->clk_offsets;

	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
	ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
	ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
	ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
	ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
	ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);

discipline_clock:
	access_type = ptp->adj_dev_clk_time_access;
	if (access_type != IDPF_PTP_DIRECT)
		return 0;

	clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;

	/* Device clock offsets */
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
	ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
	ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
	ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
	ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
	ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);

	/* PHY clock offsets */
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
	ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
	ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
	ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
							   temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
	ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
	temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
	ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);

	return 0;
}
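
/*
 * Usage sketch (illustrative only, not part of the driver): once
 * idpf_ptp_get_caps() has negotiated direct access and mapped the clock
 * register offsets into dev_clk_regs, a device clock read could combine the
 * low/high nanosecond registers as below. The name example_read_dev_clk() is
 * hypothetical; the real read path additionally latches a snapshot through
 * the cmd_sync trigger before reading.
 *
 *	static u64 example_read_dev_clk(struct idpf_ptp *ptp)
 *	{
 *		u32 lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
 *		u32 hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
 *
 *		return ((u64)hi << 32) | lo;
 *	}
 */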

/**
 * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
 * @adapter: Driver specific private structure
 * @dev_clk_time: Pointer to the device clock structure where the value is set
 *
 * Send virtchnl get time message to get the time of the clock.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
			      struct idpf_ptp_dev_timers *dev_clk_time)
{
	struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
		.send_buf.iov_base = &get_dev_clk_time_msg,
		.send_buf.iov_len = sizeof(get_dev_clk_time_msg),
		.recv_buf.iov_base = &get_dev_clk_time_msg,
		.recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;
	u64 dev_time;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(get_dev_clk_time_msg))
		return -EIO;

	dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
	dev_clk_time->dev_clk_time_ns = dev_time;

	return 0;
}
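
/*
 * Usage sketch (illustrative only): when the device clock is reachable only
 * through the mailbox, a .gettime64 PTP clock op could be backed by
 * idpf_ptp_get_dev_clk_time() roughly as follows. example_gettime64() is a
 * hypothetical name; the real callback lives in the PTP clock ops code.
 *
 *	static int example_gettime64(struct idpf_adapter *adapter,
 *				     struct timespec64 *ts)
 *	{
 *		struct idpf_ptp_dev_timers t;
 *		int err;
 *
 *		err = idpf_ptp_get_dev_clk_time(adapter, &t);
 *		if (err)
 *			return err;
 *
 *		*ts = ns_to_timespec64(t.dev_clk_time_ns);
 *
 *		return 0;
 *	}
 */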

/**
 * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
 * @adapter: Driver specific private structure
 * @time: New time value
 *
 * Send virtchnl set time message to set the time of the clock.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
{
	struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
		.dev_time_ns = cpu_to_le64(time),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
		.send_buf.iov_base = &set_dev_clk_time_msg,
		.send_buf.iov_len = sizeof(set_dev_clk_time_msg),
		.recv_buf.iov_base = &set_dev_clk_time_msg,
		.recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(set_dev_clk_time_msg))
		return -EIO;

	return 0;
}

/**
 * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
 * @adapter: Driver specific private structure
 * @delta: Offset in nanoseconds to adjust the time by
 *
 * Send virtchnl adj time message to adjust the clock by the indicated delta.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
{
	struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
		.delta = cpu_to_le64(delta),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
		.send_buf.iov_base = &adj_dev_clk_time_msg,
		.send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
		.recv_buf.iov_base = &adj_dev_clk_time_msg,
		.recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(adj_dev_clk_time_msg))
		return -EIO;

	return 0;
}

/**
 * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
 * @adapter: Driver specific private structure
 * @incval: Source timer increment value per clock cycle
 *
 * Send virtchnl adj fine message to adjust the frequency of the clock by
 * incval.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
{
	struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
		.incval = cpu_to_le64(incval),
	};
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
		.send_buf.iov_base = &adj_dev_clk_fine_msg,
		.send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
		.recv_buf.iov_base = &adj_dev_clk_fine_msg,
		.recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz != sizeof(adj_dev_clk_fine_msg))
		return -EIO;

	return 0;
}
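
/*
 * Usage sketch (illustrative only): an .adjfine PTP clock op would normally
 * derive the new increment value from the negotiated base_incval and the
 * requested frequency offset before sending it with
 * idpf_ptp_adj_dev_clk_fine(). example_adjfine() is a hypothetical name;
 * adjust_by_scaled_ppm() is the standard helper from ptp_clock_kernel.h.
 *
 *	static int example_adjfine(struct idpf_adapter *adapter,
 *				   long scaled_ppm)
 *	{
 *		u64 incval;
 *
 *		incval = adjust_by_scaled_ppm(adapter->ptp->base_incval,
 *					      scaled_ppm);
 *
 *		return idpf_ptp_adj_dev_clk_fine(adapter, incval);
 *	}
 */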

/**
 * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
 * @vport: Virtual port structure
 *
 * Send virtchnl get vport tstamps caps message to receive the set of tstamp
 * capabilities per vport.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
	struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
	struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
	struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
		.send_buf.iov_base = &send_tx_tstamp_caps,
		.send_buf.iov_len = sizeof(send_tx_tstamp_caps),
		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	enum idpf_ptp_access tstamp_access, get_dev_clk_access;
	struct idpf_ptp *ptp = vport->adapter->ptp;
	struct list_head *head;
	int err = 0, reply_sz;
	u16 num_latches;
	u32 size;

	if (!ptp)
		return -EOPNOTSUPP;

	tstamp_access = ptp->tx_tstamp_access;
	get_dev_clk_access = ptp->get_dev_clk_time_access;
	if (tstamp_access == IDPF_PTP_NONE ||
	    get_dev_clk_access == IDPF_PTP_NONE)
		return -EOPNOTSUPP;

	rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!rcv_tx_tstamp_caps)
		return -ENOMEM;

	send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
	xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto get_tstamp_caps_out;
	}

	num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
	size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
	if (reply_sz != size) {
		err = -EIO;
		goto get_tstamp_caps_out;
	}

	size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
	tstamp_caps = kzalloc(size, GFP_KERNEL);
	if (!tstamp_caps) {
		err = -ENOMEM;
		goto get_tstamp_caps_out;
	}

	tstamp_caps->access = true;
	tstamp_caps->num_entries = num_latches;

	INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
	INIT_LIST_HEAD(&tstamp_caps->latches_free);

	spin_lock_init(&tstamp_caps->latches_lock);
	spin_lock_init(&tstamp_caps->status_lock);

	tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;

	for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
		__le32 offset_l, offset_h;

		ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
		if (!ptp_tx_tstamp) {
			err = -ENOMEM;
			goto err_free_ptp_tx_stamp_list;
		}

		tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];

		if (tstamp_access != IDPF_PTP_DIRECT)
			goto skip_offsets;

		offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
		offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
		ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
		ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);

skip_offsets:
		ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;

		list_add(&ptp_tx_tstamp->list_member,
			 &tstamp_caps->latches_free);

		tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
	}

	vport->tx_tstamp_caps = tstamp_caps;
	kfree(rcv_tx_tstamp_caps);

	return 0;

err_free_ptp_tx_stamp_list:
	head = &tstamp_caps->latches_free;
	list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
		list_del(&ptp_tx_tstamp->list_member);
		kfree(ptp_tx_tstamp);
	}

	kfree(tstamp_caps);
get_tstamp_caps_out:
	kfree(rcv_tx_tstamp_caps);

	return err;
}
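
/*
 * Usage sketch (illustrative only): the free/in-use lists built above are
 * consumed on the Tx path, roughly as below, where a free latch is claimed
 * for an skb that requested a hardware timestamp. Names with the example_
 * prefix are hypothetical; the real request path also updates the per-index
 * tx_tstamp_status state machine under status_lock.
 *
 *	static struct idpf_ptp_tx_tstamp *
 *	example_claim_latch(struct idpf_ptp_vport_tx_tstamp_caps *caps,
 *			    struct sk_buff *skb)
 *	{
 *		struct idpf_ptp_tx_tstamp *latch;
 *
 *		spin_lock_bh(&caps->latches_lock);
 *		latch = list_first_entry_or_null(&caps->latches_free,
 *						 struct idpf_ptp_tx_tstamp,
 *						 list_member);
 *		if (latch) {
 *			latch->skb = skb_get(skb);
 *			list_move(&latch->list_member, &caps->latches_in_use);
 *		}
 *		spin_unlock_bh(&caps->latches_lock);
 *
 *		return latch;
 *	}
 */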

/**
 * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
 *				    the skb compatibility.
 * @caps: Tx timestamp capabilities that monitor the latch status
 * @skb: skb for which the tstamp value is returned through virtchnl message
 * @current_state: Current state of the Tx timestamp latch
 * @expected_state: Expected state of the Tx timestamp latch
 *
 * Find a proper skb tracker for which the Tx timestamp is received and change
 * the state to expected value.
 *
 * Return: true if the tracker has been found and updated, false otherwise.
 */
static bool
idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
			       struct sk_buff *skb,
			       enum idpf_ptp_tx_tstamp_state current_state,
			       enum idpf_ptp_tx_tstamp_state expected_state)
{
	bool updated = false;

	spin_lock(&caps->status_lock);
	for (u16 i = 0; i < caps->num_entries; i++) {
		struct idpf_ptp_tx_tstamp_status *status;

		status = &caps->tx_tstamp_status[i];

		if (skb == status->skb && status->state == current_state) {
			status->state = expected_state;
			updated = true;
			break;
		}
	}
	spin_unlock(&caps->status_lock);

	return updated;
}

/**
 * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
 *			       back to the skb.
 * @vport: Virtual port structure
 * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
 * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
 *
 * Read the value of the Tx timestamp for a given latch received from the
 * Control Plane, extend it to 64 bit and provide back to the skb.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
			  struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
			  struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
{
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct skb_shared_hwtstamps shhwtstamps;
	bool state_upd = false;
	u8 tstamp_ns_lo_bit;
	u64 tstamp;

	tx_tstamp_caps = vport->tx_tstamp_caps;
	tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;

	ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
	ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;

	state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
						   ptp_tx_tstamp->skb,
						   IDPF_PTP_READ_VALUE,
						   IDPF_PTP_FREE);
	if (!state_upd)
		return -EINVAL;

	tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
	shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
	skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
	consume_skb(ptp_tx_tstamp->skb);

	list_add(&ptp_tx_tstamp->list_member,
		 &tx_tstamp_caps->latches_free);

	return 0;
}
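
/*
 * Latch state flow, as driven by the functions in this file: a latch bound to
 * an skb moves from IDPF_PTP_REQUEST to IDPF_PTP_READ_VALUE when its index is
 * put into the get-Tx-tstamp request (idpf_ptp_get_tx_tstamp()), and from
 * IDPF_PTP_READ_VALUE to IDPF_PTP_FREE once the Control Plane returns the
 * value and it is delivered to the skb (idpf_ptp_get_tstamp_value()).
 */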

/**
 * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
 * @adapter: Driver specific private structure
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * Read the Tx timestamp values from a received message and provide them
 * directly to the skbs. The number of timestamps to read is specified by
 * the virtchnl message.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
				     struct idpf_vc_xn *xn,
				     const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
	struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
	struct idpf_vport *tstamp_vport = NULL;
	struct list_head *head;
	u16 num_latches;
	u32 vport_id;
	int err = 0;

	recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
	vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);

	idpf_for_each_vport(adapter, vport) {
		if (!vport)
			continue;

		if (vport->vport_id == vport_id) {
			tstamp_vport = vport;
			break;
		}
	}

	if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
		return -EINVAL;

	tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
	num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);

	spin_lock_bh(&tx_tstamp_caps->latches_lock);
	head = &tx_tstamp_caps->latches_in_use;

	for (u16 i = 0; i < num_latches; i++) {
		tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];

		if (!tstamp_latch.valid)
			continue;

		if (list_empty(head)) {
			err = -ENOBUFS;
			goto unlock;
		}

		list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
			if (tstamp_latch.index == tx_tstamp->idx) {
				list_del(&tx_tstamp->list_member);
				err = idpf_ptp_get_tstamp_value(tstamp_vport,
								&tstamp_latch,
								tx_tstamp);
				if (err)
					goto unlock;

				break;
			}
		}
	}

unlock:
	spin_unlock_bh(&tx_tstamp_caps->latches_lock);

	return err;
}

/**
 * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
 * @vport: Virtual port structure
 *
 * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
 * The message contains a list of indexes set in the Tx descriptors.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
		.async = true,
		.async_handler = idpf_ptp_get_tx_tstamp_async_handler,
	};
	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
	int reply_sz, size, msg_size;
	struct list_head *head;
	bool state_upd;
	u16 id = 0;

	tx_tstamp_caps = vport->tx_tstamp_caps;
	head = &tx_tstamp_caps->latches_in_use;

	size = struct_size(send_tx_tstamp_msg, tstamp_latches,
			   tx_tstamp_caps->num_entries);
	send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
	if (!send_tx_tstamp_msg)
		return -ENOMEM;

	spin_lock_bh(&tx_tstamp_caps->latches_lock);
	list_for_each_entry(ptp_tx_tstamp, head, list_member) {
		u8 idx;

		state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
							   ptp_tx_tstamp->skb,
							   IDPF_PTP_REQUEST,
							   IDPF_PTP_READ_VALUE);
		if (!state_upd)
			continue;

		idx = ptp_tx_tstamp->idx;
		send_tx_tstamp_msg->tstamp_latches[id].index = idx;
		id++;
	}
	spin_unlock_bh(&tx_tstamp_caps->latches_lock);

	msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
	send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
	send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
	xn_params.send_buf.iov_base = send_tx_tstamp_msg;
	xn_params.send_buf.iov_len = msg_size;

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	kfree(send_tx_tstamp_msg);

	return min(reply_sz, 0);
}
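
/*
 * End-to-end Tx timestamp sequence (summary of the virtchnl pieces above,
 * illustrative): a latch is claimed for the skb on the Tx path, the latch
 * index travels in the Tx descriptor, idpf_ptp_get_tx_tstamp() then asks the
 * Control Plane for the values of all in-use latches, and
 * idpf_ptp_get_tx_tstamp_async_handler() completes the skbs with the extended
 * 64-bit timestamps once the asynchronous reply arrives.
 */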