Lines Matching full:cqe
150 mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe) in mlx5e_lro_update_hdr() argument
167 l4_hdr_type = get_cqe_l4_hdr_type(cqe); in mlx5e_lro_update_hdr()
172 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN; in mlx5e_lro_update_hdr()
189 if (get_cqe_lro_tcppsh(cqe)) in mlx5e_lro_update_hdr()
194 th->th_ack = cqe->lro_ack_seq_num; in mlx5e_lro_update_hdr()
195 th->th_win = cqe->lro_tcp_win; in mlx5e_lro_update_hdr()
208 if (get_cqe_lro_timestamp_valid(cqe) && in mlx5e_lro_update_hdr()
213 * cqe->timestamp is 64bit long. in mlx5e_lro_update_hdr()
217 ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1); in mlx5e_lro_update_hdr()
223 ip4->ip_ttl = cqe->lro_min_ttl; in mlx5e_lro_update_hdr()
230 tcp_csum = cqe->check_sum; in mlx5e_lro_update_hdr()
245 ip6->ip6_hlim = cqe->lro_min_ttl; in mlx5e_lro_update_hdr()
255 tcp_csum = csum_reduce(tcp_csum + cqe->check_sum); in mlx5e_lro_update_hdr()
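
The matches above are mlx5e_lro_update_hdr(), which rewrites the headers of an LRO-coalesced packet from fields the hardware reports in the 64-byte CQE: total byte count, TCP ACK/window, the echoed timestamp words, the minimum TTL/hop limit, and a running checksum that gets folded back into the TCP checksum. A minimal standalone sketch of the two arithmetic steps follows; the values and the fold16() helper are illustrative stand-ins, not the driver's own csum_reduce().

/*
 * Standalone sketch (not driver code): the total-length fixup and the
 * 16-bit one's-complement fold used when cqe->check_sum is added into
 * the rebuilt TCP checksum.  All values below are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl()/ntohl() for the endian fixup */

#define ETHER_HDR_LEN 14	/* same value net/ethernet.h defines */

/* One's-complement fold: add the carry back until the sum fits in 16 bits. */
static uint16_t
fold16(uint32_t sum)
{
	while (sum > 0xffff)
		sum = (sum >> 16) + (sum & 0xffff);
	return ((uint16_t)sum);
}

int
main(void)
{
	/* Pretend values as they would arrive in a completed CQE. */
	uint32_t byte_cnt_be = htonl(9014);	/* cqe->byte_cnt is big-endian */
	uint16_t cqe_check_sum = 0x1a2b;	/* hardware's running checksum */
	uint16_t old_tcp_csum = 0x3c4d;

	/* Mirrors: tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN */
	uint32_t tot_len = ntohl(byte_cnt_be) - ETHER_HDR_LEN;

	/*
	 * Adding the CQE checksum into the TCP checksum needs a 16-bit
	 * fold, the job csum_reduce() does in the listing above.
	 */
	uint16_t tcp_csum = fold16((uint32_t)old_tcp_csum + cqe_check_sum);

	printf("tot_len=%u tcp_csum=0x%04x\n", (unsigned)tot_len,
	    (unsigned)tcp_csum);
	return (0);
}
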
325 mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, in mlx5e_build_rx_mbuf() argument
334 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; in mlx5e_build_rx_mbuf()
336 mlx5e_lro_update_hdr(mb, cqe); in mlx5e_build_rx_mbuf()
359 if (cqe->rss_hash_type != 0) { in mlx5e_build_rx_mbuf()
360 mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result); in mlx5e_build_rx_mbuf()
363 switch (cqe->rss_hash_type & in mlx5e_build_rx_mbuf()
393 if (cqe_is_tunneled(cqe)) in mlx5e_build_rx_mbuf()
403 if (cqe_is_tunneled(cqe)) { in mlx5e_build_rx_mbuf()
405 * CQE can be tunneled only if TIR is configured to in mlx5e_build_rx_mbuf()
409 if (((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK)) == in mlx5e_build_rx_mbuf()
417 if (likely((cqe->hds_ip_ext & CQE_L4_OK) == CQE_L4_OK)) { in mlx5e_build_rx_mbuf()
426 ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) == in mlx5e_build_rx_mbuf()
436 if (cqe_has_vlan(cqe)) { in mlx5e_build_rx_mbuf()
437 mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info); in mlx5e_build_rx_mbuf()
443 tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp)); in mlx5e_build_rx_mbuf()
447 * instead of the cqe generation. in mlx5e_build_rx_mbuf()
457 switch (get_cqe_tls_offload(cqe)) { in mlx5e_build_rx_mbuf()
470 mlx5e_accel_ipsec_handle_rx(ifp, mb, cqe, mr); in mlx5e_build_rx_mbuf()
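
The mlx5e_build_rx_mbuf() matches show per-packet metadata being lifted out of the CQE: the LRO segment count packed into the top byte of cqe->srqn, the RSS hash result and type, the hds_ip_ext L2/L3/L4 bits that gate the hardware checksum flags, the VLAN tag, the 64-bit timestamp, and the TLS/IPsec offload hooks. The short sketch below reproduces the two bit extractions with placeholder constants; the real CQE_L2_OK/CQE_L3_OK/CQE_L4_OK values come from the mlx5 device headers.

/*
 * Standalone sketch (not driver code): the bit manipulations shown in
 * mlx5e_build_rx_mbuf() above.  The CQE_* bit values here are
 * placeholders for the sketch only.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define CQE_L2_OK 0x01	/* placeholder bit values */
#define CQE_L3_OK 0x02
#define CQE_L4_OK 0x04

int
main(void)
{
	/*
	 * cqe->srqn overloads its most significant byte with the LRO
	 * segment count, hence the >> 24 after the byte swap.
	 */
	uint32_t srqn_be = htonl(0x05000123);	/* 5 coalesced segments */
	uint32_t lro_num_seg = ntohl(srqn_be) >> 24;

	/*
	 * hds_ip_ext carries per-layer "checksum OK" bits; all three must
	 * be set before the mbuf gets the full set of CSUM_* flags.
	 */
	uint8_t hds_ip_ext = CQE_L2_OK | CQE_L3_OK | CQE_L4_OK;
	int csum_ok = (hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
	    (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK);

	printf("lro_num_seg=%u csum_ok=%d\n", (unsigned)lro_num_seg, csum_ok);
	return (0);
}
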
556 struct mlx5_cqe64 *cqe; in mlx5e_poll_rx_cq() local
562 cqe = mlx5e_get_cqe(&rq->cq); in mlx5e_poll_rx_cq()
563 if (!cqe) in mlx5e_poll_rx_cq()
566 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) in mlx5e_poll_rx_cq()
571 wqe_counter_be = cqe->wqe_counter; in mlx5e_poll_rx_cq()
574 byte_cnt = be32_to_cpu(cqe->byte_cnt); in mlx5e_poll_rx_cq()
580 if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { in mlx5e_poll_rx_cq()
581 mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe); in mlx5e_poll_rx_cq()
616 if (!mlx5e_accel_ipsec_flow(cqe) /* tag is already assigned in mlx5e_poll_rx_cq()
635 mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter], in mlx5e_poll_rx_cq()
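
The last group is the completion polling loop, mlx5e_poll_rx_cq(): fetch the next CQE, stop when the queue is empty, handle compressed CQEs, read the WQE counter and byte count, and treat any opcode other than MLX5_CQE_RESP_SEND as an error before handing the data to mlx5e_build_rx_mbuf(). The outline below mimics that control flow with stand-in types and helpers (struct fake_cqe, get_next_cqe()); it sketches the loop's shape, not the driver's CQ API.

/*
 * Standalone sketch (not driver code): the shape of the RX completion
 * polling loop listed above.  Types, helpers, and the opcode value are
 * stand-ins for the driver's CQ machinery.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>
#include <arpa/inet.h>

#define OP_RESP_SEND 0x2	/* stand-in for MLX5_CQE_RESP_SEND */

struct fake_cqe {
	uint8_t  op_own;	/* opcode lives in the high nibble */
	uint16_t wqe_counter;	/* big-endian in the real CQE */
	uint32_t byte_cnt;	/* big-endian in the real CQE */
};

/* Return the next posted CQE, or NULL when the ring is drained. */
static const struct fake_cqe *
get_next_cqe(const struct fake_cqe *ring, size_t n, size_t *idx)
{
	return (*idx < n ? &ring[(*idx)++] : NULL);
}

int
main(void)
{
	const struct fake_cqe ring[] = {
		{ OP_RESP_SEND << 4, htons(0), htonl(1514) },
		{ 0xd << 4,          htons(1), htonl(0)    },	/* error opcode */
		{ OP_RESP_SEND << 4, htons(2), htonl(66)   },
	};
	size_t idx = 0;
	const struct fake_cqe *cqe;

	/* Compressed-CQE expansion from the real loop is omitted here. */
	for (;;) {
		cqe = get_next_cqe(ring, sizeof(ring) / sizeof(ring[0]), &idx);
		if (cqe == NULL)
			break;		/* CQ is empty, stop polling */

		if ((cqe->op_own >> 4) != OP_RESP_SEND) {
			printf("error CQE at wqe %u\n",
			    (unsigned)ntohs(cqe->wqe_counter));
			continue;	/* the driver dumps the CQE and recycles the WQE */
		}

		printf("wqe %u: %u bytes -> build mbuf\n",
		    (unsigned)ntohs(cqe->wqe_counter),
		    (unsigned)ntohl(cqe->byte_cnt));
	}
	return (0);
}
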