/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. So to reduce
	 * the calculation overhead just subtract 2 lines from the free descs
	 * (one for the header line and one to compensate the division down
	 * calculation).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
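
	/* Worked example (illustrative, not part of the original source):
	 * with descs_per_entry = 2 and required_buffers = 5,
	 * temp = 5 / 2 + 2 = 4, so the check below passes only when more
	 * than 4 entries are free. Over-reserving by a couple of entries is
	 * exactly the cheap approximation the comment above describes.
	 */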
	return ena_com_free_q_entries(io_sq) > temp;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
		 num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		pr_debug("Reset available entries in tx burst for queue %d to %d\n",
			 io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
				 io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
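
/* Worked example (illustrative, not part of the original source): with
 * q_depth = 8 and phase = 1, heads 0..7 expect phase 1. When head
 * increments from 7 to 8, (8 & (8 - 1)) == 0, so the phase flips to 0 for
 * the next lap over the same descriptor memory; stale descriptors written
 * during the previous lap then fail the phase check in
 * ena_com_tx_comp_req_id_get() below.
 */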
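
/* Minimal polling sketch (an assumption about typical usage, not code from
 * the original source): a caller reaps TX completions by looping until
 * -EAGAIN, then acking the reclaimed descriptors, e.g.:
 *
 *	u16 req_id;
 *	u16 done = 0;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0)
 *		done += tx_descs_for(req_id);	// hypothetical helper
 *
 *	ena_com_comp_ack(io_sq, done);
 *
 * tx_descs_for() stands in for however the caller maps req_id back to the
 * number of SQ descriptors that request consumed.
 */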

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device hasn't updated
	 * this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	/* Make sure the phase is read before the rest of the descriptor */
	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		pr_err("Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */