/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include "q_struct.h"

#define	MAX_QUEUE_SET			128
#define	MAX_RCV_QUEUES_PER_QS		8
#define	MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define	MAX_SND_QUEUES_PER_QS		8
#define	MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define	NICVF_INTR_ID_CQ		0
#define	NICVF_INTR_ID_SQ		8
#define	NICVF_INTR_ID_RBDR		16
#define	NICVF_INTR_ID_MISC		18
#define	NICVF_INTR_ID_QS_ERR		19

#define	for_each_cq_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_CQ; (irq) < NICVF_INTR_ID_SQ; (irq)++)
#define	for_each_sq_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_SQ; (irq) < NICVF_INTR_ID_RBDR; (irq)++)
#define	for_each_rbdr_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_RBDR; (irq) < NICVF_INTR_ID_MISC; (irq)++)

#define	RBDR_SIZE0		0UL /* 8K entries */
#define	RBDR_SIZE1		1UL /* 16K entries */
#define	RBDR_SIZE2		2UL /* 32K entries */
#define	RBDR_SIZE3		3UL /* 64K entries */
#define	RBDR_SIZE4		4UL /* 128K entries */
#define	RBDR_SIZE5		5UL /* 256K entries */
#define	RBDR_SIZE6		6UL /* 512K entries */

#define	SND_QUEUE_SIZE0		0UL /* 1K entries */
#define	SND_QUEUE_SIZE1		1UL /* 2K entries */
#define	SND_QUEUE_SIZE2		2UL /* 4K entries */
#define	SND_QUEUE_SIZE3		3UL /* 8K entries */
#define	SND_QUEUE_SIZE4		4UL /* 16K entries */
#define	SND_QUEUE_SIZE5		5UL /* 32K entries */
#define	SND_QUEUE_SIZE6		6UL /* 64K entries */

#define	CMP_QUEUE_SIZE0		0UL /* 1K entries */
#define	CMP_QUEUE_SIZE1		1UL /* 2K entries */
#define	CMP_QUEUE_SIZE2		2UL /* 4K entries */
#define	CMP_QUEUE_SIZE3		3UL /* 8K entries */
#define	CMP_QUEUE_SIZE4		4UL /* 16K entries */
#define	CMP_QUEUE_SIZE5		5UL /* 32K entries */
#define	CMP_QUEUE_SIZE6		6UL /* 64K entries */

/* Default queue count per QS, its lengths and threshold values */
#define	RBDR_CNT		1
#define	RCV_QUEUE_CNT		8
#define	SND_QUEUE_CNT		8
#define	CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */

#define	SND_QSIZE		SND_QUEUE_SIZE2
#define	SND_QUEUE_LEN		(1UL << (SND_QSIZE + 10))
#define	MAX_SND_QUEUE_LEN	(1UL << (SND_QUEUE_SIZE6 + 10))

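/*
 * Illustrative note (derived from the macros above, not part of the
 * original source): the *_SIZEn constants are the encoded ring sizes
 * programmed into the hardware. The effective length in entries is
 * 1UL << (encoding + 10) for SQs and CQs, and 1UL << (encoding + 13)
 * for RBDRs (see RCV_BUF_COUNT below). For example, with SND_QSIZE ==
 * SND_QUEUE_SIZE2, SND_QUEUE_LEN == 1UL << 12 == 4096 descriptors.
 */
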
#define	SND_QUEUE_THRESH		2UL
#define	MIN_SQ_DESC_PER_PKT_XMIT	2
/* 1 CQE per packet since timestamping is not enabled; otherwise 2 */
#define	MAX_CQE_PER_PKT_XMIT		1

/*
 * Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define	CMP_QSIZE		CMP_QUEUE_SIZE2
#define	CMP_QUEUE_LEN		(1UL << (CMP_QSIZE + 10))
#define	CMP_QUEUE_CQE_THRESH	0
#define	CMP_QUEUE_TIMER_THRESH	220 /* 10 usec */

#define	RBDR_SIZE		RBDR_SIZE0
#define	RCV_BUF_COUNT		(1UL << (RBDR_SIZE + 13))
#define	MAX_RCV_BUF_COUNT	(1UL << (RBDR_SIZE6 + 13))
#define	RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define	DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */

#define	MAX_CQES_FOR_TX		\
    ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * MAX_CQE_PER_PKT_XMIT)
/*
 * Calculate the number of CQEs to reserve for all SQEs.
 * It is expressed in 1/256th units of the CQ size.
 * '+ 1' to account for pipelining.
 */
#define	RQ_CQ_DROP		\
    ((256 / (CMP_QUEUE_LEN / (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)

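/*
 * Worked example with the defaults above: SND_QUEUE_LEN == 4096 and
 * MIN_SQ_DESC_PER_PKT_XMIT == 2 give MAX_CQES_FOR_TX == 2048. With
 * CMP_QUEUE_LEN == 4096, the divisor is 4096 / (4096 - 2048) == 2, so
 * RQ_CQ_DROP == (256 / 2) + 1 == 129, i.e. RX drops start once roughly
 * half of the CQ is left in reserve for TX completions.
 */
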
/* Descriptor size in bytes */
#define	SND_QUEUE_DESC_SIZE	16
#define	CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define	NICVF_RCV_BUF_ALIGN		7
#define	NICVF_RCV_BUF_ALIGN_BYTES	(1UL << NICVF_RCV_BUF_ALIGN)
#define	NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define	NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

#define	NICVF_ALIGNED_ADDR(addr, align_bytes)	\
	roundup2((addr), (align_bytes))
#define	NICVF_ADDR_ALIGN_LEN(addr, bytes)	\
	(NICVF_ALIGNED_ADDR((addr), (bytes)) - (bytes))
#define	NICVF_RCV_BUF_ALIGN_LEN(addr)	\
	(NICVF_ALIGNED_ADDR((addr), NICVF_RCV_BUF_ALIGN_BYTES) - (addr))

#define	NICVF_TXBUF_MAXSIZE	9212 /* Total max payload without TSO */
/* A single command is at most 256 buffers (hdr + 255 subcmds) */
#define	NICVF_TXBUF_NSEGS	256

/* Queue enable/disable */
#define	NICVF_SQ_EN		(1UL << 19)

/* Queue reset */
#define	NICVF_CQ_RESET		(1UL << 41)
#define	NICVF_SQ_RESET		(1UL << 17)
#define	NICVF_RBDR_RESET	(1UL << 43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct tx_stats {
		uint64_t good;
		uint64_t desc_fault;
		uint64_t hdr_cons_err;
		uint64_t subdesc_err;
		uint64_t imm_size_oflow;
		uint64_t data_seq_err;
		uint64_t mem_seq_err;
		uint64_t lock_viol;
		uint64_t data_fault;
		uint64_t tstmp_conflict;
		uint64_t tstmp_timeout;
		uint64_t mem_fault;
		uint64_t csum_overlap;
		uint64_t csum_overflow;
	} tx;
} __aligned(CACHE_LINE_SIZE);

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	uint64_t bytes;
	uint64_t pkts;
} __aligned(CACHE_LINE_SIZE);

struct q_desc_mem {
	bus_dma_tag_t dmat;
	bus_dmamap_t dmap;
	void *base;
	bus_addr_t phys_base;
	uint64_t size;
	uint16_t q_len;
};

struct rbdr {
	boolean_t enable;
	uint32_t dma_size;
	uint32_t frag_len;
	uint32_t thresh;	/* Threshold level for interrupt */
	void *desc;
	uint32_t head;
	uint32_t tail;
	struct q_desc_mem dmem;

	struct nicvf *nic;
	int idx;

	struct task rbdr_task;
	struct task rbdr_task_nowait;
	struct taskqueue *rbdr_taskq;

	bus_dma_tag_t rbdr_buff_dmat;
	bus_dmamap_t *rbdr_buff_dmaps;
} __aligned(CACHE_LINE_SIZE);

struct rcv_queue {
	boolean_t enable;
	struct rbdr *rbdr_start;
	struct rbdr *rbdr_cont;
	boolean_t en_tcp_reassembly;
	uint8_t cq_qs;			/* CQ's QS to which this RQ is assigned */
	uint8_t cq_idx;			/* CQ index (0 to 7) in the QS */
	uint8_t cont_rbdr_qs;		/* Continue buffer ptrs - QS num */
	uint8_t cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	uint8_t start_rbdr_qs;		/* First buffer ptrs - QS num */
	uint8_t start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	uint8_t caching;
	struct rx_tx_queue_stats stats;
} __aligned(CACHE_LINE_SIZE);

struct cmp_queue {
	boolean_t enable;
	uint16_t thresh;

	struct nicvf *nic;
	int idx;			/* This queue index */

	struct buf_ring *rx_br;		/* Reception buf ring */
	struct mtx mtx;			/* Lock to serialize processing CQEs */
	char mtx_name[32];

	struct task cmp_task;
	struct taskqueue *cmp_taskq;

	void *desc;
	struct q_desc_mem dmem;
	struct cmp_queue_stats stats;
	int irq;
} __aligned(CACHE_LINE_SIZE);

struct snd_buff {
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
};

struct snd_queue {
	boolean_t enable;
	uint8_t cq_qs;			/* CQ's QS to which this SQ is pointing */
	uint8_t cq_idx;			/* CQ index (0 to 7) in the above QS */
	uint16_t thresh;
	volatile int free_cnt;
	uint32_t head;
	uint32_t tail;
	uint64_t *skbuff;
	void *desc;

	struct nicvf *nic;
	int idx;			/* This queue index */

	bus_dma_tag_t snd_buff_dmat;
	struct snd_buff *snd_buff;

	struct buf_ring *br;		/* Transmission buf ring */
	struct mtx mtx;
	char mtx_name[32];

	struct task snd_task;
	struct taskqueue *snd_taskq;

	struct q_desc_mem dmem;
	struct rx_tx_queue_stats stats;
} __aligned(CACHE_LINE_SIZE);

struct queue_set {
	boolean_t enable;
	boolean_t be_en;
	uint8_t vnic_id;
	uint8_t rq_cnt;
	uint8_t cq_cnt;
	uint64_t cq_len;
	uint8_t sq_cnt;
	uint64_t sq_len;
	uint8_t rbdr_cnt;
	uint64_t rbdr_len;
	struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];

	struct task qs_err_task;
	struct taskqueue *qs_err_taskq;
} __aligned(CACHE_LINE_SIZE);

#define	GET_RBDR_DESC(RING, idx)	\
    (&(((struct rbdr_entry_t *)((RING)->desc))[(idx)]))
#define	GET_SQ_DESC(RING, idx)		\
    (&(((struct sq_hdr_subdesc *)((RING)->desc))[(idx)]))
#define	GET_CQ_DESC(RING, idx)		\
    (&(((union cq_desc_t *)((RING)->desc))[(idx)]))

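/*
 * Usage sketch (hypothetical caller, not part of this header's API):
 * the GET_* macros simply index the untyped descriptor ring memory.
 * A producer filling the next SQ header subdescriptor might do
 *
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, sq->tail);
 *
 * and then advance sq->tail modulo sq->dmem.q_len.
 */
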
/* CQ status bits */
#define	CQ_WR_FUL	(1UL << 26)
#define	CQ_WR_DISABLE	(1UL << 25)
#define	CQ_WR_FAULT	(1UL << 24)
#define	CQ_CQE_COUNT	(0xFFFF << 0)

#define	CQ_ERR_MASK	(CQ_WR_FUL | CQ_WR_DISABLE | CQ_WR_FAULT)

#define	NICVF_TX_LOCK(sq)		mtx_lock(&(sq)->mtx)
#define	NICVF_TX_TRYLOCK(sq)		mtx_trylock(&(sq)->mtx)
#define	NICVF_TX_UNLOCK(sq)		mtx_unlock(&(sq)->mtx)
#define	NICVF_TX_LOCK_ASSERT(sq)	mtx_assert(&(sq)->mtx, MA_OWNED)

#define	NICVF_CMP_LOCK(cq)		mtx_lock(&(cq)->mtx)
#define	NICVF_CMP_UNLOCK(cq)		mtx_unlock(&(cq)->mtx)

int nicvf_set_qset_resources(struct nicvf *);
int nicvf_config_data_transfer(struct nicvf *, boolean_t);
void nicvf_qset_config(struct nicvf *, boolean_t);

void nicvf_enable_intr(struct nicvf *, int, int);
void nicvf_disable_intr(struct nicvf *, int, int);
void nicvf_clear_intr(struct nicvf *, int, int);
int nicvf_is_intr_enabled(struct nicvf *, int, int);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *, uint64_t, uint64_t);
uint64_t nicvf_reg_read(struct nicvf *, uint64_t);
void nicvf_qset_reg_write(struct nicvf *, uint64_t, uint64_t);
uint64_t nicvf_qset_reg_read(struct nicvf *, uint64_t);
void nicvf_queue_reg_write(struct nicvf *, uint64_t, uint64_t, uint64_t);
uint64_t nicvf_queue_reg_read(struct nicvf *, uint64_t, uint64_t);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *, int);
void nicvf_update_sq_stats(struct nicvf *, int);
int nicvf_check_cqe_rx_errs(struct nicvf *, struct cmp_queue *,
    struct cqe_rx_t *);
int nicvf_check_cqe_tx_errs(struct nicvf *, struct cmp_queue *,
    struct cqe_send_t *);

#endif /* NICVF_QUEUES_H */