/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK	256

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value which is divided by 2 let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_ITR_100K		10	/* all values below must be even */
#define IAVF_ITR_50K		20
#define IAVF_ITR_20K		50
#define IAVF_ITR_18K		60
#define IAVF_ITR_8K		122
#define IAVF_MAX_ITR		8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA		BIT(6)
#define IAVF_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K		125	/* 8000 ints/sec */
#define IAVF_INTRL_62K		16	/* 62500 ints/sec */
#define IAVF_INTRL_83K		12	/* 83333 ints/sec */
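/* Worked example (illustrative only, not used by the driver): the default
 * Rx setting IAVF_ITR_RX_DEF is (50 | IAVF_ITR_DYNAMIC).  ITR_TO_REG()
 * strips the dynamic flag and yields 50, i.e. 50 usec; per the comment
 * above, that value is halved when it is finally written to the 2 usec
 * resolution register.  For rate limiting, INTRL_USEC_TO_REG(IAVF_INTRL_8K)
 * computes (125 >> 2) | INTRL_ENA = 0x1F | 0x40 = 0x5F, and
 * INTRL_REG_TO_USEC(0x5F) gives back (0x5F & ~0x40) << 2 = 124 usec; the
 * small loss comes from the 4 usec register resolution.
 */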
#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR	IAVF_IDX_ITR0
#define IAVF_TX_ITR	IAVF_IDX_ITR1
#define IAVF_PE_ITR	IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256	256
#define IAVF_RXBUFFER_1536	1536	/* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048	2048
#define IAVF_RXBUFFER_3072	3072	/* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER	9728	/* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int iavf_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IAVF_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IAVF_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
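/* Worked example (illustrative only): on a typical x86_64 build with 4K
 * pages, NET_IP_ALIGN is 0, NET_SKB_PAD is 64 and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is roughly 320 bytes, so
 * SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048) is roughly 1728.  Since
 * 64 + 1536 <= 1728, IAVF_2K_TOO_SMALL_WITH_PADDING is false and
 * iavf_skb_pad() evaluates iavf_compute_pad(1536): the buffer length is
 * rounded up to half a page (2048) and 2048 - shared_info - 1536 leaves
 * roughly 192 bytes of headroom.  The exact numbers depend on the kernel
 * configuration; this only shows how the pieces fit together.
 */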
/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i)			\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)	\
	do {					\
		IAVF_RX_NEXT_DESC((r), (i), (n)); \
		prefetch((n));			\
	} while (0)

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))

/**
 * iavf_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
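/* Worked example (illustrative only): for a 9000 byte jumbo frame,
 * 9000 * 85 = 765000 and 765000 >> 20 = 0, so the estimate is 0 + 1 = 1
 * descriptor (9000 < 12K fits in a single aligned descriptor).  For a 32K
 * chunk, 32768 * 85 = 2785280 and 2785280 >> 20 = 2, giving 3 descriptors,
 * which matches ceil(32K / 12K).
 */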
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN			BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN			BIT(2)
#define IAVF_TX_FLAGS_TSO			BIT(3)
#define IAVF_TX_FLAGS_IPV4			BIT(4)
#define IAVF_TX_FLAGS_IPV6			BIT(5)
#define IAVF_TX_FLAGS_FCCRC			BIT(6)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
#define IAVF_TX_FLAGS_VLAN_SHIFT		16
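/* Illustrative note (not part of the original header): the lower 16 bits of
 * tx_flags hold the feature flags above, while the upper 16 bits carry the
 * VLAN TCI to insert.  For example, a TCI of 0x2005 (priority 1, VLAN ID 5)
 * stored as (0x2005 << IAVF_TX_FLAGS_VLAN_SHIFT) gives tx_flags of
 * 0x20050000; masking with IAVF_TX_FLAGS_VLAN_PRIO_MASK and shifting right
 * by IAVF_TX_FLAGS_VLAN_PRIO_SHIFT recovers priority 1, and masking with
 * IAVF_TX_FLAGS_VLAN_MASK then shifting by IAVF_TX_FLAGS_VLAN_SHIFT
 * recovers the full TCI.
 */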
struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct iavf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum iavf_ring_state_t {
	__IAVF_TX_FDIR_INIT_DONE,
	__IAVF_TX_XPS_INIT_DONE,
	__IAVF_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT		0
#define IAVF_RX_DTYPE_HEADER_SPLIT	1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS	2
#define IAVF_RX_SPLIT_L2	0x1
#define IAVF_RX_SPLIT_IP	0x2
#define IAVF_RX_SPLIT_TCP_UDP	0x4
#define IAVF_RX_SPLIT_SCTP	0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct iavf_tx_buffer *tx_bi;
		struct iavf_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)

	/* stats structs */
	struct iavf_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
	return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
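/* Illustrative usage sketch (not part of the original header): a q_vector's
 * NAPI poll walks every ring linked into its Tx and Rx containers with this
 * iterator.  'q_vector' and 'process_ring' below are hypothetical names used
 * only to show the shape of the loop; the real loop lives in iavf_napi_poll():
 *
 *	struct iavf_ring *ring;
 *
 *	iavf_for_each_ring(ring, q_vector->tx)
 *		process_ring(ring);
 *	iavf_for_each_ring(ring, q_vector->rx)
 *		process_ring(ring);
 */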
static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}

/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */