Lines matching full:self — whole-word matches for the identifier `self`, grouped by enclosing function. The functions shown are evidently the ring-management code of the aQuantia atlantic Ethernet driver (aq_ring.c); the leading number on each match is its line number in that source file, and "argument" marks lines where `self` is a function parameter.

84 static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)  in aq_get_rxpages()  argument
86 unsigned int order = self->page_order; in aq_get_rxpages()
87 u16 page_offset = self->page_offset; in aq_get_rxpages()
88 u16 frame_max = self->frame_max; in aq_get_rxpages()
89 u16 tail_size = self->tail_size; in aq_get_rxpages()
100 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
101 self->stats.rx.pg_flips++; in aq_get_rxpages()
102 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
109 aq_nic_get_dev(self->aq_nic)); in aq_get_rxpages()
110 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
111 self->stats.rx.pg_losts++; in aq_get_rxpages()
112 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
116 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
117 self->stats.rx.pg_reuses++; in aq_get_rxpages()
118 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
123 ret = aq_alloc_rxpages(&rxbuf->rxdata, self); in aq_get_rxpages()
125 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
126 self->stats.rx.alloc_fails++; in aq_get_rxpages()
127 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
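
The pg_flips, pg_losts, and pg_reuses counters above record the three possible outcomes of RX page recycling, plus alloc_fails when a fresh page cannot be had. A hedged reconstruction of the decision flow, pieced together from the matched lines (field names such as pg_off are assumptions; this is a sketch, not the verbatim function body):

	/* Sketch (reconstruction, not verbatim) of aq_get_rxpages()'s choice
	 * between flipping, dropping, or reusing an RX page. */
	if (rxbuf->rxdata.page) {
		/* The stack may still hold a reference to this page. */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try the next unused slice of the page. */
			if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
			    (PAGE_SIZE << order)) {
				rxbuf->rxdata.pg_off += frame_max;
				/* pg_flips++ (lines 100-102) */
				return 0;
			}
			/* Exhausted but still referenced: give it up. */
			aq_free_rxpage(&rxbuf->rxdata,
				       aq_nic_get_dev(self->aq_nic));
			/* pg_losts++ (lines 110-112) */
		} else {
			/* Sole owner again: rewind and reuse in place. */
			rxbuf->rxdata.pg_off = page_offset;
			/* pg_reuses++ (lines 116-118) */
			return 0;
		}
	}

	ret = aq_alloc_rxpages(&rxbuf->rxdata, self);	/* line 123 */
	if (ret)
		/* alloc_fails++ (lines 125-127) */
		return ret;

Flipping amortizes one page allocation across several frames; a page is only "lost" when it is both exhausted and still referenced by the stack.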
135 static int aq_ring_alloc(struct aq_ring_s *self, in aq_ring_alloc() argument
140 self->buff_ring = in aq_ring_alloc()
141 kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); in aq_ring_alloc()
143 if (!self->buff_ring) { in aq_ring_alloc()
148 self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), in aq_ring_alloc()
149 self->size * self->dx_size, in aq_ring_alloc()
150 &self->dx_ring_pa, GFP_KERNEL); in aq_ring_alloc()
151 if (!self->dx_ring) { in aq_ring_alloc()
158 aq_ring_free(self); in aq_ring_alloc()
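
The matches show the allocator pairing a host-side kcalloc() for per-descriptor bookkeeping with a dma_alloc_coherent() for the hardware-visible descriptor ring, unwinding through aq_ring_free() on failure (line 158). A sketch of the likely overall shape, with the error-handling glue filled in as an assumption:

	/* Sketch of aq_ring_alloc(), reconstructed from the matched lines. */
	static int aq_ring_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
	{
		int err = 0;

		/* Per-descriptor software state (lines 140-141). */
		self->buff_ring = kcalloc(self->size,
					  sizeof(struct aq_ring_buff_s),
					  GFP_KERNEL);
		if (!self->buff_ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		/* DMA-coherent descriptor ring shared with the NIC
		 * (lines 148-150). */
		self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
						   self->size * self->dx_size,
						   &self->dx_ring_pa,
						   GFP_KERNEL);
		if (!self->dx_ring) {
			err = -ENOMEM;
			goto err_exit;
		}

	err_exit:
		if (err < 0)
			aq_ring_free(self); /* frees whichever half succeeded */

		return err;
	}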
164 int aq_ring_tx_alloc(struct aq_ring_s *self, in aq_ring_tx_alloc() argument
169 self->aq_nic = aq_nic; in aq_ring_tx_alloc()
170 self->idx = idx; in aq_ring_tx_alloc()
171 self->size = aq_nic_cfg->txds; in aq_ring_tx_alloc()
172 self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; in aq_ring_tx_alloc()
174 return aq_ring_alloc(self, aq_nic); in aq_ring_tx_alloc()
177 int aq_ring_rx_alloc(struct aq_ring_s *self, in aq_ring_rx_alloc() argument
182 self->aq_nic = aq_nic; in aq_ring_rx_alloc()
183 self->idx = idx; in aq_ring_rx_alloc()
184 self->size = aq_nic_cfg->rxds; in aq_ring_rx_alloc()
185 self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; in aq_ring_rx_alloc()
186 self->xdp_prog = aq_nic->xdp_prog; in aq_ring_rx_alloc()
187 self->frame_max = AQ_CFG_RX_FRAME_MAX; in aq_ring_rx_alloc()
190 if (READ_ONCE(self->xdp_prog)) { in aq_ring_rx_alloc()
191 self->page_offset = AQ_XDP_HEADROOM; in aq_ring_rx_alloc()
192 self->page_order = AQ_CFG_XDP_PAGEORDER; in aq_ring_rx_alloc()
193 self->tail_size = AQ_XDP_TAILROOM; in aq_ring_rx_alloc()
195 self->page_offset = 0; in aq_ring_rx_alloc()
196 self->page_order = fls(self->frame_max / PAGE_SIZE + in aq_ring_rx_alloc()
197 (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1; in aq_ring_rx_alloc()
198 if (aq_nic_cfg->rxpageorder > self->page_order) in aq_ring_rx_alloc()
199 self->page_order = aq_nic_cfg->rxpageorder; in aq_ring_rx_alloc()
200 self->tail_size = 0; in aq_ring_rx_alloc()
203 return aq_ring_alloc(self, aq_nic); in aq_ring_rx_alloc()
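
Lines 196-197 derive the RX page allocation order from the frame size: pages = ceil(frame_max / PAGE_SIZE), then order = fls(pages) - 1, which lines 198-199 let the configured rxpageorder raise further. A standalone userspace check of that arithmetic, with PAGE_SIZE assumed to be 4096 and fls() reimplemented with the kernel's 1-based semantics:

	#include <stdio.h>

	/* Kernel-style fls(): 1-based index of the most significant set
	 * bit; fls(0) == 0. */
	static int fls_like(unsigned int x)
	{
		int r = 0;

		while (x) {
			r++;
			x >>= 1;
		}
		return r;
	}

	int main(void)
	{
		const unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
		const unsigned int frames[] = { 2048, 4096, 8192, 16384 };

		for (unsigned int i = 0;
		     i < sizeof(frames) / sizeof(frames[0]); i++) {
			/* Pages needed, rounded up, as lines 196-197 do. */
			unsigned int pages = frames[i] / page_size +
					     (frames[i] % page_size ? 1 : 0);
			/* fls(pages) - 1 turns the page count into an
			 * allocation order: 1 page -> 0, 4 pages -> 2. */
			printf("frame_max=%5u  pages=%u  page_order=%d\n",
			       frames[i], pages, fls_like(pages) - 1);
		}
		return 0;
	}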
207 aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, in aq_ring_hwts_rx_alloc() argument
213 memset(self, 0, sizeof(*self)); in aq_ring_hwts_rx_alloc()
215 self->aq_nic = aq_nic; in aq_ring_hwts_rx_alloc()
216 self->idx = idx; in aq_ring_hwts_rx_alloc()
217 self->size = size; in aq_ring_hwts_rx_alloc()
218 self->dx_size = dx_size; in aq_ring_hwts_rx_alloc()
220 self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa, in aq_ring_hwts_rx_alloc()
222 if (!self->dx_ring) { in aq_ring_hwts_rx_alloc()
223 aq_ring_free(self); in aq_ring_hwts_rx_alloc()
230 int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) in aq_ring_init() argument
232 self->hw_head = 0; in aq_ring_init()
233 self->sw_head = 0; in aq_ring_init()
234 self->sw_tail = 0; in aq_ring_init()
235 self->ring_type = ring_type; in aq_ring_init()
237 if (self->ring_type == ATL_RING_RX) in aq_ring_init()
238 u64_stats_init(&self->stats.rx.syncp); in aq_ring_init()
240 u64_stats_init(&self->stats.tx.syncp); in aq_ring_init()
285 bool aq_ring_tx_clean(struct aq_ring_s *self) in aq_ring_tx_clean() argument
287 struct device *dev = aq_nic_get_dev(self->aq_nic); in aq_ring_tx_clean()
291 budget && self->sw_head != self->hw_head; budget--) { in aq_ring_tx_clean()
292 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_tx_clean()
298 (!aq_ring_dx_in_range(self->sw_head, in aq_ring_tx_clean()
300 self->hw_head))) in aq_ring_tx_clean()
315 u64_stats_update_begin(&self->stats.tx.syncp); in aq_ring_tx_clean()
316 ++self->stats.tx.packets; in aq_ring_tx_clean()
317 self->stats.tx.bytes += buff->skb->len; in aq_ring_tx_clean()
318 u64_stats_update_end(&self->stats.tx.syncp); in aq_ring_tx_clean()
321 u64_stats_update_begin(&self->stats.tx.syncp); in aq_ring_tx_clean()
322 ++self->stats.tx.packets; in aq_ring_tx_clean()
323 self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf); in aq_ring_tx_clean()
324 u64_stats_update_end(&self->stats.tx.syncp); in aq_ring_tx_clean()
333 self->sw_head = aq_ring_next_dx(self, self->sw_head); in aq_ring_tx_clean()
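
aq_ring_tx_clean() advances sw_head toward hw_head one descriptor at a time via aq_ring_next_dx(), and aq_ring_rx_fill() below gates on aq_ring_avail_dx(). Neither helper appears in the matches, so this userspace model of their presumed semantics (wrap-around increment; a free-slot count that keeps one slot unused to distinguish full from empty) is an assumption:

	#include <stdio.h>

	struct ring {
		unsigned int size;	/* descriptor count */
		unsigned int sw_head;	/* next index software consumes */
		unsigned int sw_tail;	/* next index software fills */
	};

	/* Assumed semantics of aq_ring_next_dx(): advance by one, wrapping. */
	static unsigned int next_dx(const struct ring *r, unsigned int dx)
	{
		return (dx >= r->size - 1) ? 0U : dx + 1;
	}

	/* Assumed semantics of aq_ring_avail_dx(): free descriptors between
	 * sw_tail and sw_head, always leaving one slot unused. */
	static unsigned int avail_dx(const struct ring *r)
	{
		return (r->sw_tail >= r->sw_head) ?
			(r->size - 1) - r->sw_tail + r->sw_head :
			r->sw_head - r->sw_tail - 1;
	}

	int main(void)
	{
		struct ring r = { .size = 8, .sw_head = 6, .sw_tail = 2 };

		printf("next after 7: %u\n", next_dx(&r, 7));	/* wraps to 0 */
		printf("avail: %u\n", avail_dx(&r));		/* 3 free slots */
		return 0;
	}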
339 static void aq_rx_checksum(struct aq_ring_s *self, in aq_rx_checksum() argument
343 if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) in aq_rx_checksum()
347 u64_stats_update_begin(&self->stats.rx.syncp); in aq_rx_checksum()
348 ++self->stats.rx.errors; in aq_rx_checksum()
349 u64_stats_update_end(&self->stats.rx.syncp); in aq_rx_checksum()
528 static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, in __aq_ring_rx_clean() argument
531 struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); in __aq_ring_rx_clean()
534 for (; (self->sw_head != self->hw_head) && budget; in __aq_ring_rx_clean()
535 self->sw_head = aq_ring_next_dx(self, self->sw_head), in __aq_ring_rx_clean()
537 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in __aq_ring_rx_clean()
538 bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self); in __aq_ring_rx_clean()
554 if (buff_->next >= self->size) { in __aq_ring_rx_clean()
561 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
563 aq_ring_dx_in_range(self->sw_head, in __aq_ring_rx_clean()
565 self->hw_head); in __aq_ring_rx_clean()
582 if (buff_->next >= self->size) { in __aq_ring_rx_clean()
587 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
592 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
593 ++self->stats.rx.errors; in __aq_ring_rx_clean()
594 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
600 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
601 ++self->stats.rx.errors; in __aq_ring_rx_clean()
602 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
606 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), in __aq_ring_rx_clean()
613 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
614 self->stats.rx.skb_alloc_fails++; in __aq_ring_rx_clean()
615 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
621 aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb), in __aq_ring_rx_clean()
638 self->frame_max); in __aq_ring_rx_clean()
646 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
648 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), in __aq_ring_rx_clean()
657 self->frame_max); in __aq_ring_rx_clean()
675 aq_rx_checksum(self, buff, skb); in __aq_ring_rx_clean()
683 : AQ_NIC_RING2QMAP(self->aq_nic, in __aq_ring_rx_clean()
684 self->idx)); in __aq_ring_rx_clean()
686 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
687 ++self->stats.rx.packets; in __aq_ring_rx_clean()
688 self->stats.rx.bytes += skb->len; in __aq_ring_rx_clean()
689 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
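
For frames spanning several descriptors, the loop follows buff_->next and rejects any index outside the ring (lines 554 and 582). A minimal userspace model of that guarded walk; the structure fields here are simplified stand-ins, and the example assumes an acyclic chain, as a well-formed ring provides:

	#include <stdbool.h>
	#include <stdio.h>

	struct buff {
		unsigned int next;	/* index of next buffer in the frame */
		bool is_eop;		/* end of packet */
	};

	/* Bounds-check each hop before dereferencing, as the RX clean loop
	 * appears to do at lines 554/582. */
	static int walk_frame(const struct buff *ring, unsigned int size,
			      unsigned int start)
	{
		unsigned int idx = start;
		int buffers = 1;

		while (!ring[idx].is_eop) {
			unsigned int next = ring[idx].next;

			if (next >= size)	/* corrupt descriptor chain */
				return -1;
			idx = next;
			buffers++;
		}
		return buffers;
	}

	int main(void)
	{
		/* A 3-buffer frame starting at index 0: 0 -> 1 -> 2 (EOP). */
		struct buff ring[4] = {
			{ .next = 1 }, { .next = 2 }, { .is_eop = true }, { 0 },
		};

		printf("buffers in frame: %d\n", walk_frame(ring, 4, 0));
		return 0;
	}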
836 int aq_ring_rx_clean(struct aq_ring_s *self, in aq_ring_rx_clean() argument
842 return __aq_ring_xdp_clean(self, napi, work_done, budget); in aq_ring_rx_clean()
844 return __aq_ring_rx_clean(self, napi, work_done, budget); in aq_ring_rx_clean()
847 void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) in aq_ring_hwts_rx_clean() argument
850 while (self->sw_head != self->hw_head) { in aq_ring_hwts_rx_clean()
854 self->dx_ring + in aq_ring_hwts_rx_clean()
855 (self->sw_head * self->dx_size), in aq_ring_hwts_rx_clean()
856 self->dx_size, &ns); in aq_ring_hwts_rx_clean()
859 self->sw_head = aq_ring_next_dx(self, self->sw_head); in aq_ring_hwts_rx_clean()
864 int aq_ring_rx_fill(struct aq_ring_s *self) in aq_ring_rx_fill() argument
870 if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES, in aq_ring_rx_fill()
871 self->size / 2)) in aq_ring_rx_fill()
874 for (i = aq_ring_avail_dx(self); i--; in aq_ring_rx_fill()
875 self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { in aq_ring_rx_fill()
876 buff = &self->buff_ring[self->sw_tail]; in aq_ring_rx_fill()
879 buff->len = self->frame_max; in aq_ring_rx_fill()
881 err = aq_get_rxpages(self, buff); in aq_ring_rx_fill()
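
Read together, lines 870-881 give the refill path a hysteresis: nothing happens until at least min(AQ_CFG_RX_REFILL_THRES, size / 2) descriptors are free, and then every free slot is refilled through aq_get_rxpages(). A sketch of the probable loop, with the flag reset marked as an assumption:

	/* Sketch of aq_ring_rx_fill(), reconstructed from the matched lines. */
	int aq_ring_rx_fill(struct aq_ring_s *self)
	{
		struct aq_ring_buff_s *buff;
		int err = 0;
		int i;

		/* Skip cheap refills; wait for a batch (lines 870-871). */
		if (aq_ring_avail_dx(self) < min_t(unsigned int,
						   AQ_CFG_RX_REFILL_THRES,
						   self->size / 2))
			return 0;

		for (i = aq_ring_avail_dx(self); i--;
		     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
			buff = &self->buff_ring[self->sw_tail];

			buff->flags = 0U;		/* assumption */
			buff->len = self->frame_max;	/* line 879 */

			err = aq_get_rxpages(self, buff);	/* line 881 */
			if (err)
				break;
		}

		return err;
	}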
893 void aq_ring_rx_deinit(struct aq_ring_s *self) in aq_ring_rx_deinit() argument
895 if (!self) in aq_ring_rx_deinit()
898 for (; self->sw_head != self->sw_tail; in aq_ring_rx_deinit()
899 self->sw_head = aq_ring_next_dx(self, self->sw_head)) { in aq_ring_rx_deinit()
900 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_rx_deinit()
902 aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic)); in aq_ring_rx_deinit()
906 void aq_ring_free(struct aq_ring_s *self) in aq_ring_free() argument
908 if (!self) in aq_ring_free()
911 kfree(self->buff_ring); in aq_ring_free()
912 self->buff_ring = NULL; in aq_ring_free()
914 if (self->dx_ring) { in aq_ring_free()
915 dma_free_coherent(aq_nic_get_dev(self->aq_nic), in aq_ring_free()
916 self->size * self->dx_size, self->dx_ring, in aq_ring_free()
917 self->dx_ring_pa); in aq_ring_free()
918 self->dx_ring = NULL; in aq_ring_free()
922 void aq_ring_hwts_rx_free(struct aq_ring_s *self) in aq_ring_hwts_rx_free() argument
924 if (!self) in aq_ring_hwts_rx_free()
927 if (self->dx_ring) { in aq_ring_hwts_rx_free()
928 dma_free_coherent(aq_nic_get_dev(self->aq_nic), in aq_ring_hwts_rx_free()
929 self->size * self->dx_size + AQ_CFG_RXDS_DEF, in aq_ring_hwts_rx_free()
930 self->dx_ring, self->dx_ring_pa); in aq_ring_hwts_rx_free()
931 self->dx_ring = NULL; in aq_ring_hwts_rx_free()
935 unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) in aq_ring_fill_stats_data() argument
940 if (self->ring_type == ATL_RING_RX) { in aq_ring_fill_stats_data()
944 start = u64_stats_fetch_begin(&self->stats.rx.syncp); in aq_ring_fill_stats_data()
945 data[count] = self->stats.rx.packets; in aq_ring_fill_stats_data()
946 data[++count] = self->stats.rx.jumbo_packets; in aq_ring_fill_stats_data()
947 data[++count] = self->stats.rx.lro_packets; in aq_ring_fill_stats_data()
948 data[++count] = self->stats.rx.errors; in aq_ring_fill_stats_data()
949 data[++count] = self->stats.rx.alloc_fails; in aq_ring_fill_stats_data()
950 data[++count] = self->stats.rx.skb_alloc_fails; in aq_ring_fill_stats_data()
951 data[++count] = self->stats.rx.polls; in aq_ring_fill_stats_data()
952 data[++count] = self->stats.rx.pg_flips; in aq_ring_fill_stats_data()
953 data[++count] = self->stats.rx.pg_reuses; in aq_ring_fill_stats_data()
954 data[++count] = self->stats.rx.pg_losts; in aq_ring_fill_stats_data()
955 data[++count] = self->stats.rx.xdp_aborted; in aq_ring_fill_stats_data()
956 data[++count] = self->stats.rx.xdp_drop; in aq_ring_fill_stats_data()
957 data[++count] = self->stats.rx.xdp_pass; in aq_ring_fill_stats_data()
958 data[++count] = self->stats.rx.xdp_tx; in aq_ring_fill_stats_data()
959 data[++count] = self->stats.rx.xdp_invalid; in aq_ring_fill_stats_data()
960 data[++count] = self->stats.rx.xdp_redirect; in aq_ring_fill_stats_data()
961 } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start)); in aq_ring_fill_stats_data()
966 start = u64_stats_fetch_begin(&self->stats.tx.syncp); in aq_ring_fill_stats_data()
967 data[count] = self->stats.tx.packets; in aq_ring_fill_stats_data()
968 data[++count] = self->stats.tx.queue_restarts; in aq_ring_fill_stats_data()
969 } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start)); in aq_ring_fill_stats_data()
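
Every counter update in this file is bracketed by u64_stats_update_begin()/_end() on the writer side, and this reader drains a consistent snapshot with the u64_stats_fetch_begin()/_retry() loop, so 64-bit counters stay coherent on 32-bit SMP kernels without a lock. A simplified userspace analogue of the underlying seqcount idea (not the kernel API; memory ordering and strict data-race rules are reduced to the essentials):

	#include <inttypes.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct rx_stats {
		atomic_uint seq;	/* odd while an update is in flight */
		uint64_t packets;
		uint64_t bytes;
	};

	/* Writer side: analogue of u64_stats_update_begin()/_end(). */
	static void stats_add(struct rx_stats *s, uint64_t len)
	{
		atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
		s->packets++;
		s->bytes += len;
		atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
	}

	/* Reader side: analogue of the fetch_begin/fetch_retry loop above.
	 * Re-read until the sequence was even and unchanged. */
	static void stats_snapshot(struct rx_stats *s, uint64_t *p, uint64_t *b)
	{
		unsigned int start;

		do {
			start = atomic_load_explicit(&s->seq,
						     memory_order_acquire);
			*p = s->packets;
			*b = s->bytes;
		} while ((start & 1) ||
			 start != atomic_load_explicit(&s->seq,
						       memory_order_acquire));
	}

	int main(void)
	{
		struct rx_stats s = { 0 };
		uint64_t p, b;

		stats_add(&s, 1500);
		stats_snapshot(&s, &p, &b);
		printf("packets=%" PRIu64 " bytes=%" PRIu64 "\n", p, b);
		return 0;
	}

On 64-bit builds the kernel's u64_stats primitives essentially compile away, since a single aligned 64-bit read is already atomic there; the seqcount only does real work on 32-bit SMP.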