--- gve.h (9a5e0776d11f1ac9c740a6e24ff0e0facb6e3ddb)
+++ gve.h (ee24284e2a1075966f0f2c5499c59b7d2b9bc2de)
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
  * Copyright (C) 2015-2021 Google, Inc.
  */

 #ifndef _GVE_H_
 #define _GVE_H_
--- 624 unchanged lines hidden ---
 	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
 	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
 };

 struct gve_ptype_lut {
 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
 };

-/* Parameters for allocating queue page lists */
-struct gve_qpls_alloc_cfg {
-	struct gve_queue_config *tx_cfg;
-	struct gve_queue_config *rx_cfg;
-
-	u16 num_xdp_queues;
-	bool raw_addressing;
-	bool is_gqi;
-
-	/* Allocated resources are returned here */
-	struct gve_queue_page_list *qpls;
-};
-
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
 	struct gve_queue_config *qcfg;

-	/* qpls must already be allocated */
-	struct gve_queue_page_list *qpls;
-
 	u16 ring_size;
 	u16 start_idx;
 	u16 num_rings;
 	bool raw_addressing;

 	/* Allocated resources are returned here */
 	struct gve_tx_ring *tx;
 };

 /* Parameters for allocating resources for rx queues */
 struct gve_rx_alloc_rings_cfg {
 	/* tx config is also needed to determine QPL ids */
 	struct gve_queue_config *qcfg;
 	struct gve_queue_config *qcfg_tx;

-	/* qpls must already be allocated */
-	struct gve_queue_page_list *qpls;
-
 	u16 ring_size;
 	u16 packet_buffer_size;
 	bool raw_addressing;
 	bool enable_header_split;

 	/* Allocated resources are returned here */
 	struct gve_rx_ring *rx;
 };
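With struct gve_qpls_alloc_cfg removed, a ring (re)allocation request is now described entirely by the two cfg structs above. The minimal sketch below shows how a caller might fill them; it is illustrative only: the function name, the literal ring/buffer sizes, and the assumption that priv->rx_cfg is a struct gve_queue_config like priv->tx_cfg are not taken from the driver.

#include "gve.h"

/* Hypothetical helper: describe a TX/RX ring allocation request.
 * The numeric values are placeholders; the real driver derives them
 * from its current device and queue configuration.
 */
static void example_fill_ring_cfgs(struct gve_priv *priv,
				   struct gve_tx_alloc_rings_cfg *tx_alloc,
				   struct gve_rx_alloc_rings_cfg *rx_alloc)
{
	tx_alloc->qcfg = &priv->tx_cfg;
	tx_alloc->ring_size = 512;		/* assumed TX descriptor count */
	tx_alloc->start_idx = 0;		/* first ring to (re)allocate */
	tx_alloc->num_rings = priv->tx_cfg.num_queues;
	tx_alloc->raw_addressing = true;	/* RDA queue format */
	/* tx_alloc->tx is written back by the allocation path */

	rx_alloc->qcfg = &priv->rx_cfg;
	rx_alloc->qcfg_tx = &priv->tx_cfg;	/* RX QPL ids start after the TX ids */
	rx_alloc->ring_size = 1024;		/* assumed RX descriptor count */
	rx_alloc->packet_buffer_size = 2048;	/* assumed buffer size in bytes */
	rx_alloc->raw_addressing = true;
	rx_alloc->enable_header_split = false;
	/* rx_alloc->rx is written back by the allocation path */
}

Carrying qcfg_tx on the RX side mirrors gve_get_rx_qpl_id() further down: RX QPL ids are offset by the number of TX queues.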
--- 9 unchanged lines hidden ---
 	GVE_DQO_RDA_FORMAT = 0x3,
 	GVE_DQO_QPL_FORMAT = 0x4,
 };

 struct gve_priv {
 	struct net_device *dev;
 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
-	struct gve_queue_page_list *qpls; /* array of num qpls */
 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
 	dma_addr_t irq_db_indices_bus;
 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
 	char mgmt_msix_name[IFNAMSIZ + 16];
 	u32 mgmt_msix_idx;
 	__be32 *counter_array; /* array of num_event_counters */
 	dma_addr_t counter_array_bus;
--- 307 unchanged lines hidden ---
 	return tx_qid;
 }

 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 {
 	return priv->tx_cfg.max_queues + rx_qid;
 }

-/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
 static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
 {
 	return tx_cfg->max_queues + rx_qid;
 }

 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 {
 	return gve_tx_qpl_id(priv, 0);
 }

-/* Returns the index into priv->qpls where the first rx queue's QPL resides */
 static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
 {
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }

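The inline helpers above pin down the QPL id space: TX queue i maps to QPL id i (gve_tx_qpl_id() is assumed to take the queue index as its second argument, as gve_tx_start_qpl_id() suggests), and RX queue j maps to tx_cfg.max_queues + j. A purely illustrative debug helper that walks that mapping follows; the function name and the use of priv->rx_cfg.num_queues are assumptions:

#include <linux/printk.h>
#include "gve.h"

/* Hypothetical debug helper: dump the queue -> QPL id mapping. */
static void example_dump_qpl_ids(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		pr_info("tx queue %d -> QPL id %u\n", i, gve_tx_qpl_id(priv, i));

	/* RX ids start right after the TX ids, cf. gve_rx_start_qpl_id() */
	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		pr_info("rx queue %d -> QPL id %u\n", i, gve_rx_qpl_id(priv, i));
}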
 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
 {
 	/* For DQO, page count should be more than ring size for
--- 37 unchanged lines hidden ---
 int gve_napi_poll(struct napi_struct *napi, int budget);

 /* buffers */
 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
 		   struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction, gfp_t gfp_flags);
 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
+/* qpls */
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+						      u32 id, int pages);
+void gve_free_queue_page_list(struct gve_priv *priv,
+			      struct gve_queue_page_list *qpl,
+			      u32 id);
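Per-ring QPL setup is now exposed through the two prototypes added above. A hedged sketch of how an allocation path might pair them with the id and page-count helpers; the wrapper names are hypothetical, and a NULL return from gve_alloc_queue_page_list() on failure is an assumption:

#include "gve.h"

/* Hypothetical sketch: give one RX ring its own queue page list. */
static struct gve_queue_page_list *
example_alloc_rx_qpl(struct gve_priv *priv, int rx_qid, u32 rx_desc_cnt)
{
	u32 id = gve_rx_qpl_id(priv, rx_qid);
	int pages = gve_get_rx_pages_per_qpl_dqo(rx_desc_cnt);

	return gve_alloc_queue_page_list(priv, id, pages);
}

/* Matching teardown: the QPL is released under the id it was created with. */
static void example_free_rx_qpl(struct gve_priv *priv, int rx_qid,
				struct gve_queue_page_list *qpl)
{
	if (qpl)
		gve_free_queue_page_list(priv, qpl, gve_rx_qpl_id(priv, rx_qid));
}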
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		 u32 flags);
 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 		     void *data, int len, void *frame_p);
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
--- 20 unchanged lines hidden ---
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
 bool gve_header_split_supported(const struct gve_priv *priv);
 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
-			     struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_config(struct gve_priv *priv,
-		      struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_queues(struct gve_priv *priv,
 		      struct gve_queue_config new_rx_config,
 		      struct gve_queue_config new_tx_config);
 /* report stats handling */
 void gve_handle_report_stats(struct gve_priv *priv);
 /* exported by ethtool.c */
 extern const struct ethtool_ops gve_ethtool_ops;
 /* needed by ethtool */
 extern char gve_driver_name[];
 extern const char gve_version_str[];
 #endif /* _GVE_H_ */
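Taken together, the slimmed-down gve_get_curr_alloc_cfgs()/gve_adjust_config() signatures above suggest a reconfiguration flow along the lines of the sketch below. This is a speculative illustration of a caller, not the driver's code; the function name and the doubled ring size are made up.

#include "gve.h"

/* Hypothetical example: rebuild the rings with a larger TX ring size. */
static int example_grow_tx_rings(struct gve_priv *priv)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};

	/* Start from whatever is currently configured... */
	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	/* ...request a bigger TX ring (illustrative value)... */
	tx_alloc_cfg.ring_size *= 2;

	/* ...and let the driver tear down and bring up queues accordingly. */
	return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
}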