xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2024 Intel Corporation */
3 
4 #ifndef _IDPF_VIRTCHNL_H_
5 #define _IDPF_VIRTCHNL_H_
6 
7 #include "virtchnl2.h"
8 
/* Default time (in ms) to wait for a virtchnl reply before giving up */
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC	(60 * 1000)
/* Message cookie layout: low byte carries the transaction index, the next
 * byte carries the per-message salt (see struct idpf_vc_xn @idx/@salt).
 */
#define IDPF_VC_XN_IDX_M		GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M		GENMASK(15, 8)
/* Number of transaction slots; U8_MAX so an idx always fits IDPF_VC_XN_IDX_M */
#define IDPF_VC_XN_RING_LEN		U8_MAX
13 
/**
 * enum idpf_vc_xn_state - Virtchnl transaction status
 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
 *				  updated
 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
 *				 was an error, buffer not updated
 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
 *		      return context; a callback may be provided to handle
 *		      return
 */
enum idpf_vc_xn_state {
	/* Starts at 1, presumably so a zeroed object is not a valid state —
	 * confirm against the transaction manager's init path.
	 */
	IDPF_VC_XN_IDLE = 1,
	IDPF_VC_XN_WAITING,
	IDPF_VC_XN_COMPLETED_SUCCESS,
	IDPF_VC_XN_COMPLETED_FAILED,
	IDPF_VC_XN_SHUTDOWN,
	IDPF_VC_XN_ASYNC,
};
35 
struct idpf_vc_xn;
/**
 * typedef async_vc_cb - Callback for asynchronous virtchnl messages
 * @adapter: driver-private adapter structure
 * @xn: transaction the reply belongs to
 * @ctlq_msg: received control queue message carrying the reply
 *
 * Invoked from the receive path for transactions sent with state
 * IDPF_VC_XN_ASYNC. Returns 0 on success or a negative errno.
 */
typedef int (*async_vc_cb)(struct idpf_adapter *adapter,
			   struct idpf_vc_xn *xn,
			   const struct idpf_ctlq_msg *ctlq_msg);
40 
/**
 * struct idpf_vc_xn - Data structure representing virtchnl transactions
 * @completed: virtchnl event loop uses that to signal when a reply is
 *	       available, uses kernel completion API
 * @state: virtchnl event loop stores the data below, protected by the
 *	   completion's lock.
 * @reply_sz: Original size of reply, may be > reply.iov_len; it will be
 *	      truncated on its way to the receiver thread according to
 *	      reply.iov_len.
 * @reply: Reference to the buffer(s) where the reply data should be written
 *	   to. May be 0-length (then NULL address permitted) if the reply data
 *	   should be ignored.
 * @async_handler: if sent asynchronously, a callback can be provided to handle
 *		   the reply when it's received
 * @vc_op: corresponding opcode sent with this transaction
 * @idx: index used as retrieval on reply receive, used for cookie
 * @salt: changed every message to make unique, used for cookie
 */
struct idpf_vc_xn {
	struct completion completed;
	enum idpf_vc_xn_state state;
	size_t reply_sz;
	struct kvec reply;
	async_vc_cb async_handler;
	u32 vc_op;
	u8 idx;
	u8 salt;
};
69 
/**
 * struct idpf_vc_xn_params - Parameters for executing transaction
 * @send_buf: kvec for send buffer
 * @recv_buf: kvec for recv buffer; its iov_base may be NULL, but then its
 *	      iov_len must be zero
 * @timeout_ms: timeout (in ms) to wait for reply
 * @async: send message asynchronously, will not wait on completion
 * @async_handler: If sent asynchronously, optional callback handler. The user
 *		   must be careful when using async handlers as the memory for
 *		   the recv_buf _cannot_ be on stack if this is async.
 * @vc_op: virtchnl op to send
 */
struct idpf_vc_xn_params {
	struct kvec send_buf;
	struct kvec recv_buf;
	int timeout_ms;
	bool async;
	async_vc_cb async_handler;
	u32 vc_op;
};
89 
90 struct idpf_adapter;
91 struct idpf_netdev_priv;
92 struct idpf_vec_regs;
93 struct idpf_vport;
94 struct idpf_vport_max_q;
95 struct idpf_vport_config;
96 struct idpf_vport_user_config_data;
97 
98 ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
99 			const struct idpf_vc_xn_params *params);
100 int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
101 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
102 int idpf_vc_core_init(struct idpf_adapter *adapter);
103 void idpf_vc_core_deinit(struct idpf_adapter *adapter);
104 
105 int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
106 			   struct idpf_vec_regs *reg_vals);
107 int idpf_queue_reg_init(struct idpf_vport *vport,
108 			struct idpf_q_vec_rsrc *rsrc,
109 			struct idpf_queue_id_reg_info *chunks);
110 int idpf_vport_queue_ids_init(struct idpf_vport *vport,
111 			      struct idpf_q_vec_rsrc *rsrc,
112 			      struct idpf_queue_id_reg_info *chunks);
/**
 * idpf_vport_deinit_queue_reg_chunks - Free the queue ID register chunks
 * @vport_cfg: vport config whose chunk memory is released
 *
 * Frees the queue_chunks array and clears the pointer so the object can be
 * safely deinitialized more than once (kfree(NULL) is a no-op).
 */
static inline void
idpf_vport_deinit_queue_reg_chunks(struct idpf_vport_config *vport_cfg)
{
	kfree(vport_cfg->qid_reg_info.queue_chunks);
	vport_cfg->qid_reg_info.queue_chunks = NULL;
}
119 
120 bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
121 bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
122 bool idpf_sideband_action_ena(struct idpf_vport *vport,
123 			      struct ethtool_rx_flow_spec *fsp);
124 unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
125 
126 int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq);
127 int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
128 		     u32 op, u16 msg_size, u8 *msg, u16 cookie);
129 
/**
 * struct idpf_queue_ptr - type-tagged pointer to a single queue
 * @type: virtchnl2 queue type, selects which union member is valid
 * @rxq: RX queue, valid for %VIRTCHNL2_QUEUE_TYPE_RX
 * @txq: TX queue, valid for %VIRTCHNL2_QUEUE_TYPE_TX
 * @bufq: buffer queue, valid for the RX buffer queue type
 * @complq: completion queue, valid for the TX completion queue type
 */
struct idpf_queue_ptr {
	enum virtchnl2_queue_type	type;
	union {
		struct idpf_rx_queue		*rxq;
		struct idpf_tx_queue		*txq;
		struct idpf_buf_queue		*bufq;
		struct idpf_compl_queue		*complq;
	};
};
139 
/**
 * struct idpf_queue_set - set of queues to operate on as one unit
 * @adapter: adapter the queues belong to
 * @qv_rsrc: queue and vector resources backing the set
 * @vport_id: ID of the vport owning the queues
 * @num: number of entries in @qs
 * @qs: flexible array of @num tagged queue pointers
 */
struct idpf_queue_set {
	struct idpf_adapter		*adapter;
	struct idpf_q_vec_rsrc		*qv_rsrc;
	u32				vport_id;

	u32				num;
	struct idpf_queue_ptr		qs[] __counted_by(num);
};
148 
149 struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
150 					    struct idpf_q_vec_rsrc *rsrc,
151 					    u32 vport_id, u32 num);
152 
153 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
154 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
155 int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
156 
157 int idpf_send_disable_queues_msg(struct idpf_vport *vport);
158 int idpf_send_enable_queues_msg(struct idpf_vport *vport);
159 int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
160 				struct idpf_q_vec_rsrc *rsrc,
161 				u32 vport_id);
162 
163 int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
164 u32 idpf_get_vport_id(struct idpf_vport *vport);
165 int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
166 			       struct idpf_vport_max_q *max_q);
167 int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
168 int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
169 int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
170 
171 int idpf_vport_adjust_qs(struct idpf_vport *vport,
172 			 struct idpf_q_vec_rsrc *rsrc);
173 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
174 			    struct idpf_vport_max_q *max_q);
175 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
176 			       struct idpf_vport_max_q *max_q);
177 int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
178 			     struct idpf_vport_config *vport_config,
179 			     struct idpf_q_vec_rsrc *rsrc,
180 			     u32 vport_id);
181 int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
182 				struct idpf_queue_id_reg_info *chunks,
183 				u32 vport_id);
184 
185 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
186 				 struct idpf_q_vec_rsrc *rsrc);
187 int idpf_get_vec_ids(struct idpf_adapter *adapter,
188 		     u16 *vecids, int num_vecids,
189 		     struct virtchnl2_vector_chunks *chunks);
190 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
191 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
192 int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
193 					 struct idpf_q_vec_rsrc *rsrc,
194 					 u32 vport_id,
195 					 bool map);
196 
197 int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
198 			     struct idpf_vport_config *vport_config,
199 			     const u8 *default_mac_addr, u32 vport_id,
200 			     bool add, bool async);
201 int idpf_set_promiscuous(struct idpf_adapter *adapter,
202 			 struct idpf_vport_user_config_data *config_data,
203 			 u32 vport_id);
204 int idpf_check_supported_desc_ids(struct idpf_vport *vport);
205 int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
206 				   bool loopback_ena);
207 int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
208 			    struct idpf_port_stats *port_stats);
209 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
210 int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
211 				  struct idpf_rss_data *rss_data,
212 				  u32 vport_id, bool get);
213 int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
214 				  struct idpf_rss_data *rss_data,
215 				  u32 vport_id, bool get);
216 void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
217 int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
218 			       u8 *send_msg, u16 msg_size,
219 			       u8 *recv_msg, u16 *recv_len);
220 
221 #endif /* _IDPF_VIRTCHNL_H_ */
222