/* xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0) */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2024 Intel Corporation */
3 
4 #ifndef _IDPF_VIRTCHNL_H_
5 #define _IDPF_VIRTCHNL_H_
6 
7 #include "virtchnl2.h"
8 
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC	(60 * 1000)
/* Each mailbox message carries a u16 cookie (see idpf_send_mb_msg()):
 * the transaction index in the low byte and a per-message salt in the
 * high byte.
 */
#define IDPF_VC_XN_IDX_M		GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M		GENMASK(15, 8)
/* Number of transaction slots; U8_MAX so the 8-bit index covers the ring */
#define IDPF_VC_XN_RING_LEN		U8_MAX
13 
/**
 * enum idpf_vc_xn_state - Virtchnl transaction status
 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
 *				  updated
 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
 *				 was an error, buffer not updated
 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
 *		      return context; a callback may be provided to handle
 *		      return
 */
enum idpf_vc_xn_state {
	/* 1-based: NOTE(review) presumably so a zero-initialized state is
	 * never mistaken for a valid one -- confirm against users
	 */
	IDPF_VC_XN_IDLE = 1,
	IDPF_VC_XN_WAITING,
	IDPF_VC_XN_COMPLETED_SUCCESS,
	IDPF_VC_XN_COMPLETED_FAILED,
	IDPF_VC_XN_SHUTDOWN,
	IDPF_VC_XN_ASYNC,
};
35 
struct idpf_vc_xn;
/**
 * typedef async_vc_cb - Callback for asynchronous messages
 *
 * Invoked to process the reply to a virtchnl message that was sent with
 * no waiting return context (IDPF_VC_XN_ASYNC).  Arguments are the
 * adapter, the transaction the reply belongs to, and the received control
 * queue message.  Returns an int status (0 on success; presumably a
 * negative errno on failure -- verify against implementations).
 */
typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
			    const struct idpf_ctlq_msg *);
40 
/**
 * struct idpf_vc_xn - Data structure representing virtchnl transactions
 * @completed: virtchnl event loop uses that to signal when a reply is
 *	       available, uses kernel completion API
 * @state: virtchnl event loop stores the data below, protected by the
 *	   completion's lock.
 * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
 *	      truncated on its way to the receiver thread according to
 *	      reply_buf.iov_len.
 * @reply: Reference to the buffer(s) where the reply data should be written
 *	   to. May be 0-length (then NULL address permitted) if the reply data
 *	   should be ignored.
 * @async_handler: if sent asynchronously, a callback can be provided to handle
 *		   the reply when it's received
 * @vc_op: corresponding opcode sent with this transaction
 * @idx: index used as retrieval on reply receive, used for cookie
 *	 (low byte, see IDPF_VC_XN_IDX_M)
 * @salt: changed every message to make unique, used for cookie
 *	  (high byte, see IDPF_VC_XN_SALT_M)
 */
struct idpf_vc_xn {
	struct completion completed;
	enum idpf_vc_xn_state state;
	size_t reply_sz;
	struct kvec reply;
	async_vc_cb async_handler;
	u32 vc_op;
	u8 idx;
	u8 salt;
};
69 
/**
 * struct idpf_vc_xn_params - Parameters for executing transaction
 * @send_buf: kvec for send buffer
 * @recv_buf: kvec for recv buffer; iov_base may be NULL, but iov_len must
 *	      then be zero
 * @timeout_ms: timeout to wait for reply
 * @async: send message asynchronously, will not wait on completion
 * @async_handler: If sent asynchronously, optional callback handler. The user
 *		   must be careful when using async handlers as the memory for
 *		   the recv_buf _cannot_ be on stack if this is async.
 * @vc_op: virtchnl op to send
 */
struct idpf_vc_xn_params {
	struct kvec send_buf;
	struct kvec recv_buf;
	int timeout_ms;
	bool async;
	async_vc_cb async_handler;
	u32 vc_op;
};
89 
/* Forward declarations of types defined elsewhere in the driver */
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;

/* Transaction execution and mailbox / virtchnl core lifecycle */
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);

/* Interrupt vector register retrieval and queue register/ID init */
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
			   struct idpf_vec_regs *reg_vals);
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);

/* Capability and sideband (Flow Steering) support queries */
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
bool idpf_sideband_action_ena(struct idpf_vport *vport,
			      struct ethtool_rx_flow_spec *fsp);
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);

/* Raw mailbox receive/send; @cookie carries the xn idx/salt pair */
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie);
118 
/**
 * struct idpf_queue_ptr - Type-tagged pointer to a single queue
 * @type: virtchnl2 queue type; selects which union member is valid
 * @rxq: RX queue pointer
 * @txq: TX queue pointer
 * @bufq: buffer queue pointer
 * @complq: completion queue pointer
 */
struct idpf_queue_ptr {
	enum virtchnl2_queue_type	type;
	union {
		struct idpf_rx_queue		*rxq;
		struct idpf_tx_queue		*txq;
		struct idpf_buf_queue		*bufq;
		struct idpf_compl_queue		*complq;
	};
};
128 
/**
 * struct idpf_queue_set - Group of queues to operate on together
 * @vport: associated vport
 * @num: number of entries in @qs
 * @qs: flexible array of type-tagged queue pointers, @num entries long
 */
struct idpf_queue_set {
	struct idpf_vport		*vport;

	u32				num;
	struct idpf_queue_ptr		qs[] __counted_by(num);
};
135 
struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);

/* Virtchnl messages operating on an explicit set of queues */
int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);

/* Virtchnl messages operating on all of a vport's queues */
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);

/* Vport lifecycle: create/destroy and enable/disable */
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q);
int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
int idpf_send_enable_vport_msg(struct idpf_vport *vport);
int idpf_send_disable_vport_msg(struct idpf_vport *vport);

/* Queue count (de)allocation and adjustment */
int idpf_vport_adjust_qs(struct idpf_vport *vport);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q);
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);

/* Interrupt vector allocation and queue<->vector mapping */
int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
		     u16 *vecids, int num_vecids,
		     struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);

/* MAC filters, promiscuous mode, RSS, stats and other config messages */
int idpf_add_del_mac_filters(struct idpf_vport *vport,
			     struct idpf_netdev_priv *np,
			     bool add, bool async);
int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
			       u8 *send_msg, u16 msg_size,
			       u8 *recv_msg, u16 *recv_len);
188 
189 #endif /* _IDPF_VIRTCHNL_H_ */