/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2024 Intel Corporation */

#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_

#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC	(60 * 1000)
#define IDPF_VC_XN_IDX_M		GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M		GENMASK(15, 8)
#define IDPF_VC_XN_RING_LEN		U8_MAX

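/*
 * IDPF_VC_XN_IDX_M and IDPF_VC_XN_SALT_M describe how a transaction is
 * encoded into the 16-bit message cookie (as passed to idpf_send_mb_msg()
 * below): the ring index sits in bits 7:0 and the per-message salt in bits
 * 15:8. A minimal sketch of packing and unpacking such a cookie with the
 * generic bitfield helpers (FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>)
 * is shown here; the local variables are illustrative only:
 *
 *	u16 cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, salt) |
 *		     FIELD_PREP(IDPF_VC_XN_IDX_M, idx);
 *	u8 rx_idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
 *	u8 rx_salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
 */
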
/**
 * enum idpf_vc_xn_state - Virtchnl transaction status
 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
 *				  updated
 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
 *				 was an error, buffer not updated
 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
 *		      return context; a callback may be provided to handle the
 *		      reply when it arrives
 */
enum idpf_vc_xn_state {
	IDPF_VC_XN_IDLE = 1,
	IDPF_VC_XN_WAITING,
	IDPF_VC_XN_COMPLETED_SUCCESS,
	IDPF_VC_XN_COMPLETED_FAILED,
	IDPF_VC_XN_SHUTDOWN,
	IDPF_VC_XN_ASYNC,
};

struct idpf_vc_xn;
/* Callback for asynchronous messages */
typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
			    const struct idpf_ctlq_msg *);

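/*
 * A minimal sketch of an async_vc_cb handler is shown below;
 * idpf_example_async_handler is a hypothetical name used only for
 * illustration. Per the callback contract above, the handler is invoked with
 * the raw control queue message when the reply for an asynchronously sent
 * transaction is received, and would typically return 0 on success or a
 * negative error code on failure (the usual kernel convention):
 *
 *	static int idpf_example_async_handler(struct idpf_adapter *adapter,
 *					      struct idpf_vc_xn *xn,
 *					      const struct idpf_ctlq_msg *msg)
 *	{
 *		return 0;
 *	}
 */
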
/**
 * struct idpf_vc_xn - Data structure representing virtchnl transactions
 * @completed: virtchnl event loop uses this to signal when a reply is
 *	       available, via the kernel completion API
 * @state: virtchnl event loop stores the data below, protected by the
 *	   completion's lock.
 * @reply_sz: Original size of the reply, may be > reply.iov_len; the data is
 *	      truncated on its way to the receiver thread according to
 *	      reply.iov_len.
 * @reply: Reference to the buffer(s) where the reply data should be written.
 *	   May be 0-length (NULL address then permitted) if the reply data
 *	   should be ignored.
 * @async_handler: if sent asynchronously, a callback can be provided to handle
 *		   the reply when it's received
 * @vc_op: corresponding virtchnl opcode sent with this transaction
 * @idx: index used to look up the transaction when the reply is received,
 *	 part of the cookie
 * @salt: changed for every message to make the cookie unique, part of the
 *	  cookie
 */
struct idpf_vc_xn {
	struct completion completed;
	enum idpf_vc_xn_state state;
	size_t reply_sz;
	struct kvec reply;
	async_vc_cb async_handler;
	u32 vc_op;
	u8 idx;
	u8 salt;
};

/**
 * struct idpf_vc_xn_params - Parameters for executing transaction
 * @send_buf: kvec for the send buffer
 * @recv_buf: kvec for the receive buffer; may be NULL, in which case it must
 *	      have zero length
 * @timeout_ms: timeout in milliseconds to wait for a reply
 * @async: send the message asynchronously; do not wait for completion
 * @async_handler: if sent asynchronously, optional callback to handle the
 *		   reply. The user must be careful when using async handlers
 *		   as the memory backing recv_buf _cannot_ be on the stack if
 *		   this is async.
 * @vc_op: virtchnl opcode to send
 */
struct idpf_vc_xn_params {
	struct kvec send_buf;
	struct kvec recv_buf;
	int timeout_ms;
	bool async;
	async_vc_cb async_handler;
	u32 vc_op;
};

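/*
 * A minimal sketch of a synchronous transaction built from these parameters
 * and executed with idpf_vc_xn_exec() (declared below) is shown here. The
 * opcode and message variable are illustrative assumptions, not part of this
 * header; a real caller would use the virtchnl2 opcode and structure that
 * match its request. On failure a negative error code is expected; on
 * success, the size of the reply written into recv_buf:
 *
 *	struct idpf_vc_xn_params xn_params = {
 *		.vc_op		= VIRTCHNL2_OP_GET_STATS,
 *		.send_buf	= { .iov_base = &msg, .iov_len = sizeof(msg) },
 *		.recv_buf	= { .iov_base = &msg, .iov_len = sizeof(msg) },
 *		.timeout_ms	= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
 *	};
 *	ssize_t reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
 *
 *	if (reply_sz < 0)
 *		return reply_sz;
 */
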
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;

ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);

int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
			   struct idpf_vec_regs *reg_vals);
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);

bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
bool idpf_sideband_action_ena(struct idpf_vport *vport,
			      struct ethtool_rx_flow_spec *fsp);
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);

int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie);

void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q);
int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
int idpf_send_enable_vport_msg(struct idpf_vport *vport);
int idpf_send_disable_vport_msg(struct idpf_vport *vport);

int idpf_vport_adjust_qs(struct idpf_vport *vport);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q);
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
int idpf_send_config_queues_msg(struct idpf_vport *vport);

int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
		     u16 *vecids, int num_vecids,
		     struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);

int idpf_add_del_mac_filters(struct idpf_vport *vport,
			     struct idpf_netdev_priv *np,
			     bool add, bool async);
int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
			       u8 *send_msg, u16 msg_size,
			       u8 *recv_msg, u16 *recv_len);

#endif /* _IDPF_VIRTCHNL_H_ */