/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_SP_H
#define _QED_SP_H

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"

/* Completion semantics a client may request when posting an SPQ entry */
enum spq_mode {
	QED_SPQ_MODE_BLOCK,	/* Client will poll a designated mem. address */
	QED_SPQ_MODE_CB,	/* Client supplies a callback */
	QED_SPQ_MODE_EBLOCK,	/* QED should block until completion */
};

/* Completion callback and opaque client cookie for QED_SPQ_MODE_CB requests */
struct qed_spq_comp_cb {
	void	(*function)(struct qed_hwfn *,
			    void *,
			    union event_ring_data *,
			    u8 fw_return_code);
	void	*cookie;
};

/**
 * @brief qed_eth_cqe_completion - handles the completion of a
 *        ramrod on the cqe ring
 *
 * @param p_hwfn
 * @param cqe
 *
 * @return int
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);

/**
 *  @file
 *
 *  QED Slow-hwfn queue interface
 */

/* All per-protocol payloads that can ride inside a single slow-path
 * element; the union is sized by its largest member.
 */
union ramrod_data {
	struct pf_start_ramrod_data pf_start;
	struct pf_update_ramrod_data pf_update;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct vport_update_ramrod_data vport_update;
	struct core_rx_start_ramrod_data core_rx_queue_start;
	struct core_rx_stop_ramrod_data core_rx_queue_stop;
	struct core_tx_start_ramrod_data core_tx_queue_start;
	struct core_tx_stop_ramrod_data core_tx_queue_stop;
	struct vport_filter_update_ramrod_data vport_filter_update;

	struct rdma_init_func_ramrod_data rdma_init_func;
	struct rdma_close_func_ramrod_data rdma_close_func;
	struct rdma_register_tid_ramrod_data rdma_register_tid;
	struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
	struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
	struct roce_create_qp_req_ramrod_data roce_create_qp_req;
	struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
	struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
	struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
	struct roce_query_qp_req_ramrod_data roce_query_qp_req;
	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
	struct rdma_create_cq_ramrod_data rdma_create_cq;
	struct rdma_resize_cq_ramrod_data rdma_resize_cq;
	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
	struct rdma_srq_create_ramrod_data rdma_create_srq;
	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
	struct roce_init_func_ramrod_data roce_init_func;

	struct iscsi_slow_path_hdr iscsi_empty;
	struct iscsi_init_ramrod_params iscsi_init;
	struct iscsi_spe_func_dstry iscsi_destroy;
	struct iscsi_spe_conn_offload iscsi_conn_offload;
	struct iscsi_conn_update_ramrod_params iscsi_conn_update;
	struct iscsi_spe_conn_termination iscsi_conn_terminate;

	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
};

#define EQ_MAX_CREDIT   0xffffffff

/* High-priority entries are posted ahead of normal ones when the ring fills */
enum spq_priority {
	QED_SPQ_PRIORITY_NORMAL,
	QED_SPQ_PRIORITY_HIGH,
};

/* Either a callback (QED_SPQ_MODE_CB) or an address to poll (MODE_BLOCK) */
union qed_spq_req_comp {
	struct qed_spq_comp_cb	cb;
	u64			*done_addr;
};

/* Filled in on completion for QED_SPQ_MODE_EBLOCK requests */
struct qed_spq_comp_done {
	u64	done;
	u8	fw_return_code;
};

/* A single pending/posted slow-path request and its completion bookkeeping */
struct qed_spq_entry {
	struct list_head		list;

	u8				flags;

	/* HSI slow path element */
	struct slow_path_element	elem;

	union ramrod_data		ramrod;

	enum spq_priority		priority;

	/* pending queue for this entry */
	struct list_head		*queue;

	enum spq_mode			comp_mode;
	struct qed_spq_comp_cb		comp_cb;
	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */
};

/* Event queue - carries ramrod completions from FW back to the driver */
struct qed_eq {
	struct qed_chain	chain;
	u8			eq_sb_index;	/* index within the SB */
	__le16			*p_fw_cons;	/* ptr to index value */
};

struct qed_consq {
	struct qed_chain chain;
};

/* Slow-path queue - serializes ramrod submission towards FW */
struct qed_spq {
	spinlock_t		lock; /* SPQ lock */

	struct list_head	unlimited_pending;
	struct list_head	pending;
	struct list_head	completion_pending;
	struct list_head	free_pool;

	struct qed_chain	chain;

	/* allocated dma-able memory for spq entries (+ramrod data) */
	dma_addr_t		p_phys;
	struct qed_spq_entry	*p_virt;

#define SPQ_RING_SIZE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

	/* Bitmap for handling out-of-order completions */
	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
	u8			comp_bitmap_idx;

	/* Statistics */
	u32			unlimited_pending_count;
	u32			normal_count;
	u32			high_count;
	u32			comp_sent_count;
	u32			comp_count;

	u32			cid;
};

/**
 * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
 *        pends it to the future list.
 *
 * @param p_hwfn
 * @param p_ent
 * @param fw_return_code
 *
 * @return int
 */
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code);

/**
 * @brief qed_spq_alloc - Allocates & initializes the SPQ and EQ.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_spq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_setup - Reset the SPQ to its start state.
 *
 * @param p_hwfn
 */
void qed_spq_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_free - Deallocates the given SPQ struct.
 *
 * @param p_hwfn
 */
void qed_spq_free(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_get_entry - Obtain an entry from the spq
 *        free pool list.
 *
 * @param p_hwfn
 * @param pp_ent
 *
 * @return int
 */
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent);

/**
 * @brief qed_spq_return_entry - Return an entry to spq free
 *        pool list
 *
 * @param p_hwfn
 * @param p_ent
 */
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent);
/**
 * @brief qed_eq_alloc - Allocates & initializes an EQ struct
 *
 * @param p_hwfn
 * @param num_elem number of elements in the eq
 *
 * @return struct qed_eq* - a newly allocated structure; NULL upon error.
 */
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem);

/**
 * @brief qed_eq_setup - Reset the EQ to its start state.
 *
 * @param p_hwfn
 * @param p_eq
 */
void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq);

/**
 * @brief qed_eq_free - deallocates the given EQ struct.
 *
 * @param p_hwfn
 * @param p_eq
 */
void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq);

/**
 * @brief qed_eq_prod_update - update the FW with default EQ producer
 *
 * @param p_hwfn
 * @param prod
 */
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod);

/**
 * @brief qed_eq_completion - Completes currently pending EQ elements
 *
 * @param p_hwfn
 * @param cookie
 *
 * @return int
 */
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie);

/**
 * @brief qed_spq_completion - Completes a single event
 *
 * @param p_hwfn
 * @param echo - echo value from cookie (used for determining completion)
 * @param fw_return_code - FW return code for the completed ramrod
 * @param p_data - data from cookie (used in callback function if applicable)
 *
 * @return int
 */
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data);

/**
 * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
 *
 * @param p_hwfn
 *
 * @return u32 - SPQ CID
 */
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_alloc - Allocates & initializes a ConsQ
 *        struct
 *
 * @param p_hwfn
 *
 * @return struct qed_consq* - a newly allocated structure; NULL upon error.
 */
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_setup - Reset the ConsQ to its start
 *        state.
 *
 * @param p_hwfn
 * @param p_consq
 */
void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq);

/**
 * @brief qed_consq_free - deallocates the given ConsQ struct.
 *
 * @param p_hwfn
 * @param p_consq
 */
void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq);

/**
 * @file
 *
 * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
 */

#define QED_SP_EQ_COMPLETION  0x01
#define QED_SP_CQE_COMPLETION 0x02

/* Parameters common to the initialization of every SPQ/ramrod request */
struct qed_sp_init_data {
	u32			cid;
	u16			opaque_fid;

	/* Information regarding operation upon sending & completion */
	enum spq_mode		comp_mode;
	struct qed_spq_comp_cb *p_comp_data;
};

/**
 * @brief qed_sp_init_request - Acquire an SPQ entry and initialize its
 *        common header for the given command & protocol.
 *
 * @param p_hwfn
 * @param pp_ent - receives the prepared SPQ entry
 * @param cmd - ramrod command id
 * @param protocol - protocol id the ramrod belongs to
 * @param p_data - common initialization parameters
 *
 * @return int
 */
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data);

/**
 * @brief qed_sp_pf_start - PF Function Start Ramrod
 *
 * This ramrod is sent to initialize a physical function (PF). It will
 * configure the function related parameters and write its completion to the
 * event ring specified in the parameters.
 *
 * Ramrods complete on the common event ring for the PF. This ring is
 * allocated by the driver on host memory and its parameters are written
 * to the internal RAM of the UStorm by the Function Start Ramrod.
 *
 * @param p_hwfn
 * @param p_tunn
 * @param mode
 * @param allow_npar_tx_switch
 *
 * @return int
 */

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch);

/**
 * @brief qed_sp_pf_update - PF Function Update Ramrod
 *
 * This ramrod updates function-related parameters. Every parameter can be
 * updated independently, according to configuration flags.
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_pf_update(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_sp_pf_stop - PF Function Stop Ramrod
 *
 * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
 * sent and the last completion written to the PF's Event Ring. This ramrod also
 * deletes the context for the Slow-hwfn connection on this PF.
 *
 * @note Not required for first packet.
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
 *        update Ramrod
 *
 * @param p_hwfn
 * @param p_tunn - tunnel configuration to apply
 * @param comp_mode - requested completion semantics (see enum spq_mode)
 * @param p_comp_data - completion callback data (for QED_SPQ_MODE_CB)
 *
 * @return int
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data);
/**
 * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);

#endif