xref: /linux/drivers/scsi/qedf/qedf.h (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 */
#ifndef _QEDFC_H_
#define _QEDFC_H_

#include <scsi/libfcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>

/* qedf_hsi.h needs to be included before any qed includes */
#include "qedf_hsi.h"

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_fcoe_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
#include "drv_fcoe_fw_funcs.h"

/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
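/*
 * A typical use is splitting a 64-bit DMA address into the two 32-bit
 * halves a register or ramrod field expects. Illustrative sketch only;
 * the destination field names here are hypothetical:
 *
 *	regs->pbl_addr_hi = U64_HI(qedf->bdq_pbl_list_dma);
 *	regs->pbl_addr_lo = U64_LO(qedf->bdq_pbl_list_dma);
 */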

#define QEDF_DESCR "QLogic FCoE Offload Driver"
#define QEDF_MODULE_NAME "qedf"

#define QEDF_FLOGI_RETRY_CNT	3
#define QEDF_RPORT_RETRY_CNT	255
#define QEDF_MAX_SESSIONS	1024
#define QEDF_MAX_PAYLOAD	2048
#define QEDF_MAX_BDS_PER_CMD	256
#define QEDF_MAX_BD_LEN		0xffff
#define QEDF_BD_SPLIT_SZ	0x1000
#define QEDF_PAGE_SIZE		4096
#define QED_HW_DMA_BOUNDARY     0xfff
#define QEDF_MAX_SGLEN_FOR_CACHESGL		((1U << 16) - 1)
#define QEDF_MFS		(QEDF_MAX_PAYLOAD + \
	sizeof(struct fc_frame_header))
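/*
 * Since struct fc_frame_header is 24 bytes, QEDF_MFS works out to
 * 2048 + 24 = 2072 bytes, matching QEDF_BDQ_BUF_SIZE defined below.
 */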
#define QEDF_MAX_NPIV		64
#define QEDF_TM_TIMEOUT		10
#define QEDF_ABORT_TIMEOUT	(10 * 1000)
#define QEDF_CLEANUP_TIMEOUT	1
#define QEDF_MAX_CDB_LEN	16
#define QEDF_LL2_BUF_SIZE	2500	/* Buffer size required for LL2 Rx */

#define UPSTREAM_REMOVE		1
#define UPSTREAM_KEEP		1

struct qedf_mp_req {
	uint32_t req_len;
	void *req_buf;
	dma_addr_t req_buf_dma;
	struct scsi_sge *mp_req_bd;
	dma_addr_t mp_req_bd_dma;
	struct fc_frame_header req_fc_hdr;

	uint32_t resp_len;
	void *resp_buf;
	dma_addr_t resp_buf_dma;
	struct scsi_sge *mp_resp_bd;
	dma_addr_t mp_resp_bd_dma;
	struct fc_frame_header resp_fc_hdr;
};

struct qedf_els_cb_arg {
	struct qedf_ioreq *aborted_io_req;
	struct qedf_ioreq *io_req;
	u8 op; /* Used to keep track of ELS op */
	uint16_t l2_oxid;
	u32 offset; /* Used for sequence cleanup */
	u8 r_ctl; /* Used for sequence cleanup */
};

enum qedf_ioreq_event {
	QEDF_IOREQ_EV_NONE,
	QEDF_IOREQ_EV_ABORT_SUCCESS,
	QEDF_IOREQ_EV_ABORT_FAILED,
	QEDF_IOREQ_EV_SEND_RRQ,
	QEDF_IOREQ_EV_ELS_TMO,
	QEDF_IOREQ_EV_ELS_ERR_DETECT,
	QEDF_IOREQ_EV_ELS_FLUSH,
	QEDF_IOREQ_EV_CLEANUP_SUCCESS,
	QEDF_IOREQ_EV_CLEANUP_FAILED,
};

#define FC_GOOD		0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER	(0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER	(0x1<<3)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID	(0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID	(0x1<<1)
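/*
 * Illustrative sketch (not the driver's actual completion handler) of how
 * these FCP_RSP flags pair with the fields kept in struct qedf_ioreq below:
 *
 *	if (io_req->scsi_comp_flags & FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)
 *		scsi_set_resid(io_req->sc_cmd, io_req->fcp_resid);
 *	if (io_req->scsi_comp_flags & FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
 *		memcpy(io_req->sc_cmd->sense_buffer, io_req->sense_buffer,
 *		       min_t(u32, io_req->fcp_sns_len, SCSI_SENSE_BUFFERSIZE));
 */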
struct qedf_ioreq {
	struct list_head link;
	uint16_t xid;
	struct scsi_cmnd *sc_cmd;
#define QEDF_SCSI_CMD		1
#define QEDF_TASK_MGMT_CMD	2
#define QEDF_ABTS		3
#define QEDF_ELS		4
#define QEDF_CLEANUP		5
#define QEDF_SEQ_CLEANUP	6
	u8 cmd_type;
#define QEDF_CMD_OUTSTANDING		0x0
#define QEDF_CMD_IN_ABORT		0x1
#define QEDF_CMD_IN_CLEANUP		0x2
#define QEDF_CMD_SRR_SENT		0x3
#define QEDF_CMD_DIRTY			0x4
#define QEDF_CMD_ERR_SCSI_DONE		0x5
	u8 io_req_flags;
	uint8_t tm_flags;
	u64 tm_lun;
	struct qedf_rport *fcport;
#define	QEDF_CMD_ST_INACTIVE		0
#define	QEDFC_CMD_ST_IO_ACTIVE		1
#define	QEDFC_CMD_ST_ABORT_ACTIVE	2
#define	QEDFC_CMD_ST_ABORT_ACTIVE_EH	3
#define	QEDFC_CMD_ST_CLEANUP_ACTIVE	4
#define	QEDFC_CMD_ST_CLEANUP_ACTIVE_EH	5
#define	QEDFC_CMD_ST_RRQ_ACTIVE		6
#define	QEDFC_CMD_ST_RRQ_WAIT		7
#define	QEDFC_CMD_ST_OXID_RETIRE_WAIT	8
#define	QEDFC_CMD_ST_TMF_ACTIVE		9
#define	QEDFC_CMD_ST_DRAIN_ACTIVE	10
#define	QEDFC_CMD_ST_CLEANED		11
#define	QEDFC_CMD_ST_ELS_ACTIVE		12
	atomic_t state;
	unsigned long flags;
	enum qedf_ioreq_event event;
	size_t data_xfer_len;
	/* ID: 001: Alloc cmd (qedf_alloc_cmd) */
	/* ID: 002: Initiate ABTS (qedf_initiate_abts) */
	/* ID: 003: For RRQ (qedf_process_abts_compl) */
	struct kref refcount;
	struct qedf_cmd_mgr *cmd_mgr;
	struct io_bdt *bd_tbl;
	struct delayed_work timeout_work;
	struct completion tm_done;
	struct completion abts_done;
	struct completion cleanup_done;
	struct fcoe_task_context *task;
	struct fcoe_task_params *task_params;
	struct scsi_sgl_task_params *sgl_task_params;
	int idx;
	int lun;
/*
 * Need to allocate enough room for both sense data and FCP response data
 * which has a max length of 8 bytes according to spec.
 */
#define QEDF_SCSI_SENSE_BUFFERSIZE	(SCSI_SENSE_BUFFERSIZE + 8)
	uint8_t *sense_buffer;
	dma_addr_t sense_buffer_dma;
	u32 fcp_resid;
	u32 fcp_rsp_len;
	u32 fcp_sns_len;
	u8 cdb_status;
	u8 fcp_status;
	u8 fcp_rsp_code;
	u8 scsi_comp_flags;
#define QEDF_MAX_REUSE		0xfff
	u16 reuse_count;
	struct qedf_mp_req mp_req;
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
	struct qedf_els_cb_arg *cb_arg;
	int fp_idx;
	unsigned int cpu;
	unsigned int int_cpu;
#define QEDF_IOREQ_UNKNOWN_SGE		1
#define QEDF_IOREQ_SLOW_SGE		2
#define QEDF_IOREQ_FAST_SGE		3
	u8 sge_type;
	struct delayed_work rrq_work;

	/* Used for sequence level recovery; i.e. REC/SRR */
	uint32_t rx_buf_off;
	uint32_t tx_buf_off;
	uint32_t rx_id;
	uint32_t task_retry_identifier;

	/*
	 * Used to tell if we need to return a SCSI command
	 * during some form of error processing.
	 */
	bool return_scsi_cmd_on_abts;

	unsigned int alloc;
};

struct qedf_cmd_priv {
	struct qedf_ioreq *io_req;
};

static inline struct qedf_cmd_priv *qedf_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}
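/*
 * Illustrative sketch, assuming the SCSI host template sets
 * .cmd_size = sizeof(struct qedf_cmd_priv) so that scsi_cmd_priv()
 * returns this per-command area:
 *
 *	qedf_priv(sc_cmd)->io_req = io_req;	(on submission)
 *	io_req = qedf_priv(sc_cmd)->io_req;	(on completion)
 */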

extern struct workqueue_struct *qedf_io_wq;

struct qedf_rport {
	spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION	2
#define QEDF_RPORT_IN_RESET 3
#define QEDF_RPORT_IN_LUN_RESET 4
#define QEDF_RPORT_IN_TARGET_RESET 5
	unsigned long flags;
	int lun_reset_lun;
	unsigned long retry_delay_timestamp;
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u32 handle; /* Handle from qed */
	u32 fw_cid; /* fw_cid from qed */
	void __iomem *p_doorbell;
	/* Send queue management */
	atomic_t free_sqes;
	atomic_t ios_to_queue;
	atomic_t num_active_ios;
	struct fcoe_wqe *sq;
	dma_addr_t sq_dma;
	u16 sq_prod_idx;
	u16 fw_sq_prod_idx;
	u16 sq_con_idx;
	u32 sq_mem_size;
	void *sq_pbl;
	dma_addr_t sq_pbl_dma;
	u32 sq_pbl_size;
	u32 sid;
#define	QEDF_RPORT_TYPE_DISK		0
#define	QEDF_RPORT_TYPE_TAPE		1
	uint dev_type; /* Disk or tape */
	struct list_head peers;
};

/* Used to contain LL2 skb's in ll2_skb_list */
struct qedf_skb_work {
	struct work_struct work;
	struct sk_buff *skb;
	struct qedf_ctx *qedf;
};

struct qedf_fastpath {
#define	QEDF_SB_ID_NULL		0xffff
	u16		sb_id;
	struct qed_sb_info	*sb_info;
	struct qedf_ctx *qedf;
	/* Keep track of number of completions on this fastpath */
	unsigned long completions;
	uint32_t cq_num_entries;
};

/* Used to pass fastpath information needed to process CQEs */
struct qedf_io_work {
	struct work_struct work;
	struct fcoe_cqe cqe;
	struct qedf_ctx *qedf;
	struct fc_frame *fp;
};

struct qedf_glbl_q_params {
	u64	hw_p_cq;	/* Completion queue PBL */
	u64	hw_p_rq;	/* Request queue PBL */
	u64	hw_p_cmdq;	/* Command queue PBL */
};

struct global_queue {
	struct fcoe_cqe *cq;
	dma_addr_t cq_dma;
	u32 cq_mem_size;
	u32 cq_cons_idx; /* Completion queue consumer index */
	u32 cq_prod_idx;

	void *cq_pbl;
	dma_addr_t cq_pbl_dma;
	u32 cq_pbl_size;
};

/* I/O tracing entry */
#define QEDF_IO_TRACE_SIZE		2048
struct qedf_io_log {
#define QEDF_IO_TRACE_REQ		0
#define QEDF_IO_TRACE_RSP		1
	uint8_t direction;
	uint16_t task_id;
	uint32_t port_id; /* Remote port fabric ID */
	int lun;
	unsigned char op; /* SCSI CDB */
	uint8_t lba[4];
	unsigned int bufflen; /* SCSI buffer length */
	unsigned int sg_count; /* Number of SG elements */
	int result; /* Result passed back to mid-layer */
	unsigned long jiffies; /* Time stamp when I/O logged */
	int refcount; /* Reference count for task id */
	unsigned int req_cpu; /* CPU that the task is queued on */
	unsigned int int_cpu; /* Interrupt CPU that the task is received on */
	unsigned int rsp_cpu; /* CPU that task is returned on */
	u8 sge_type; /* Did we take the slow, single or fast SGE path */
};

/* Number of entries in BDQ */
#define QEDF_BDQ_SIZE			256
#define QEDF_BDQ_BUF_SIZE		2072

/* DMA coherent buffers for BDQ */
struct qedf_bdq_buf {
	void *buf_addr;
	dma_addr_t buf_dma;
};

/* Main adapter struct */
struct qedf_ctx {
	struct qedf_dbg_ctx dbg_ctx;
	struct fcoe_ctlr ctlr;
	struct fc_lport *lport;
	u8 data_src_addr[ETH_ALEN];
#define QEDF_LINK_DOWN		0
#define QEDF_LINK_UP		1
	atomic_t link_state;
#define QEDF_DCBX_PENDING	0
#define QEDF_DCBX_DONE		1
	atomic_t dcbx;
#define QEDF_NULL_VLAN_ID	-1
#define QEDF_FALLBACK_VLAN	1002
#define QEDF_DEFAULT_PRIO	3
	int vlan_id;
	u8 prio;
	struct qed_dev *cdev;
	struct qed_dev_fcoe_info dev_info;
	struct qed_int_info int_info;
	uint16_t last_command;
	spinlock_t hba_lock;
	struct pci_dev *pdev;
	u64 wwnn;
	u64 wwpn;
	u8 __aligned(16) mac[ETH_ALEN];
	struct list_head fcports;
	atomic_t num_offloads;
	unsigned int curr_conn_id;
	struct workqueue_struct *ll2_recv_wq;
	struct workqueue_struct *link_update_wq;
	struct devlink *devlink;
	struct delayed_work link_update;
	struct delayed_work link_recovery;
	struct completion flogi_compl;
	struct completion fipvlan_compl;

	/*
	 * Used to tell if we're in the window where we are waiting for
	 * the link to come back up before informing fcoe that the link is
	 * down.
	 */
	atomic_t link_down_tmo_valid;
#define QEDF_TIMER_INTERVAL		(1 * HZ)
	struct timer_list timer; /* One-second bookkeeping timer */
#define QEDF_DRAIN_ACTIVE		1
#define QEDF_LL2_STARTED		2
#define QEDF_UNLOADING			3
#define QEDF_GRCDUMP_CAPTURE		4
#define QEDF_IN_RECOVERY		5
#define QEDF_DBG_STOP_IO		6
#define QEDF_PROBING			8
#define QEDF_STAG_IN_PROGRESS		9
	unsigned long flags; /* Miscellaneous state flags */
	int fipvlan_retries;
	u8 num_queues;
	struct global_queue **global_queues;
	/* Pointer to array of queue structures */
	struct qedf_glbl_q_params *p_cpuq;
	/* Physical address of array of queue structures */
	dma_addr_t hw_p_cpuq;

	struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
	void *bdq_pbl;
	dma_addr_t bdq_pbl_dma;
	size_t bdq_pbl_mem_size;
	void *bdq_pbl_list;
	dma_addr_t bdq_pbl_list_dma;
	u8 bdq_pbl_list_num_entries;
	void __iomem *bdq_primary_prod;
	void __iomem *bdq_secondary_prod;
	uint16_t bdq_prod_idx;

	/* Array holding all of the fastpath structures for this qedf_ctx */
	struct qedf_fastpath *fp_array;
	struct qed_fcoe_tid tasks;
	struct qedf_cmd_mgr *cmd_mgr;
	/* Holds the PF parameters we pass to qed to start the FCoE function */
	struct qed_pf_params pf_params;
	/* Used to time middle path ELS and TM commands */
	struct workqueue_struct *timer_work_queue;

#define QEDF_IO_WORK_MIN		64
	mempool_t *io_mempool;
	struct workqueue_struct *dpc_wq;
	struct delayed_work recovery_work;
	struct delayed_work board_disable_work;
	struct delayed_work grcdump_work;
	struct delayed_work stag_work;

	u32 slow_sge_ios;
	u32 fast_sge_ios;

	uint8_t	*grcdump;
	uint32_t grcdump_size;

	struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
	spinlock_t io_trace_lock;
	uint16_t io_trace_idx;

	bool stop_io_on_error;

	u32 flogi_cnt;
	u32 flogi_failed;
	u32 flogi_pending;

	/* Used for fc statistics */
	struct mutex stats_mutex;
	u64 input_requests;
	u64 output_requests;
	u64 control_requests;
	u64 packet_aborts;
	u64 alloc_failures;
	u8 lun_resets;
	u8 target_resets;
	u8 task_set_fulls;
	u8 busy;
	/* Used for flush routine */
	struct mutex flush_mutex;
};

struct io_bdt {
	struct qedf_ioreq *io_req;
	struct scsi_sge *bd_tbl;
	dma_addr_t bd_tbl_dma;
	u16 bd_valid;
};

struct qedf_cmd_mgr {
	struct qedf_ctx *qedf;
	u16 idx;
	struct io_bdt **io_bdt_pool;
#define FCOE_PARAMS_NUM_TASKS		2048
	struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
	spinlock_t lock;
	atomic_t free_list_cnt;
};

/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
 * Usage:
 *
 * void *ptr;
 * ptr = qedf_get_task_mem(&qedf->tasks, 128);
 */
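/*
 * The task memory is carved into blocks of num_tids_per_block contexts,
 * each info->size bytes long: tid / num_tids_per_block selects the block
 * and (tid % num_tids_per_block) * size is the byte offset into it. For
 * example, with a hypothetical 64 tids per block and a 128-byte context,
 * tid 130 would resolve to blocks[2] at offset 256.
 */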
static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
{
	return (void *)(info->blocks[tid / info->num_tids_per_block] +
			(tid % info->num_tids_per_block) * info->size);
}

static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
{
	set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
}

/*
 * Externs
 */

/*
 * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
 * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
 * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
 */
#define QEDF_DEFAULT_LOG_MASK		0x3CFB6
extern const struct qed_fcoe_ops *qed_ops;
extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
#define QEDF_RETRY_DELAY_MAX		600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;

extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
extern int qedf_queuecommand(struct Scsi_Host *host,
	struct scsi_cmnd *sc_cmd);
extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
extern u8 *qedf_get_src_mac(struct fc_lport *lport);
extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_process_error_detect(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_flush_active_ios(struct qedf_rport *fcport, u64 lun);
extern void qedf_release_cmd(struct kref *ref);
extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts);
extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
	u8 cmd_type);

extern const struct attribute_group *qedf_host_groups[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req);
extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts);
extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_initiate_tmf(struct fc_rport *rport, u64 lun, u8 tm_flags);
extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result);
extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
bool qedf_wait_for_upload(struct qedf_ctx *qedf);
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
extern int qedf_post_io_req(struct qedf_rport *fcport,
	struct qedf_ioreq *io_req);
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
extern void qedf_schedule_hw_err_handler(void *dev,
		enum qed_hw_err_type err_type);

#define FCOE_WORD_TO_BYTE  4
#define QEDF_MAX_TASK_NUM	0xFFFF
#define QL45xxx			0x165C
#define QL41xxx			0x8080
#define MAX_CT_PAYLOAD		2048
#define DISCOVERED_PORTS	4
#define NUMBER_OF_PORTS		1

struct fip_vlan {
	struct ethhdr eth;
	struct fip_header fip;
	struct {
		struct fip_mac_desc mac;
		struct fip_wwn_desc wwnn;
	} desc;
};

/* SQ/CQ Sizes */
#define GBL_RSVD_TASKS			16
#define NUM_TASKS_PER_CONNECTION	1024
#define NUM_RW_TASKS_PER_CONNECTION	512
#define FCOE_PARAMS_CQ_NUM_ENTRIES	FCOE_PARAMS_NUM_TASKS

#define FCOE_PARAMS_CMDQ_NUM_ENTRIES	FCOE_PARAMS_NUM_TASKS
#define SQ_NUM_ENTRIES			NUM_TASKS_PER_CONNECTION

#define QEDF_FCOE_PARAMS_GL_RQ_PI              0
#define QEDF_FCOE_PARAMS_GL_CMD_PI             1

#define QEDF_READ                     (1 << 1)
#define QEDF_WRITE                    (1 << 0)
#define MAX_FIBRE_LUNS			0xffffffff

#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
					num_online_cpus())
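/*
 * Worked example (hypothetical numbers): if qed reports 16 CQs in
 * dev_info but only 8 CPUs are online, MIN_NUM_CPUS_MSIX() evaluates
 * to 8.
 */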

/*
 * PCI function probe defines
 */
/* Probe/remove called during normal PCI probe */
#define	QEDF_MODE_NORMAL		0
/* Probe/remove called from qed error recovery */
#define QEDF_MODE_RECOVERY		1

#define SUPPORTED_25000baseKR_Full    (1<<27)
#define SUPPORTED_50000baseKR2_Full   (1<<28)
#define SUPPORTED_100000baseKR4_Full  (1<<29)
#define SUPPORTED_100000baseCR4_Full  (1<<30)

#endif