1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <scsi/scsi_host.h>
25 #include <linux/hashtable.h>
26 #include <linux/ktime.h>
27 #include <linux/workqueue.h>
28
29 #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
30 #define CONFIG_SCSI_LPFC_DEBUG_FS
31 #endif
32
struct lpfc_sli2_slim;

/* Fixed-size name buffers */
#define ELX_MODEL_NAME_SIZE	80
#define ELX_FW_NAME_SIZE	84

/* PCI device family: legacy LightPulse (SLI-3) vs OneConnect (SLI-4) */
#define LPFC_PCI_DEV_LP		0x1
#define LPFC_PCI_DEV_OC		0x2

/* SLI (Service Level Interface) revisions supported by this driver */
#define LPFC_SLI_REV2		2
#define LPFC_SLI_REV3		3
#define LPFC_SLI_REV4		4

#define LPFC_MAX_TARGET		4096	/* max number of targets supported */
#define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
					   requests */
#define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
					   the NameServer before giving up. */
#define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT	64	/* sg element count per scsi cmnd */

/* Scatter-gather sizing limits (per I/O unless noted) */
#define LPFC_DEFAULT_XPSGL_SIZE	256
#define LPFC_MAX_SG_TABLESIZE	0xffff
#define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF	512	/* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
#define LPFC_MIN_SG_SEG_CNT	32	/* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
#define LPFC_MAX_NVME_SEG_CNT	256	/* max SGL element cnt per NVME cmnd */

#define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL	120	/* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
#define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
#define LPFC_MIN_TGT_QDEPTH	10
#define LPFC_MAX_TGT_QDEPTH	0xFFFF
71
/*
 * The following time intervals are used for adjusting SCSI device
 * queue depths when there is a driver resource error or a firmware
 * resource error.
 */
#define QUEUE_RAMP_DOWN_INTERVAL	(secs_to_jiffies(1))

/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT	20

/* Heartbeat mailbox: issue every INTERVAL seconds; declare the port
 * unresponsive if no completion within TIMEOUT seconds.
 */
#define LPFC_HB_MBOX_INTERVAL	5	/* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT	30	/* Heart beat timeout in seconds. */

/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
87
/* Define macros for 64 bit support */
/* Split a DMA address into its low and high 32-bit halves. */
#define putPaddrLow(addr)   ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr)  ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
/*
 * Recombine high/low 32-bit halves into a dma_addr_t.  The high half
 * is shifted left by 16 twice (rather than a single <<32); the result
 * is identical to ((u64)(high) << 32) | (u64)(low).
 */
#define getPaddr(high, low) ((dma_addr_t)( \
			(( (u64)(high)<<16 ) << 16)|( (u64)(low))))
/* Provide maximum configuration definitions. */
#define LPFC_DRVR_TIMEOUT	16	/* driver iocb timeout value in sec */
#define FC_MAX_ADPTMSG		64	/* adapter message buffer length */

#define MAX_HBAEVT		32	/* max queued HBA events */
#define MAX_HBAS_NO_RESET	16

/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS	2

/* lpfc wait event data ready flag */
#define LPFC_DATA_READY		0	/* bit 0 */

/* queue dump line buffer size */
#define LPFC_LBUF_SZ		128

/* mailbox system shutdown options */
#define LPFC_MBX_NO_WAIT	0	/* do not wait for active mailbox */
#define LPFC_MBX_WAIT		1	/* wait for active mailbox to finish */

/* Persistent port-configuration object (magic validates the blob) */
#define LPFC_CFG_PARAM_MAGIC_NUM	0xFEAA0005
#define LPFC_PORT_CFG_NAME	"/cfg/port.cfg"
115
/*
 * True when min <= val <= max.  The lower-bound compare is done as
 * unsigned, so a negative 'val' wraps to a large value and fails for
 * non-negative ranges.  NOTE: arguments are evaluated more than once;
 * do not pass expressions with side effects.
 */
#define lpfc_rangecheck(val, min, max) \
	((uint)(val) >= (uint)(min) && (val) <= (max))

/* FCP ring polling-mode control flags */
enum lpfc_polling_flags {
	ENABLE_FCP_RING_POLLING = 0x1,
	DISABLE_FCP_RING_INT    = 0x2
};
123
/*
 * Performance-profiling counters, indexed by cpu.
 * NOTE(review): the 40-entry arrays bound profiling to the first 40
 * cpus; counters for higher cpu ids are not recorded - confirm against
 * the sites that index these arrays before widening.
 */
struct perf_prof {
	uint16_t cmd_cpu[40];	/* command-side counter, per cpu */
	uint16_t rsp_cpu[40];	/* response-side counter, per cpu */
	uint16_t qh_cpu[40];
	uint16_t wqidx[40];	/* work-queue index counter, per cpu */
};
130
/*
 * Provide for FC4 TYPE x28 - NVME.  The
 * bit mask for FCP and NVME is 0x8 identically
 * because they are 32 bit positions apart.
 */
136 #define LPFC_FC4_TYPE_BITMASK 0x00000100
137
/* Provide DMA memory definitions the driver uses per port instance. */

/* A single DMA buffer: CPU-visible virtual address paired with the
 * bus/DMA mapped address, linkable on driver lists.
 */
struct lpfc_dmabuf {
	struct list_head list;
	void *virt;		/* virtual address ptr */
	dma_addr_t phys;	/* mapped address */
	uint32_t buffer_tag;	/* used for tagged queue ring */
};

/* Context buffer for an NVME target asynchronous exchange.  defer_work
 * lets the exchange be processed later from workqueue context.
 */
struct lpfc_nvmet_ctxbuf {
	struct list_head list;
	struct lpfc_async_xchg_ctx *context;
	struct lpfc_iocbq *iocbq;
	struct lpfc_sglq *sglq;
	struct work_struct defer_work;
};

/* Fixed-capacity pool of lpfc_dmabuf elements */
struct lpfc_dma_pool {
	struct lpfc_dmabuf *elements;
	uint32_t max_count;	/* pool capacity */
	uint32_t current_count;	/* elements currently held */
};
159
/* Host Buffer Queue buffer: paired header (hbuf) and data (dbuf)
 * DMA buffers plus receive bookkeeping and the completion-queue
 * event used to hand the frame to the slow path.
 */
struct hbq_dmabuf {
	struct lpfc_dmabuf hbuf;	/* header buffer */
	struct lpfc_dmabuf dbuf;	/* data buffer */
	uint16_t total_size;
	uint16_t bytes_recv;
	uint32_t tag;
	struct lpfc_cq_event cq_event;
	unsigned long time_stamp;	/* presumably jiffies - see set site */
	void *context;
};

/* SLI-4 Receive Queue buffer: header/data DMA buffers, the RQ entry
 * index, and back-pointers to the owning header and data RQs.
 */
struct rqb_dmabuf {
	struct lpfc_dmabuf hbuf;
	struct lpfc_dmabuf dbuf;
	uint16_t total_size;
	uint16_t bytes_recv;
	uint16_t idx;
	struct lpfc_queue *hrq;	/* ptr to associated Header RQ */
	struct lpfc_queue *drq;	/* ptr to associated Data RQ */
};
180
181 /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
182 #define MEM_PRI 0x100
183
184
185 /****************************************************************************/
186 /* Device VPD save area */
187 /****************************************************************************/
/* Adapter Vital Product Data: revision/firmware identification plus
 * the SLI-3 feature bits reported by the port.  The sli3Feat bitfields
 * are mirrored for big/little endian hosts so field names are byte-order
 * independent.
 */
typedef struct lpfc_vpd {
	uint32_t status;	/* vpd status value */
	uint32_t length;	/* number of bytes actually returned */
	struct {
		uint32_t rsvd1;	/* Revision numbers */
		uint32_t biuRev;
		uint32_t smRev;
		uint32_t smFwRev;
		uint32_t endecRev;
		uint16_t rBit;
		uint8_t fcphHigh;	/* highest FC-PH version */
		uint8_t fcphLow;	/* lowest FC-PH version */
		uint8_t feaLevelHigh;
		uint8_t feaLevelLow;
		uint32_t postKernRev;
		uint32_t opFwRev;	/* operational firmware revision */
		uint8_t opFwName[16];
		uint32_t sli1FwRev;
		uint8_t sli1FwName[16];
		uint32_t sli2FwRev;
		uint8_t sli2FwName[16];
	} rev;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint32_t rsvd3  :20;	/* Reserved */
		uint32_t rsvd2	: 3;	/* Reserved */
		uint32_t cbg	: 1;	/* Configure BlockGuard */
		uint32_t cmv	: 1;	/* Configure Max VPIs */
		uint32_t ccrp   : 1;	/* Config Command Ring Polling */
		uint32_t csah   : 1;	/* Configure Synchronous Abort Handling */
		uint32_t chbs   : 1;	/* Configure Host Backing store */
		uint32_t cinb   : 1;	/* Enable Interrupt Notification Block */
		uint32_t cerbm	: 1;	/* Configure Enhanced Receive Buf Mgmt */
		uint32_t cmx	: 1;	/* Configure Max XRIs */
		uint32_t cmr	: 1;	/* Configure Max RPIs */
#else	/*  __LITTLE_ENDIAN */
		uint32_t cmr	: 1;	/* Configure Max RPIs */
		uint32_t cmx	: 1;	/* Configure Max XRIs */
		uint32_t cerbm	: 1;	/* Configure Enhanced Receive Buf Mgmt */
		uint32_t cinb   : 1;	/* Enable Interrupt Notification Block */
		uint32_t chbs   : 1;	/* Configure Host Backing store */
		uint32_t csah   : 1;	/* Configure Synchronous Abort Handling */
		uint32_t ccrp   : 1;	/* Config Command Ring Polling */
		uint32_t cmv	: 1;	/* Configure Max VPIs */
		uint32_t cbg	: 1;	/* Configure BlockGuard */
		uint32_t rsvd2	: 3;	/* Reserved */
		uint32_t rsvd3  :20;	/* Reserved */
#endif
	} sli3Feat;
} lpfc_vpd_t;
238
239
240 /*
241 * lpfc stat counters
242 */
/* Free-running driver event counters. */
struct lpfc_stats {
	/* Statistics for ELS commands */
	uint32_t elsLogiCol;
	uint32_t elsRetryExceeded;
	uint32_t elsXmitRetry;
	uint32_t elsDelayRetry;
	uint32_t elsRcvDrop;
	uint32_t elsRcvFrame;
	/* Received ELS commands, counted by command type */
	uint32_t elsRcvRSCN;
	uint32_t elsRcvRNID;
	uint32_t elsRcvFARP;
	uint32_t elsRcvFARPR;
	uint32_t elsRcvFLOGI;
	uint32_t elsRcvPLOGI;
	uint32_t elsRcvADISC;
	uint32_t elsRcvPDISC;
	uint32_t elsRcvFAN;
	uint32_t elsRcvLOGO;
	uint32_t elsRcvPRLO;
	uint32_t elsRcvPRLI;
	uint32_t elsRcvLIRR;
	uint32_t elsRcvRLS;
	uint32_t elsRcvRPL;
	uint32_t elsRcvRRQ;
	uint32_t elsRcvRTV;
	uint32_t elsRcvECHO;
	uint32_t elsRcvLCB;
	uint32_t elsRcvRDP;
	uint32_t elsRcvRDF;
	/* Transmitted ELS commands, counted by command type */
	uint32_t elsXmitFLOGI;
	uint32_t elsXmitFDISC;
	uint32_t elsXmitPLOGI;
	uint32_t elsXmitPRLI;
	uint32_t elsXmitADISC;
	uint32_t elsXmitLOGO;
	uint32_t elsXmitSCR;
	uint32_t elsXmitRSCN;
	uint32_t elsXmitRNID;
	uint32_t elsXmitFARP;
	uint32_t elsXmitFARPR;
	uint32_t elsXmitACC;
	uint32_t elsXmitLSRJT;

	/* Frame, exchange and link event counters */
	uint32_t frameRcvBcast;
	uint32_t frameRcvMulti;
	uint32_t strayXmitCmpl;
	uint32_t frameXmitDelay;
	uint32_t xriCmdCmpl;
	uint32_t xriStatErr;
	uint32_t LinkUp;
	uint32_t LinkDown;
	uint32_t LinkMultiEvent;
	uint32_t NoRcvBuf;
	/* FCP command/completion/error counters */
	uint32_t fcpCmd;
	uint32_t fcpCmpl;
	uint32_t fcpRspErr;
	uint32_t fcpRemoteStop;
	uint32_t fcpPortRjt;
	uint32_t fcpPortBusy;
	uint32_t fcpError;
	uint32_t fcpLocalErr;
};
305
306 struct lpfc_hba;
307
/* Data structure to keep withheld FLOGI_ACC information */
struct lpfc_defer_flogi_acc {
	bool flag;			/* true while an ACC is withheld */
	u16 rx_id;			/* exchange RX_ID of the FLOGI */
	u16 ox_id;			/* exchange OX_ID of the FLOGI */
	struct lpfc_nodelist *ndlp;	/* node the deferred ACC targets */
};
315
#define LPFC_VMID_TIMER		300	/* timer interval in seconds */

#define LPFC_MAX_VMID_SIZE	256	/* max bytes in a host VMID string */

/* I/O tag carried for a VMID: either an application id or a CS_CTL
 * priority value, depending on the tagging mode in use.
 */
union lpfc_vmid_io_tag {
	u32 app_id;	/* App Id vmid */
	u8 cs_ctl_vmid;	/* Priority tag vmid */
};

#define JIFFIES_PER_HR	(HZ * 60 * 60)

/* One Virtual Machine identifier known to the port, with its
 * registration state and per-cpu I/O activity accounting.
 */
struct lpfc_vmid {
	u8 flag;			/* LPFC_VMID_* state bits below */
#define LPFC_VMID_SLOT_FREE	0x0
#define LPFC_VMID_SLOT_USED	0x1
#define LPFC_VMID_REQ_REGISTER	0x2
#define LPFC_VMID_REGISTERED	0x4
#define LPFC_VMID_DE_REGISTER	0x8
	char host_vmid[LPFC_MAX_VMID_SIZE];
	union lpfc_vmid_io_tag un;
	struct hlist_node hnode;	/* entry in the vport hash table */
	u64 io_rd_cnt;
	u64 io_wr_cnt;
	u8 vmid_len;			/* valid length of host_vmid[] */
	u8 delete_inactive;	/* Delete if inactive flag 0 = no, 1 = yes */
	u32 hash_index;
	u64 __percpu *last_io_time;	/* per-cpu time of last I/O */
};

/* Nonzero when the vport tags I/O with CS_CTL priority values */
#define lpfc_vmid_is_type_priority_tag(vport)\
	(vport->vmid_priority_tagging ? 1 : 0)

#define LPFC_VMID_HASH_SIZE	256
#define LPFC_VMID_HASH_MASK	255	/* LPFC_VMID_HASH_SIZE - 1 */
#define LPFC_VMID_HASH_SHIFT	6
351
/* Scratch context carried while a VMID is being instantiated */
struct lpfc_vmid_context {
	struct lpfc_vmid *vmp;
	struct lpfc_nodelist *nlp;
	bool instantiated;
};

/* One priority/QoS range descriptor */
struct lpfc_vmid_priority_range {
	u8 low;
	u8 high;
	u8 qos;
};

struct lpfc_vmid_priority_info {
	u32 num_descriptors;
	struct lpfc_vmid_priority_range *vmid_range; /* num_descriptors entries */
};

/* QFPA priority-range selection values */
#define QFPA_EVEN_ONLY	0x01
#define QFPA_ODD_ONLY	0x02
#define QFPA_EVEN_ODD	0x03
372
/* Per-vport discovery state machine */
enum discovery_state {
	LPFC_VPORT_UNKNOWN	=  0,	/* vport state is unknown */
	LPFC_VPORT_FAILED	=  1,	/* vport has failed */
	LPFC_LOCAL_CFG_LINK	=  6,	/* local NPORT Id configured */
	LPFC_FLOGI		=  7,	/* FLOGI sent to Fabric */
	LPFC_FDISC		=  8,	/* FDISC sent for vport */
	LPFC_FABRIC_CFG_LINK	=  9,	/* Fabric assigned NPORT Id
					 * configured */
	LPFC_NS_REG		= 10,	/* Register with NameServer */
	LPFC_NS_QRY		= 11,	/* Query NameServer for NPort ID list */
	LPFC_BUILD_DISC_LIST	= 12,	/* Build ADISC and PLOGI lists for
					 * device authentication / discovery */
	LPFC_DISC_AUTH		= 13,	/* Processing ADISC list */
	LPFC_VPORT_READY	= 32,	/* discovery finished, vport usable */
};

/* Physical HBA / link bring-up state machine */
enum hba_state {
	LPFC_LINK_UNKNOWN	=  0,	/* HBA state is unknown */
	LPFC_WARM_START		=  1,	/* HBA state after selective reset */
	LPFC_INIT_START		=  2,	/* Initial state after board reset */
	LPFC_INIT_MBX_CMDS	=  3,	/* Initialize HBA with mbox commands */
	LPFC_LINK_DOWN		=  4,	/* HBA initialized, link is down */
	LPFC_LINK_UP		=  5,	/* Link is up  - issue READ_LA */
	LPFC_CLEAR_LA		=  6,	/* authentication cmplt - issue
					 * CLEAR_LA */
	LPFC_HBA_READY		= 32,	/* HBA fully operational */
	LPFC_HBA_ERROR		= -1	/* unrecoverable error state */
};
401
/* hba generic flags - values are bit numbers in a flags word
 * (gaps in the numbering are retired bits; do not reuse without
 * checking all users).
 */
enum lpfc_hba_flag {
	HBA_ERATT_HANDLED	=  0,	/* This flag is set when eratt handled */
	DEFER_ERATT		=  1,	/* Deferred error attn in progress */
	HBA_FCOE_MODE		=  2,	/* HBA function in FCoE Mode */
	HBA_SP_QUEUE_EVT	=  3,	/* Slow-path qevt posted to worker thread*/
	HBA_POST_RECEIVE_BUFFER	=  4,	/* Rcv buffers need to be posted */
	HBA_PERSISTENT_TOPO	=  5,	/* Persistent topology support in hba */
	ELS_XRI_ABORT_EVENT	=  6,	/* ELS_XRI abort event was queued */
	ASYNC_EVENT		=  7,
	LINK_DISABLED		=  8,	/* Link disabled by user */
	FCF_TS_INPROG		=  9,	/* FCF table scan in progress */
	FCF_RR_INPROG		= 10,	/* FCF roundrobin flogi in progress */
	HBA_FIP_SUPPORT		= 11,	/* FIP support in HBA */
	HBA_DEVLOSS_TMO		= 13,	/* HBA in devloss timeout */
	HBA_RRQ_ACTIVE		= 14,	/* process the rrq active list */
	HBA_IOQ_FLUSH		= 15,	/* I/O queues being flushed */
	HBA_RECOVERABLE_UE	= 17,	/* FW supports recoverable UE */
	HBA_FORCED_LINK_SPEED	= 18,	/*
					 * Firmware supports Forced Link
					 * Speed capability
					 */
	HBA_FLOGI_ISSUED	= 20,	/* FLOGI was issued */
	HBA_DEFER_FLOGI		= 23,	/* Defer FLOGI till read_sparm cmpl */
	HBA_SETUP		= 24,	/* HBA setup completed */
	HBA_NEEDS_CFG_PORT	= 25,	/* SLI3: CONFIG_PORT mbox needed */
	HBA_HBEAT_INP		= 26,	/* mbox HBEAT is in progress */
	HBA_HBEAT_TMO		= 27,	/* HBEAT initiated after timeout */
	HBA_FLOGI_OUTSTANDING	= 28,	/* FLOGI is outstanding */
	HBA_RHBA_CMPL		= 29,	/* RHBA FDMI cmd is successful */
};
432
/* State and fault code for one member link of a trunk */
struct lpfc_trunk_link_state {
	enum hba_state state;
	uint8_t fault;
};

/* Up to four trunked member links plus the aggregate physical speed */
struct lpfc_trunk_link {
	struct lpfc_trunk_link_state link0,
				     link1,
				     link2,
				     link3;
	u32 phy_lnk_speed;
};
445
/* Format of congestion module parameters
 * NOTE(review): field order/sizes appear to mirror an external object
 * (see LPFC_CFG_PARAM_MAGIC_NUM) - do not reorder without checking
 * the reader/writer of this blob.
 */
struct lpfc_cgn_param {
	uint32_t cgn_param_magic;	/* validates the parameter blob */
	uint8_t  cgn_param_version;	/* version 1 */
	uint8_t  cgn_param_mode;	/* 0=off 1=managed 2=monitor only */
#define LPFC_CFG_OFF		0
#define LPFC_CFG_MANAGED	1
#define LPFC_CFG_MONITOR	2
	uint8_t  cgn_rsvd1;
	uint8_t  cgn_rsvd2;
	uint8_t  cgn_param_level0;
	uint8_t  cgn_param_level1;
	uint8_t  cgn_param_level2;
	uint8_t  byte11;		/* bytes 11-15: reserved/padding */
	uint8_t  byte12;
	uint8_t  byte13;
	uint8_t  byte14;
	uint8_t  byte15;
};
465
/* Max number of days of congestion data */
#define LPFC_MAX_CGN_DAYS	10

/* Broken-down calendar timestamp used in congestion records */
struct lpfc_cgn_ts {
	uint8_t month;
	uint8_t day;
	uint8_t year;
	uint8_t hour;
	uint8_t minute;
	uint8_t second;
};
477
/* Format of congestion buffer info
 * This structure defines memory that is allocated and registered with
 * the HBA firmware. When adding or removing fields from this structure
 * the alignment must match the HBA firmware.
 */
483
/* Congestion information buffer registered with the HBA firmware.
 * Layout is shared with firmware - do not reorder or resize fields.
 * The minute/hour/day arrays are ring buffers driven by the
 * cgn_index_* fields below.
 */
struct lpfc_cgn_info {
	/* Header */
	__le16   cgn_info_size;		/* is sizeof(struct lpfc_cgn_info) */
	uint8_t  cgn_info_version;	/* represents format of structure */
#define LPFC_CGN_INFO_V1	1
#define LPFC_CGN_INFO_V2	2
#define LPFC_CGN_INFO_V3	3
#define LPFC_CGN_INFO_V4	4
	uint8_t  cgn_info_mode;		/* 0=off 1=managed 2=monitor only */
	uint8_t  cgn_info_detect;
	uint8_t  cgn_info_action;
	uint8_t  cgn_info_level0;
	uint8_t  cgn_info_level1;
	uint8_t  cgn_info_level2;

	/* Start Time */
	struct lpfc_cgn_ts base_time;

	/* minute / hours / daily indices into the ring buffers below */
	uint8_t  cgn_index_minute;
	uint8_t  cgn_index_hour;
	uint8_t  cgn_index_day;

	__le16   cgn_warn_freq;
	__le16   cgn_alarm_freq;
	__le16   cgn_lunq;
	uint8_t  cgn_pad1[8];

	/* Driver Information */
	__le16   cgn_drvr_min[60];	/* per-minute, last hour */
	__le32   cgn_drvr_hr[24];	/* per-hour, last day */
	__le32   cgn_drvr_day[LPFC_MAX_CGN_DAYS];

	/* Congestion Warnings */
	__le16   cgn_warn_min[60];
	__le32   cgn_warn_hr[24];
	__le32   cgn_warn_day[LPFC_MAX_CGN_DAYS];

	/* Latency Information */
	__le32   cgn_latency_min[60];
	__le32   cgn_latency_hr[24];
	__le32   cgn_latency_day[LPFC_MAX_CGN_DAYS];

	/* Bandwidth Information */
	__le16   cgn_bw_min[60];
	__le16   cgn_bw_hr[24];
	__le16   cgn_bw_day[LPFC_MAX_CGN_DAYS];

	/* Congestion Alarms */
	__le16   cgn_alarm_min[60];
	__le32   cgn_alarm_hr[24];
	__le32   cgn_alarm_day[LPFC_MAX_CGN_DAYS];

	struct_group(cgn_stat,
		uint8_t  cgn_stat_npm;	/* Notifications per minute */

		/* Start Time */
		struct lpfc_cgn_ts stat_start;	/* Base time */
		uint8_t cgn_pad2;

		__le32   cgn_notification;
		__le32   cgn_peer_notification;
		__le32   link_integ_notification;
		__le32   delivery_notification;
		struct lpfc_cgn_ts stat_fpin;	/* Last congestion notification FPIN */
		struct lpfc_cgn_ts stat_peer;	/* Last peer congestion FPIN */
		struct lpfc_cgn_ts stat_lnk;	/* Last link integrity FPIN */
		struct lpfc_cgn_ts stat_delivery; /* Last delivery notification FPIN */
	);

	/* CRC over the structure; must remain the final member */
	__le32   cgn_info_crc;
#define LPFC_CGN_CRC32_MAGIC_NUMBER	0x1EDC6F41
#define LPFC_CGN_CRC32_SEED		0xFFFFFFFF
};

/* Size covered by the CRC: everything except the trailing crc word */
#define LPFC_CGN_INFO_SZ	(sizeof(struct lpfc_cgn_info) -  \
				sizeof(uint32_t))
561
/* Byte, latency and I/O-count accumulators for congestion monitoring */
struct lpfc_cgn_stat {
	atomic64_t total_bytes;
	atomic64_t rcv_bytes;
	atomic64_t rx_latency;
#define LPFC_CGN_NOT_SENT	0xFFFFFFFFFFFFFFFFLL
	atomic_t rx_io_cnt;
};

/* Counts of congestion async events received from the port */
struct lpfc_cgn_acqe_stat {
	atomic64_t alarm;
	atomic64_t warn;
};
574
/* Bit numbers for vport->fc_flag */
enum lpfc_fc_flag {
	/* Several of these flags are HBA centric and should be moved to
	 * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
	 */
	FC_PT2PT,		/* pt2pt with no fabric */
	FC_PT2PT_PLOGI,		/* pt2pt initiate PLOGI */
	FC_DISC_TMO,		/* Discovery timer running */
	FC_PUBLIC_LOOP,		/* Public loop */
	FC_LBIT,		/* LOGIN bit in loopinit set */
	FC_RSCN_MODE,		/* RSCN cmd rcv'ed */
	FC_NLP_MORE,		/* More node to process in node tbl */
	FC_OFFLINE_MODE,	/* Interface is offline for diag */
	FC_FABRIC,		/* We are fabric attached */
	FC_VPORT_LOGO_RCVD,	/* LOGO received on vport */
	FC_RSCN_DISCOVERY,	/* Auth all devices after RSCN */
	FC_LOGO_RCVD_DID_CHNG,	/* FDISC on phys port detect DID chng */
	FC_PT2PT_NO_NVME,	/* Don't send NVME PRLI */
	FC_SCSI_SCAN_TMO,	/* scsi scan timer running */
	FC_ABORT_DISCOVERY,	/* we want to abort discovery */
	FC_NDISC_ACTIVE,	/* NPort discovery active */
	FC_BYPASSED_MODE,	/* NPort is in bypassed mode */
	FC_VPORT_NEEDS_REG_VPI,	/* Needs to have its vpi registered */
	FC_RSCN_DEFERRED,	/* A deferred RSCN being processed */
	FC_VPORT_NEEDS_INIT_VPI, /* Need to INIT_VPI before FDISC */
	FC_VPORT_CVL_RCVD,	/* VLink failed due to CVL */
	FC_VFI_REGISTERED,	/* VFI is registered */
	FC_FDISC_COMPLETED,	/* FDISC completed */
	FC_DISC_DELAYED,	/* Delay NPort discovery */
};

/* Bit numbers for vport->load_flag */
enum lpfc_load_flag {
	FC_LOADING,		/* HBA in process of loading drvr */
	FC_UNLOADING,		/* HBA in process of unloading drvr */
	FC_ALLOW_FDMI,		/* port is ready for FDMI requests */
	FC_ALLOW_VMID,		/* Allow VMID I/Os */
	FC_DEREGISTER_ALL_APP_ID /* Deregister all VMIDs */
};
612
/* Per-port instance state (physical port or NPIV virtual port):
 * discovery state machine, node lists, counters, configuration
 * parameters, VMID support, and FDMI/NVME bookkeeping.
 */
struct lpfc_vport {
	struct lpfc_hba *phba;		/* owning adapter */
	struct list_head listentry;	/* entry on the phba vport list */
	uint8_t port_type;
#define LPFC_PHYSICAL_PORT	1
#define LPFC_NPIV_PORT		2
#define LPFC_FABRIC_PORT	3
	enum discovery_state port_state;

	uint16_t vpi;			/* virtual port index */
	uint16_t vfi;			/* virtual fabric index */
	uint8_t vpi_state;
#define LPFC_VPI_REGISTERED	0x1

	unsigned long fc_flag;	/* FC flags */

	uint32_t ct_flags;
#define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
#define FC_CT_RNN_ID		0x2	 /* RNN_ID accepted by switch */
#define FC_CT_RSNN_NN		0x4	 /* RSNN_NN accepted by switch */
#define FC_CT_RSPN_ID		0x8	 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID		0x10	 /* RFT_ID accepted by switch */
#define FC_CT_RPRT_DEFER	0x20	 /* Defer issuing FDMI RPRT */
#define FC_CT_RSPNI_PNI		0x40	 /* RSPNI_PNI accepted by switch */

	struct list_head fc_nodes;
	spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */

	/* Keep counters for the number of entries in each list. */
	atomic_t fc_plogi_cnt;
	atomic_t fc_adisc_cnt;
	atomic_t fc_reglogin_cnt;
	atomic_t fc_prli_cnt;
	atomic_t fc_unmap_cnt;
	atomic_t fc_map_cnt;
	atomic_t fc_npr_cnt;
	atomic_t fc_unused_cnt;

	struct serv_parm fc_sparam;	/* buffer for our service parameters */

	uint32_t fc_myDID;	/* fibre channel S_ID */
	uint32_t fc_prevDID;	/* previous fibre channel S_ID */
	struct lpfc_name fabric_portname;
	struct lpfc_name fabric_nodename;

	int32_t stopped;   /* HBA has not been restarted since last ERATT */
	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */

	uint32_t num_disc_nodes;	/* in addition to hba_state */
	uint32_t gidft_inp;	/* cnt of outstanding GID_FTs */

	uint32_t fc_rscn_id_cnt;	/* count of RSCNs payloads in list */
	uint32_t fc_rscn_flush;		/* flag use of fc_rscn_id_list */
	struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
	struct lpfc_name fc_nodename;	/* fc nodename */
	struct lpfc_name fc_portname;	/* fc portname */

	struct timer_list fc_disctmo;	/* Discovery rescue timer */
	uint8_t fc_ns_retry;	/* retries for fabric nameserver */
	uint32_t fc_prli_sent;	/* cntr for outstanding PRLIs */

	spinlock_t work_port_lock;	/* protects work_port_events */
	uint32_t work_port_events; /* Timeout to be handled */
#define WORKER_DISC_TMO                0x1	/* vport: Discovery timeout */
#define WORKER_ELS_TMO                 0x2	/* vport: ELS timeout */
#define WORKER_DELAYED_DISC_TMO        0x8	/* vport: delayed discovery */

#define WORKER_MBOX_TMO                0x100	/* hba: MBOX timeout */
#define WORKER_HB_TMO                  0x200	/* hba: Heart beat timeout */
#define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
#define WORKER_RAMP_DOWN_QUEUE         0x800	/* hba: Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE           0x1000	/* hba: Increase Q depth */
#define WORKER_SERVICE_TXQ             0x2000	/* hba: IOCBs on the txq */
#define WORKER_CHECK_INACTIVE_VMID     0x4000	/* hba: check inactive vmids */
#define WORKER_CHECK_VMID_ISSUE_QFPA   0x8000	/* vport: Check if qfpa needs
						 * to be issued */

	struct timer_list els_tmofunc;
	struct timer_list delayed_disc_tmo;

	unsigned long load_flag;
	/* Vport Config Parameters */
	uint32_t cfg_scan_down;
	uint32_t cfg_lun_queue_depth;
	uint32_t cfg_nodev_tmo;
	uint32_t cfg_devloss_tmo;
	uint32_t cfg_restrict_login;
	uint32_t cfg_peer_port_login;
	uint32_t cfg_fcp_class;
	uint32_t cfg_use_adisc;
	uint32_t cfg_discovery_threads;
	uint32_t cfg_log_verbose;
	uint32_t cfg_enable_fc4_type;
	uint32_t cfg_max_luns;
	uint32_t cfg_enable_da_id;
	uint32_t cfg_max_scsicmpl_time;
	uint32_t cfg_tgt_queue_depth;
	uint32_t cfg_first_burst_size;
	uint32_t dev_loss_tmo_changed;
	/* VMID parameters */
	u8 lpfc_vmid_host_uuid[16];
	u32 max_vmid;	/* maximum VMIDs allowed per port */
	u32 cur_vmid_cnt;	/* Current VMID count */
#define LPFC_MIN_VMID	4
#define LPFC_MAX_VMID	255
	u32 vmid_inactivity_timeout;	/* Time after which the VMID */
					/* deregisters from switch */
	u32 vmid_priority_tagging;
#define LPFC_VMID_PRIO_TAG_DISABLE	0 /* Disable */
#define LPFC_VMID_PRIO_TAG_SUP_TARGETS	1 /* Allow supported targets only */
#define LPFC_VMID_PRIO_TAG_ALL_TARGETS	2 /* Allow all targets */
	unsigned long *vmid_priority_range;
#define LPFC_VMID_MAX_PRIORITY_RANGE    256
#define LPFC_VMID_PRIORITY_BITMAP_SIZE  32
	u8 vmid_flag;
#define LPFC_VMID_IN_USE		0x1
#define LPFC_VMID_ISSUE_QFPA		0x2
#define LPFC_VMID_QFPA_CMPL		0x4
#define LPFC_VMID_QOS_ENABLED		0x8
#define LPFC_VMID_TIMER_ENBLD		0x10
#define LPFC_VMID_TYPE_PRIO		0x20
	struct fc_qfpa_res *qfpa_res;

	struct fc_vport *fc_vport;	/* transport-layer vport object */

	struct lpfc_vmid *vmid;		/* VMID array, max_vmid entries */
	DECLARE_HASHTABLE(hash_table, 8);
	rwlock_t vmid_lock;		/* protects the VMID structures */
	struct lpfc_vmid_priority_info vmid_priority;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct dentry *vport_debugfs_root;
	struct lpfc_debugfs_trc *disc_trc;
	atomic_t disc_trc_cnt;
#endif
	struct list_head rcv_buffer_list;
	unsigned long rcv_buffer_time_stamp;
	uint32_t vport_flag;
#define STATIC_VPORT		0x1
#define FAWWPN_PARAM_CHG	0x2

	uint16_t fdmi_num_disc;
	uint32_t fdmi_hba_mask;
	uint32_t fdmi_port_mask;

	/* There is a single nvme instance per vport. */
	struct nvme_fc_local_port *localport;
	uint8_t  nvmei_support; /* driver supports NVME Initiator */
	uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
};
763
/* Host Buffer Queue bookkeeping: ring indices, the buffers currently
 * posted, and the per-HBQ allocation/free callbacks.
 */
struct hbq_s {
	uint16_t entry_count;	  /* Current number of HBQ slots */
	uint16_t buffer_count;	  /* Current number of buffers posted */
	uint32_t next_hbqPutIdx;  /* Index to next HBQ slot to use */
	uint32_t hbqPutIdx;	  /* HBQ slot to use */
	uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
	void    *hbq_virt;	  /* Virtual ptr to this hbq */
	struct list_head hbq_buffer_list;  /* buffers assigned to this HBQ */
			      /* Callback for HBQ buffer allocation */
	struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
			      /* Callback for HBQ buffer free */
	void               (*hbq_free_buffer) (struct lpfc_hba *,
					       struct hbq_dmabuf *);
};

/* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ	0
#define LPFC_MAX_HBQS	1
782
/* Adapter temperature condition */
enum hba_temp_state {
	HBA_NORMAL_TEMP,
	HBA_OVER_TEMP
};

/* Interrupt delivery mode the driver managed to enable */
enum intr_type_t {
	NONE = 0,
	INTx,
	MSI,
	MSIX,
};
794
#define LPFC_CT_CTX_MAX		64
/* Saved exchange state for an unsolicited CT request awaiting reply */
struct unsol_rcv_ct_ctx {
	uint32_t ctxt_id;	   /* context identifier */
	uint32_t SID;		   /* source N_Port ID of the request */
	uint32_t valid;		   /* UNSOL_VALID while entry is in use */
#define UNSOL_INVALID		0
#define UNSOL_VALID		1
	uint16_t oxid;		   /* originator exchange id */
	uint16_t rxid;		   /* responder exchange id */
};

/* User-selectable link speeds; value is the speed in Gb/s,
 * 0 selects auto-negotiation.
 */
#define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
#define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
#define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
#define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
#define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
#define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
#define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
#define LPFC_USER_LINK_SPEED_32G	32	/* 32 Gigabaud */
#define LPFC_USER_LINK_SPEED_64G	64	/* 64 Gigabaud */
#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_64G

#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64"
818
/* Non-embedded mailbox buffer variant.
 * NOTE(review): names suggest mse = multiple scatter entries and
 * hbd = host buffer descriptor - confirm against the users.
 */
enum nemb_type {
	nemb_mse = 1,
	nemb_hbd
};

/* Mailbox transfer direction */
enum mbox_type {
	mbox_rd = 1,
	mbox_wr
};

/* What a DMA buffer holds: the mailbox command itself or external data */
enum dma_type {
	dma_mbox = 1,
	dma_ebuf
};

/* Address position: before or after the (pre/pos) reference address */
enum sta_type {
	sta_pre_addr = 1,
	sta_pos_addr
};
838
/* State for a multi-buffer (non-embedded) mailbox command issued via
 * bsg: tracks the session state machine, buffer type/direction, and
 * the DMA buffers involved.
 */
struct lpfc_mbox_ext_buf_ctx {
	uint32_t state;			/* LPFC_BSG_MBOX_* session state */
#define LPFC_BSG_MBOX_IDLE		0
#define LPFC_BSG_MBOX_HOST              1
#define LPFC_BSG_MBOX_PORT		2
#define LPFC_BSG_MBOX_DONE		3
#define LPFC_BSG_MBOX_ABTS		4
	enum nemb_type nembType;	/* buffer variant (mse/hbd) */
	enum mbox_type mboxType;	/* read or write */
	uint32_t numBuf;		/* number of external buffers */
	uint32_t mbxTag;
	uint32_t seqNum;
	struct lpfc_dmabuf *mbx_dmabuf;	/* the mailbox command buffer */
	struct list_head ext_dmabuf_list; /* external data buffers */
};
854
/* Reserved pool of I/O buffers for expedited (high-priority) use */
struct lpfc_epd_pool {
	/* Expedite pool */
	struct list_head list;
	u32 count;
	spinlock_t lock;	/* lock for expedite pool */
};

/* RAS firmware-logging registration state */
enum ras_state {
	INACTIVE,
	REG_INPROGRESS,
	ACTIVE
};
867
/* RAS firmware-logging state: the host buffers posted to firmware,
 * the log level, and whether the feature is supported/enabled.
 */
struct lpfc_ras_fwlog {
	uint8_t *fwlog_buff;		/* host copy of the firmware log */
	uint32_t fw_buffcount; /* Buffer size posted to FW */
#define LPFC_RAS_BUFF_ENTERIES  16	/* Each entry can hold max of 64k */
#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
	uint32_t fw_loglevel;		/* Log level set */
	struct lpfc_dmabuf lwpd;	/* log write-position data */
	struct list_head fwlog_buff_list;

	/* RAS support status on adapter */
	bool ras_hwsupport;		/* RAS Support available on HW or not */
	bool ras_enabled;		/* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
	enum ras_state state;    /* RAS logging running state */
};
886
#define DBG_LOG_STR_SZ	256	/* max formatted message length */
#define DBG_LOG_SZ	256	/* entries in the debug-log ring */

/* One entry in the in-memory debug-log ring buffer */
struct dbg_log_ent {
	char log[DBG_LOG_STR_SZ];
	u64  t_ns;	/* timestamp (ns, from name - see logging site) */
};

/* Policy for spreading IRQ vectors across cpus */
enum lpfc_irq_chann_mode {
	/* Assign IRQs to all possible cpus that have hardware queues */
	NORMAL_MODE,

	/* Assign IRQs only to cpus on the same numa node as HBA */
	NUMA_MODE,

	/* Assign IRQs only on non-hyperthreaded CPUs. This is the
	 * same as normal_mode, but assign IRQS only on physical CPUs.
	 */
	NHT_MODE,
};

/* Bit numbers used in atomic flag words */
enum lpfc_hba_bit_flags {
	FABRIC_COMANDS_BLOCKED,	/* fabric commands blocked (sic: historical
				 * identifier spelling - do not rename) */
	HBA_PCI_ERR,		/* adapter is in PCI error state */
	MBX_TMO_ERR,		/* a mailbox command timed out */
};
913
914 struct lpfc_hba {
915 /* SCSI interface function jump table entries */
916 struct lpfc_io_buf * (*lpfc_get_scsi_buf)
917 (struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
918 struct scsi_cmnd *cmnd);
919 int (*lpfc_scsi_prep_dma_buf)
920 (struct lpfc_hba *, struct lpfc_io_buf *);
921 void (*lpfc_scsi_unprep_dma_buf)
922 (struct lpfc_hba *, struct lpfc_io_buf *);
923 void (*lpfc_release_scsi_buf)
924 (struct lpfc_hba *, struct lpfc_io_buf *);
925 void (*lpfc_rampdown_queue_depth)
926 (struct lpfc_hba *);
927 void (*lpfc_scsi_prep_cmnd)
928 (struct lpfc_vport *, struct lpfc_io_buf *,
929 struct lpfc_nodelist *);
930 int (*lpfc_scsi_prep_cmnd_buf)
931 (struct lpfc_vport *vport,
932 struct lpfc_io_buf *lpfc_cmd,
933 uint8_t tmo);
934 int (*lpfc_scsi_prep_task_mgmt_cmd)
935 (struct lpfc_vport *vport,
936 struct lpfc_io_buf *lpfc_cmd,
937 u64 lun, u8 task_mgmt_cmd);
938
939 /* IOCB interface function jump table entries */
940 int (*__lpfc_sli_issue_iocb)
941 (struct lpfc_hba *, uint32_t,
942 struct lpfc_iocbq *, uint32_t);
943 int (*__lpfc_sli_issue_fcp_io)
944 (struct lpfc_hba *phba, uint32_t ring_number,
945 struct lpfc_iocbq *piocb, uint32_t flag);
946 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
947 struct lpfc_iocbq *);
948 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
949
950 /* MBOX interface function jump table entries */
951 int (*lpfc_sli_issue_mbox)
952 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
953
954 /* Slow-path IOCB process function jump table entries */
955 void (*lpfc_sli_handle_slow_ring_event)
956 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
957 uint32_t mask);
958
959 /* INIT device interface function jump table entries */
960 int (*lpfc_sli_hbq_to_firmware)
961 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
962 int (*lpfc_sli_brdrestart)
963 (struct lpfc_hba *);
964 int (*lpfc_sli_brdready)
965 (struct lpfc_hba *, uint32_t);
966 void (*lpfc_handle_eratt)
967 (struct lpfc_hba *);
968 void (*lpfc_stop_port)
969 (struct lpfc_hba *);
970 int (*lpfc_hba_init_link)
971 (struct lpfc_hba *, uint32_t);
972 int (*lpfc_hba_down_link)
973 (struct lpfc_hba *, uint32_t);
974 int (*lpfc_selective_reset)
975 (struct lpfc_hba *);
976
977 int (*lpfc_bg_scsi_prep_dma_buf)
978 (struct lpfc_hba *, struct lpfc_io_buf *);
979
980 /* Prep SLI WQE/IOCB jump table entries */
981 void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq,
982 struct lpfc_vport *vport,
983 struct lpfc_dmabuf *bmp,
984 u16 cmd_size, u32 did, u32 elscmd,
985 u8 tmo, u8 expect_rsp);
986 void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq,
987 struct lpfc_dmabuf *bmp, u16 rpi,
988 u32 num_entry, u8 tmo);
989 void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq,
990 struct lpfc_dmabuf *bmp, u16 rpi,
991 u16 ox_id, u32 num_entry, u8 rctl,
992 u8 last_seq, u8 cr_cx_cmd);
993 void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
994 u16 ulp_context, u16 iotag,
995 u8 ulp_class, u16 cqid, bool ia,
996 bool wqec);
997
998 /* expedite pool */
999 struct lpfc_epd_pool epd_pool;
1000
1001 /* SLI4 specific HBA data structure */
1002 struct lpfc_sli4_hba sli4_hba;
1003
1004 struct workqueue_struct *wq;
1005 struct delayed_work eq_delay_work;
1006
1007 #define LPFC_IDLE_STAT_DELAY 1000
1008 struct delayed_work idle_stat_delay_work;
1009
1010 struct lpfc_sli sli;
1011 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
1012 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
1013 uint32_t sli3_options; /* Mask of enabled SLI3 options */
1014 #define LPFC_SLI3_HBQ_ENABLED 0x01
1015 #define LPFC_SLI3_NPIV_ENABLED 0x02
1016 #define LPFC_SLI3_VPORT_TEARDOWN 0x04
1017 #define LPFC_SLI3_CRP_ENABLED 0x08
1018 #define LPFC_SLI3_BG_ENABLED 0x20
1019 #define LPFC_SLI3_DSS_ENABLED 0x40
1020 #define LPFC_SLI4_PERFH_ENABLED 0x80
1021 #define LPFC_SLI4_PHWQ_ENABLED 0x100
1022 uint32_t iocb_cmd_size;
1023 uint32_t iocb_rsp_size;
1024
1025 struct lpfc_trunk_link trunk_link;
1026 enum hba_state link_state;
1027 uint32_t link_flag; /* link state flags */
1028 #define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
1029 /* This flag is set while issuing */
1030 /* INIT_LINK mailbox command */
1031 #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
1032 #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
1033 #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
1034 #define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
1035 #define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
1036 #define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */
1037
1038 unsigned long hba_flag; /* hba generic flags */
1039
1040 struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
1041 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
1042 struct lpfc_dmabuf slim2p;
1043
1044 MAILBOX_t *mbox;
1045 uint32_t *mbox_ext;
1046 struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
1047 uint32_t ha_copy;
1048 struct _PCB *pcb;
1049 struct _IOCB *IOCBs;
1050
1051 struct lpfc_dmabuf hbqslimp;
1052
1053 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
1054
1055 uint32_t fc_eventTag; /* event tag for link attention */
1056 uint32_t link_events;
1057
1058 /* These fields used to be binfo */
1059 uint32_t fc_pref_DID; /* preferred D_ID */
1060 uint8_t fc_pref_ALPA; /* preferred AL_PA */
1061 uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
1062 uint32_t fc_edtov; /* E_D_TOV timer value */
1063 uint32_t fc_arbtov; /* ARB_TOV timer value */
1064 uint32_t fc_ratov; /* R_A_TOV timer value */
1065 uint32_t fc_rttov; /* R_T_TOV timer value */
1066 uint32_t fc_altov; /* AL_TOV timer value */
1067 uint32_t fc_crtov; /* C_R_TOV timer value */
1068
1069 struct serv_parm fc_fabparam; /* fabric service parameters buffer */
1070 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
1071
1072 uint32_t lmt;
1073
1074 uint32_t fc_topology; /* link topology, from LINK INIT */
1075 uint32_t fc_topology_changed; /* link topology, from LINK INIT */
1076
1077 struct lpfc_stats fc_stat;
1078
1079 uint32_t nport_event_cnt; /* timestamp for nlplist entry */
1080
1081 unsigned long pni; /* 64-bit Platform Name Identifier */
1082
1083 uint8_t wwnn[8];
1084 uint8_t wwpn[8];
1085 uint32_t RandomData[7];
1086 uint8_t fcp_embed_io;
1087 uint8_t nvmet_support; /* driver supports NVMET */
1088 #define LPFC_NVMET_MAX_PORTS 32
1089 uint8_t mds_diags_support;
1090 uint8_t bbcredit_support;
1091 uint8_t enab_exp_wqcq_pages;
1092 u8 nsler; /* Firmware supports FC-NVMe-2 SLER */
1093
1094 /* HBA Config Parameters */
1095 uint32_t cfg_ack0;
1096 uint32_t cfg_xri_rebalancing;
1097 uint32_t cfg_xpsgl;
1098 uint32_t cfg_enable_npiv;
1099 uint32_t cfg_enable_rrq;
1100 uint32_t cfg_topology;
1101 uint32_t cfg_link_speed;
1102 #define LPFC_FCF_FOV 1 /* Fast fcf failover */
1103 #define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
1104 uint32_t cfg_fcf_failover_policy;
1105 uint32_t cfg_fcp_io_sched;
1106 uint32_t cfg_ns_query;
1107 uint32_t cfg_fcp2_no_tgt_reset;
1108 uint32_t cfg_cr_delay;
1109 uint32_t cfg_cr_count;
1110 uint32_t cfg_multi_ring_support;
1111 uint32_t cfg_multi_ring_rctl;
1112 uint32_t cfg_multi_ring_type;
1113 uint32_t cfg_poll;
1114 uint32_t cfg_poll_tmo;
1115 uint32_t cfg_task_mgmt_tmo;
1116 uint32_t cfg_use_msi;
1117 uint32_t cfg_auto_imax;
1118 uint32_t cfg_fcp_imax;
1119 uint32_t cfg_force_rscn;
1120 uint32_t cfg_cq_poll_threshold;
1121 uint32_t cfg_cq_max_proc_limit;
1122 uint32_t cfg_fcp_cpu_map;
1123 uint32_t cfg_fcp_mq_threshold;
1124 uint32_t cfg_hdw_queue;
1125 uint32_t cfg_irq_chann;
1126 uint32_t cfg_suppress_rsp;
1127 uint32_t cfg_nvme_oas;
1128 uint32_t cfg_nvme_embed_cmd;
1129 uint32_t cfg_nvmet_mrq_post;
1130 uint32_t cfg_nvmet_mrq;
1131 uint32_t cfg_enable_nvmet;
1132 uint32_t cfg_nvme_enable_fb;
1133 uint32_t cfg_nvmet_fb_size;
1134 uint32_t cfg_total_seg_cnt;
1135 uint32_t cfg_sg_seg_cnt;
1136 uint32_t cfg_nvme_seg_cnt;
1137 uint32_t cfg_scsi_seg_cnt;
1138 uint32_t cfg_sg_dma_buf_size;
1139 uint32_t cfg_hba_queue_depth;
1140 uint32_t cfg_enable_hba_reset;
1141 uint32_t cfg_enable_hba_heartbeat;
1142 uint32_t cfg_fof;
1143 uint32_t cfg_EnableXLane;
1144 uint8_t cfg_oas_tgt_wwpn[8];
1145 uint8_t cfg_oas_vpt_wwpn[8];
1146 uint32_t cfg_oas_lun_state;
1147 #define OAS_LUN_ENABLE 1
1148 #define OAS_LUN_DISABLE 0
1149 uint32_t cfg_oas_lun_status;
1150 #define OAS_LUN_STATUS_EXISTS 0x01
1151 uint32_t cfg_oas_flags;
1152 #define OAS_FIND_ANY_VPORT 0x01
1153 #define OAS_FIND_ANY_TARGET 0x02
1154 #define OAS_LUN_VALID 0x04
1155 uint32_t cfg_oas_priority;
1156 uint32_t cfg_XLanePriority;
1157 uint32_t cfg_enable_bg;
1158 uint32_t cfg_prot_mask;
1159 uint32_t cfg_prot_guard;
1160 uint32_t cfg_hostmem_hgp;
1161 uint32_t cfg_log_verbose;
1162 uint32_t cfg_enable_fc4_type;
1163 #define LPFC_ENABLE_FCP 1
1164 #define LPFC_ENABLE_NVME 2
1165 #define LPFC_ENABLE_BOTH 3
1166 #if (IS_ENABLED(CONFIG_NVME_FC))
1167 #define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
1168 #define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
1169 #else
1170 #define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
1171 #define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
1172 #endif
1173 uint32_t cfg_sriov_nr_virtfn;
1174 uint32_t cfg_request_firmware_upgrade;
1175 uint32_t cfg_suppress_link_up;
1176 uint32_t cfg_rrq_xri_bitmap_sz;
1177 u32 cfg_fcp_wait_abts_rsp;
1178 uint32_t cfg_delay_discovery;
1179 uint32_t cfg_sli_mode;
1180 #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
1181 #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
1182 #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
1183 uint32_t cfg_fdmi_on;
1184 #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
1185 #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
1186 uint32_t cfg_enable_SmartSAN;
1187 uint32_t cfg_enable_mds_diags;
1188 uint32_t cfg_ras_fwlog_level;
1189 uint32_t cfg_ras_fwlog_buffsize;
1190 uint32_t cfg_ras_fwlog_func;
1191 uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
1192 uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
1193 uint32_t cfg_enable_pbde;
1194 uint32_t cfg_enable_mi;
1195 struct nvmet_fc_target_port *targetport;
1196 lpfc_vpd_t vpd; /* vital product data */
1197
1198 u32 cfg_max_vmid; /* maximum VMIDs allowed per port */
1199 u32 cfg_vmid_app_header;
1200 #define LPFC_VMID_APP_HEADER_DISABLE 0
1201 #define LPFC_VMID_APP_HEADER_ENABLE 1
1202 u32 cfg_vmid_priority_tagging;
1203 u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */
1204 /* deregisters from switch */
1205 struct pci_dev *pcidev;
1206 struct list_head work_list;
1207 uint32_t work_ha; /* Host Attention Bits for WT */
1208 uint32_t work_ha_mask; /* HA Bits owned by WT */
1209 uint32_t work_hs; /* HS stored in case of ERRAT */
1210 uint32_t work_status[2]; /* Extra status from SLIM */
1211
1212 wait_queue_head_t work_waitq;
1213 struct task_struct *worker_thread;
1214 unsigned long data_flags;
1215 uint32_t border_sge_num;
1216
1217 uint32_t hbq_in_use; /* HBQs in use flag */
1218 uint32_t hbq_count; /* Count of configured HBQs */
1219 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
1220
1221 phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
1222 phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
1223 phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */
1224 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
1225 PCI BAR0 */
1226 void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
1227 PCI BAR2 */
1228
1229 void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
1230 PCI BAR0 with dual-ULP support */
1231 void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
1232 PCI BAR2 with dual-ULP support */
1233 void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
1234 PCI BAR4 with dual-ULP support */
1235 #define PCI_64BIT_BAR0 0
1236 #define PCI_64BIT_BAR2 2
1237 #define PCI_64BIT_BAR4 4
1238 void __iomem *MBslimaddr; /* virtual address for mbox cmds */
1239 void __iomem *HAregaddr; /* virtual address for host attn reg */
1240 void __iomem *CAregaddr; /* virtual address for chip attn reg */
1241 void __iomem *HSregaddr; /* virtual address for host status
1242 reg */
1243 void __iomem *HCregaddr; /* virtual address for host ctl reg */
1244
1245 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
1246 struct lpfc_pgp *port_gp;
1247 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
1248 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
1249
1250 int brd_no; /* FC board number */
1251 char SerialNumber[32]; /* adapter Serial Number */
1252 char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
1253 char BIOSVersion[16]; /* Boot BIOS version */
1254 char ModelDesc[256]; /* Model Description */
1255 char ModelName[80]; /* Model Name */
1256 char ProgramType[256]; /* Program Type */
1257 char Port[20]; /* Port No */
1258 uint8_t vpd_flag; /* VPD data flag */
1259
1260 #define VPD_MODEL_DESC 0x1 /* valid vpd model description */
1261 #define VPD_MODEL_NAME 0x2 /* valid vpd model name */
1262 #define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */
1263 #define VPD_PORT 0x8 /* valid vpd port data */
1264 #define VPD_MASK 0xf /* mask for any vpd data */
1265
1266
1267 struct timer_list fcp_poll_timer;
1268 struct timer_list eratt_poll;
1269 uint32_t eratt_poll_interval;
1270
1271 uint64_t bg_guard_err_cnt;
1272 uint64_t bg_apptag_err_cnt;
1273 uint64_t bg_reftag_err_cnt;
1274
1275 /* fastpath list. */
1276 spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */
1277 spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */
1278 struct list_head lpfc_scsi_buf_list_get;
1279 struct list_head lpfc_scsi_buf_list_put;
1280 uint32_t total_scsi_bufs;
1281 struct list_head lpfc_iocb_list;
1282 uint32_t total_iocbq_bufs;
1283 spinlock_t rrq_list_lock; /* lock for active_rrq_list */
1284 struct list_head active_rrq_list;
1285 spinlock_t hbalock;
1286 struct work_struct unblock_request_work; /* SCSI layer unblock IOs */
1287
1288 /* dma_mem_pools */
1289 struct dma_pool *lpfc_sg_dma_buf_pool;
1290 struct dma_pool *lpfc_mbuf_pool;
1291 struct dma_pool *lpfc_hrb_pool; /* header receive buffer pool */
1292 struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
1293 struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
1294 struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
1295 struct dma_pool *lpfc_cmd_rsp_buf_pool;
1296 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
1297
1298 mempool_t *mbox_mem_pool;
1299 mempool_t *nlp_mem_pool;
1300 mempool_t *rrq_pool;
1301 mempool_t *active_rrq_pool;
1302
1303 struct fc_host_statistics link_stats;
1304 enum lpfc_irq_chann_mode irq_chann_mode;
1305 enum intr_type_t intr_type;
1306 uint32_t intr_mode;
1307 #define LPFC_INTR_ERROR 0xFFFFFFFF
1308 struct list_head port_list;
1309 spinlock_t port_list_lock; /* lock for port_list mutations */
1310 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
1311 uint16_t max_vpi; /* Maximum virtual nports */
1312 #define LPFC_MAX_VPI 0xFF /* Max number VPI supported 0 - 0xff */
1313 #define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */
1314 uint16_t max_vports; /*
1315 * For IOV HBAs max_vpi can change
1316 * after a reset. max_vports is max
1317 * number of vports present. This can
1318 * be greater than max_vpi.
1319 */
1320 uint16_t vpi_base;
1321 uint16_t vfi_base;
1322 unsigned long *vpi_bmask; /* vpi allocation table */
1323 uint16_t *vpi_ids;
1324 uint16_t vpi_count;
1325 struct list_head lpfc_vpi_blk_list;
1326
1327 /* Data structure used by fabric iocb scheduler */
1328 struct list_head fabric_iocb_list;
1329 atomic_t fabric_iocb_count;
1330 struct timer_list fabric_block_timer;
1331 unsigned long bit_flags;
1332 atomic_t num_rsrc_err;
1333 unsigned long last_rsrc_error_time;
1334 unsigned long last_ramp_down_time;
1335 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1336 struct dentry *hba_debugfs_root;
1337 unsigned int debugfs_vport_count;
1338
1339 struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
1340 atomic_t nvmeio_trc_cnt;
1341 uint32_t nvmeio_trc_size;
1342 uint32_t nvmeio_trc_output_idx;
1343
1344 /* T10 DIF error injection */
1345 uint32_t lpfc_injerr_wgrd_cnt;
1346 uint32_t lpfc_injerr_wapp_cnt;
1347 uint32_t lpfc_injerr_wref_cnt;
1348 uint32_t lpfc_injerr_rgrd_cnt;
1349 uint32_t lpfc_injerr_rapp_cnt;
1350 uint32_t lpfc_injerr_rref_cnt;
1351 uint32_t lpfc_injerr_nportid;
1352 struct lpfc_name lpfc_injerr_wwpn;
1353 sector_t lpfc_injerr_lba;
1354 #define LPFC_INJERR_LBA_OFF (sector_t)(-1)
1355
1356 struct lpfc_debugfs_trc *slow_ring_trc;
1357 atomic_t slow_ring_trc_cnt;
1358 /* iDiag debugfs sub-directory */
1359 struct dentry *idiag_root;
1360 uint8_t lpfc_idiag_last_eq;
1361 #endif
1362 uint16_t nvmeio_trc_on;
1363
1364 /* Used for deferred freeing of ELS data buffers */
1365 struct list_head elsbuf;
1366 int elsbuf_cnt;
1367 int elsbuf_prev_cnt;
1368
1369 uint8_t temp_sensor_support;
1370 /* Fields used for heart beat. */
1371 unsigned long last_completion_time;
1372 unsigned long skipped_hb;
1373 struct timer_list hb_tmofunc;
1374 struct timer_list rrq_tmr;
1375 enum hba_temp_state over_temp_state;
1376 /*
1377 * Following bit will be set for all buffer tags which are not
1378 * associated with any HBQ.
1379 */
1380 #define QUE_BUFTAG_BIT (1<<31)
1381 uint32_t buffer_tag_count;
1382
1383 /* Maximum number of events that can be outstanding at any time*/
1384 #define LPFC_MAX_EVT_COUNT 512
1385 atomic_t fast_event_count;
1386 uint32_t fcoe_eventtag;
1387 uint32_t fcoe_eventtag_at_fcf_scan;
1388 uint32_t fcoe_cvl_eventtag;
1389 uint32_t fcoe_cvl_eventtag_attn;
1390 struct lpfc_fcf fcf;
1391 uint8_t fc_map[3];
1392 uint8_t valid_vlan;
1393 uint16_t vlan_id;
1394 struct list_head fcf_conn_rec_list;
1395
1396 struct lpfc_defer_flogi_acc defer_flogi_acc;
1397
1398 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
1399 struct list_head ct_ev_waiters;
1400 struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
1401 uint32_t ctx_idx;
1402 struct timer_list inactive_vmid_poll;
1403
1404 /* RAS Support */
1405 spinlock_t ras_fwlog_lock; /* do not take while holding another lock */
1406 struct lpfc_ras_fwlog ras_fwlog;
1407
1408 uint32_t iocb_cnt;
1409 uint32_t iocb_max;
1410 atomic_t sdev_cnt;
1411 spinlock_t devicelock; /* lock for luns list */
1412 mempool_t *device_data_mem_pool;
1413 struct list_head luns;
1414 #define LPFC_TRANSGRESSION_HIGH_TEMPERATURE 0x0080
1415 #define LPFC_TRANSGRESSION_LOW_TEMPERATURE 0x0040
1416 #define LPFC_TRANSGRESSION_HIGH_VOLTAGE 0x0020
1417 #define LPFC_TRANSGRESSION_LOW_VOLTAGE 0x0010
1418 #define LPFC_TRANSGRESSION_HIGH_TXBIAS 0x0008
1419 #define LPFC_TRANSGRESSION_LOW_TXBIAS 0x0004
1420 #define LPFC_TRANSGRESSION_HIGH_TXPOWER 0x0002
1421 #define LPFC_TRANSGRESSION_LOW_TXPOWER 0x0001
1422 #define LPFC_TRANSGRESSION_HIGH_RXPOWER 0x8000
1423 #define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000
1424 uint16_t sfp_alarm;
1425 uint16_t sfp_warning;
1426
1427 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1428 uint16_t hdwqstat_on;
1429 #define LPFC_CHECK_OFF 0
1430 #define LPFC_CHECK_NVME_IO 1
1431 #define LPFC_CHECK_NVMET_IO 2
1432 #define LPFC_CHECK_SCSI_IO 4
1433 uint16_t ktime_on;
1434 uint64_t ktime_data_samples;
1435 uint64_t ktime_status_samples;
1436 uint64_t ktime_last_cmd;
1437 uint64_t ktime_seg1_total;
1438 uint64_t ktime_seg1_min;
1439 uint64_t ktime_seg1_max;
1440 uint64_t ktime_seg2_total;
1441 uint64_t ktime_seg2_min;
1442 uint64_t ktime_seg2_max;
1443 uint64_t ktime_seg3_total;
1444 uint64_t ktime_seg3_min;
1445 uint64_t ktime_seg3_max;
1446 uint64_t ktime_seg4_total;
1447 uint64_t ktime_seg4_min;
1448 uint64_t ktime_seg4_max;
1449 uint64_t ktime_seg5_total;
1450 uint64_t ktime_seg5_min;
1451 uint64_t ktime_seg5_max;
1452 uint64_t ktime_seg6_total;
1453 uint64_t ktime_seg6_min;
1454 uint64_t ktime_seg6_max;
1455 uint64_t ktime_seg7_total;
1456 uint64_t ktime_seg7_min;
1457 uint64_t ktime_seg7_max;
1458 uint64_t ktime_seg8_total;
1459 uint64_t ktime_seg8_min;
1460 uint64_t ktime_seg8_max;
1461 uint64_t ktime_seg9_total;
1462 uint64_t ktime_seg9_min;
1463 uint64_t ktime_seg9_max;
1464 uint64_t ktime_seg10_total;
1465 uint64_t ktime_seg10_min;
1466 uint64_t ktime_seg10_max;
1467 #endif
1468 /* CMF objects */
1469 struct lpfc_cgn_stat __percpu *cmf_stat;
1470 uint32_t cmf_interval_rate; /* timer interval limit in ms */
1471 uint32_t cmf_timer_cnt;
1472 #define LPFC_CMF_INTERVAL 90
1473 uint64_t cmf_link_byte_count;
1474 uint64_t cmf_max_line_rate;
1475 uint64_t cmf_max_bytes_per_interval;
1476 uint64_t cmf_last_sync_bw;
1477 #define LPFC_CMF_BLK_SIZE 512
1478 struct hrtimer cmf_timer;
1479 struct hrtimer cmf_stats_timer; /* 1 minute stats timer */
1480 atomic_t cmf_bw_wait;
1481 atomic_t cmf_busy;
1482 atomic_t cmf_stop_io; /* To block request and stop IO's */
1483 uint32_t cmf_active_mode;
1484 uint32_t cmf_info_per_interval;
1485 #define LPFC_MAX_CMF_INFO 32
1486 struct timespec64 cmf_latency; /* Interval congestion timestamp */
1487 uint32_t cmf_last_ts; /* Interval congestion time (ms) */
1488 uint32_t cmf_active_info;
1489
1490 /* Signal / FPIN handling for Congestion Mgmt */
1491 u8 cgn_reg_fpin; /* Negotiated value from RDF */
1492 u8 cgn_init_reg_fpin; /* Initial value from READ_CONFIG */
1493 #define LPFC_CGN_FPIN_NONE 0x0
1494 #define LPFC_CGN_FPIN_WARN 0x1
1495 #define LPFC_CGN_FPIN_ALARM 0x2
1496 #define LPFC_CGN_FPIN_BOTH (LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
1497
1498 u8 cgn_reg_signal; /* Negotiated value from EDC */
1499 u8 cgn_init_reg_signal; /* Initial value from READ_CONFIG */
1500 /* cgn_reg_signal and cgn_init_reg_signal use
1501 * enum fc_edc_cg_signal_cap_types
1502 */
1503 u16 cgn_fpin_frequency; /* In units of msecs */
1504 #define LPFC_FPIN_INIT_FREQ 0xffff
1505 u32 cgn_sig_freq;
1506 u32 cgn_acqe_cnt;
1507
1508 /* RX monitor handling for CMF */
1509 struct lpfc_rx_info_monitor *rx_monitor;
1510 atomic_t rx_max_read_cnt; /* Maximum read bytes */
1511 uint64_t rx_block_cnt;
1512
1513 /* Congestion parameters from flash */
1514 struct lpfc_cgn_param cgn_p;
1515
1516 /* Statistics counter for ACQE cgn alarms and warnings */
1517 struct lpfc_cgn_acqe_stat cgn_acqe_stat;
1518
1519 /* Congestion buffer information */
1520 struct lpfc_dmabuf *cgn_i; /* Congestion Info buffer */
1521 atomic_t cgn_fabric_warn_cnt; /* Total warning cgn events for info */
1522 atomic_t cgn_fabric_alarm_cnt; /* Total alarm cgn events for info */
1523 atomic_t cgn_sync_warn_cnt; /* Total warning events for SYNC wqe */
1524 atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */
1525 atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */
1526 atomic_t cgn_latency_evt_cnt;
1527 atomic64_t cgn_latency_evt; /* Avg latency per minute */
1528 unsigned long cgn_evt_timestamp;
1529 #define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */
1530 uint32_t cgn_evt_minute;
1531 #define LPFC_SEC_MIN 60UL
1532 #define LPFC_MIN_HOUR 60
1533 #define LPFC_HOUR_DAY 24
1534 #define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY)
1535
1536 struct hlist_node cpuhp; /* used for cpuhp per hba callback */
1537 struct timer_list cpuhp_poll_timer;
1538 struct list_head poll_list; /* slowpath eq polling list */
1539 #define LPFC_POLL_HB 1 /* slowpath heartbeat */
1540
1541 char os_host_name[MAXHOSTNAMELEN];
1542
1543 /* LD Signaling */
1544 u32 degrade_activate_threshold;
1545 u32 degrade_deactivate_threshold;
1546 u32 fec_degrade_interval;
1547
1548 atomic_t dbg_log_idx;
1549 atomic_t dbg_log_cnt;
1550 atomic_t dbg_log_dmping;
1551 struct dbg_log_ent dbg_log[DBG_LOG_SZ];
1552 };
1553
/* Sizing limits for the CMF RX monitor ring (struct lpfc_rx_info_monitor) */
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32

/*
 * One interval sample recorded by the CMF RX monitor. Entries of this type
 * are stored in the circular buffer owned by struct lpfc_rx_info_monitor
 * (reachable via phba->rx_monitor).
 */
struct rx_info_entry {
	uint64_t cmf_bytes;	/* Total no of read bytes for CMF_SYNC_WQE */
	uint64_t total_bytes;	/* Total no of read bytes requested */
	uint64_t rcv_bytes;	/* Total no of read bytes completed */
	uint64_t avg_io_size;
	uint64_t avg_io_latency;/* Average io latency in microseconds */
	uint64_t max_read_cnt;	/* Maximum read bytes */
	uint64_t max_bytes_per_interval;
	uint32_t cmf_busy;
	uint32_t cmf_info;	/* CMF_SYNC_WQE info */
	uint32_t io_cnt;
	uint32_t timer_utilization;
	uint32_t timer_interval;
};
1570
/*
 * Circular buffer of rx_info_entry samples gathered for CMF RX monitoring
 * (see phba->rx_monitor). Ring accesses are serialized by @lock.
 */
struct lpfc_rx_info_monitor {
	struct rx_info_entry *ring; /* info organized in a circular buffer */
	u32 head_idx, tail_idx; /* index to head/tail of ring */
	spinlock_t lock; /* spinlock for ring */
	u32 entries; /* storing number entries/size of ring */
};
1577
/*
 * lpfc_shost_from_vport - Recover the Scsi_Host that embeds a vport
 * @vport: Pointer to an lpfc_vport.
 *
 * The lpfc_vport lives at the start of the Scsi_Host private data area
 * (hostdata[0]), so the owning Scsi_Host can be computed with container_of.
 */
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
	return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
}
1583
1584 static inline void
lpfc_set_loopback_flag(struct lpfc_hba * phba)1585 lpfc_set_loopback_flag(struct lpfc_hba *phba)
1586 {
1587 if (phba->cfg_topology == FLAGS_LOCAL_LB)
1588 phba->link_flag |= LS_LOOPBACK_MODE;
1589 else
1590 phba->link_flag &= ~LS_LOOPBACK_MODE;
1591 }
1592
1593 static inline int
lpfc_is_link_up(struct lpfc_hba * phba)1594 lpfc_is_link_up(struct lpfc_hba *phba)
1595 {
1596 return phba->link_state == LPFC_LINK_UP ||
1597 phba->link_state == LPFC_CLEAR_LA ||
1598 phba->link_state == LPFC_HBA_READY;
1599 }
1600
1601 static inline void
lpfc_worker_wake_up(struct lpfc_hba * phba)1602 lpfc_worker_wake_up(struct lpfc_hba *phba)
1603 {
1604 /* Set the lpfc data pending flag */
1605 set_bit(LPFC_DATA_READY, &phba->data_flags);
1606
1607 /* Wake up worker thread */
1608 wake_up(&phba->work_waitq);
1609 return;
1610 }
1611
1612 static inline int
lpfc_readl(void __iomem * addr,uint32_t * data)1613 lpfc_readl(void __iomem *addr, uint32_t *data)
1614 {
1615 uint32_t temp;
1616 temp = readl(addr);
1617 if (temp == 0xffffffff)
1618 return -EIO;
1619 *data = temp;
1620 return 0;
1621 }
1622
/*
 * lpfc_sli_read_hs - Capture host status after a link/board error attention
 * @phba: Pointer to HBA context object.
 *
 * Saves the host status register and the two extended status words from
 * SLIM into phba->work_hs and phba->work_status[], then clears and flushes
 * the ERATT host attention bit and marks the physical port stopped.
 *
 * Returns -EIO when any of the registers reads back all-ones (lpfc_readl
 * failure, e.g. the adapter was unplugged); 0 on success.
 */
static inline int
lpfc_sli_read_hs(struct lpfc_hba *phba)
{
	/*
	 * There was a link/board error. Read the status register to retrieve
	 * the error event and process it.
	 */
	phba->sli.slistat.err_attn_event++;

	/* Save status info and check for unplug error */
	if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
	    lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
	    lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
		return -EIO;
	}

	/* Clear chip Host Attention error bit */
	writel(HA_ERATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush the posted write */
	phba->pport->stopped = 1;

	return 0;
}
1646
1647 static inline struct lpfc_sli_ring *
lpfc_phba_elsring(struct lpfc_hba * phba)1648 lpfc_phba_elsring(struct lpfc_hba *phba)
1649 {
1650 /* Return NULL if sli_rev has become invalid due to bad fw */
1651 if (phba->sli_rev != LPFC_SLI_REV4 &&
1652 phba->sli_rev != LPFC_SLI_REV3 &&
1653 phba->sli_rev != LPFC_SLI_REV2)
1654 return NULL;
1655
1656 if (phba->sli_rev == LPFC_SLI_REV4) {
1657 if (phba->sli4_hba.els_wq)
1658 return phba->sli4_hba.els_wq->pring;
1659 else
1660 return NULL;
1661 }
1662 return &phba->sli.sli3_ring[LPFC_ELS_RING];
1663 }
1664
/**
 * lpfc_next_online_cpu - Finds next online CPU on cpumask
 * @mask: Pointer to phba's cpumask member.
 * @start: starting cpu index
 *
 * Wraps around @mask when the end is reached; only CPUs that are both in
 * @mask and currently online qualify.
 *
 * Return: index of the next online cpu, or nr_cpu_ids if no valid cpu
 * is found.
 **/
static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
	return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
1678
/**
 * lpfc_next_present_cpu - Finds next present CPU after n
 * @n: the cpu prior to search
 *
 * Note: If no next present cpu, then fallback to first present cpu.
 *
 * Return: index of the next present cpu, wrapping to the first present
 * cpu when @n is the last one.
 **/
static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
	return cpumask_next_wrap(n, cpu_present_mask);
}
1690
/**
 * lpfc_sli4_mod_hba_eq_delay - update EQ delay
 * @phba: Pointer to HBA context object.
 * @eq: The Event Queue to update.
 * @delay: The delay value (in us) to be written.
 *
 * Programs the SLI port EQ delay register for @eq's queue_id and records
 * the new delay in eq->q_mode.
 **/
static inline void
lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
			   u32 delay)
{
	struct lpfc_register reg_data;

	reg_data.word0 = 0;
	bf_set(lpfc_sliport_eqdelay_id, &reg_data, eq->queue_id);
	bf_set(lpfc_sliport_eqdelay_delay, &reg_data, delay);
	writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
	eq->q_mode = delay;
}
1710
1711
/*
 * Macro that declares tables and a routine to perform enum type to
 * ascii string lookup.
 *
 * Defines a <key,value> table for an enum. Uses xxx_INIT defines for
 * the enum to populate the table. Macro defines a routine (named
 * by caller) that will search all elements of the table for the key
 * and return the name string if found or "Unrecognized" if not found.
 *
 * Both the table and the routine are declared static, so the macro may
 * be instantiated once per enum per translation unit.
 */
#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init)		\
static struct {								\
	enum enum_name value;						\
	char *name;							\
} fc_##enum_name##_e2str_names[] = enum_init;				\
static const char *routine(enum enum_name table_key)			\
{									\
	int i;								\
	char *name = "Unrecognized";					\
									\
	for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\
		if (fc_##enum_name##_e2str_names[i].value == table_key) {\
			name = fc_##enum_name##_e2str_names[i].name;	\
			break;						\
		}							\
	}								\
	return name;							\
}
1739
/**
 * lpfc_is_vmid_enabled - returns if VMID is enabled for either switch types
 * @phba: Pointer to HBA context object.
 *
 * Relationship between the enable, target support and if vmid tag is required
 * for the particular combination
 * ---------------------------------------------------
 * Switch    Enable Flag  Target Support  VMID Needed
 * ---------------------------------------------------
 * App Id     0              NA              N
 * App Id     1               0              N
 * App Id     1               1              Y
 * Pr Tag     0              NA              N
 * Pr Tag     1               0              N
 * Pr Tag     1               1              Y
 * Pr Tag     2               *              Y
 * ---------------------------------------------------
 *
 * Return: 1 when either app-header or priority-tagging VMID is configured,
 * otherwise 0.
 **/
static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
{
	return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
}
1763
1764 static inline
get_job_ulpstatus(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1765 u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1766 {
1767 if (phba->sli_rev == LPFC_SLI_REV4)
1768 return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
1769 else
1770 return iocbq->iocb.ulpStatus;
1771 }
1772
1773 static inline
get_job_word4(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1774 u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1775 {
1776 if (phba->sli_rev == LPFC_SLI_REV4)
1777 return iocbq->wcqe_cmpl.parameter;
1778 else
1779 return iocbq->iocb.un.ulpWord[4];
1780 }
1781
1782 static inline
get_job_cmnd(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1783 u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1784 {
1785 if (phba->sli_rev == LPFC_SLI_REV4)
1786 return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
1787 else
1788 return iocbq->iocb.ulpCommand;
1789 }
1790
1791 static inline
get_job_ulpcontext(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1792 u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1793 {
1794 if (phba->sli_rev == LPFC_SLI_REV4)
1795 return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com);
1796 else
1797 return iocbq->iocb.ulpContext;
1798 }
1799
1800 static inline
get_job_rcvoxid(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1801 u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1802 {
1803 if (phba->sli_rev == LPFC_SLI_REV4)
1804 return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com);
1805 else
1806 return iocbq->iocb.unsli3.rcvsli3.ox_id;
1807 }
1808
1809 static inline
get_job_data_placed(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1810 u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1811 {
1812 if (phba->sli_rev == LPFC_SLI_REV4)
1813 return iocbq->wcqe_cmpl.total_data_placed;
1814 else
1815 return iocbq->iocb.un.genreq64.bdl.bdeSize;
1816 }
1817
1818 static inline
get_job_abtsiotag(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1819 u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1820 {
1821 if (phba->sli_rev == LPFC_SLI_REV4)
1822 return iocbq->wqe.abort_cmd.wqe_com.abort_tag;
1823 else
1824 return iocbq->iocb.un.acxri.abortIoTag;
1825 }
1826
1827 static inline
get_job_els_rsp64_did(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1828 u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1829 {
1830 if (phba->sli_rev == LPFC_SLI_REV4)
1831 return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest);
1832 else
1833 return iocbq->iocb.un.elsreq64.remoteID;
1834 }
1835