/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

extern const struct file_operations cxlflash_cxl_fops;

#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */

#define CXLFLASH_BLOCK_SIZE	4096		/* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */
#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)	/* SCSI wants
								 * max_sectors
								 * in units of
								 * 512 byte
								 * sectors
								 */

#define MAX_RHT_PER_CONTEXT	(PAGE_SIZE / sizeof(struct sisl_rht_entry))

/* AFU command retry limit */
#define MC_RETRY_CNT	5	/* sufficient for SCSI check and
				 * certain AFU errors
				 */

/* Command management definitions */
#define CXLFLASH_NUM_CMDS	(2 * CXLFLASH_MAX_CMDS)	/* Must be a pow2 for
							 * alignment and more
							 * efficient array
							 * index derivation
							 */

#define CXLFLASH_MAX_CMDS		256
#define CXLFLASH_MAX_CMDS_PER_LUN	CXLFLASH_MAX_CMDS

/* RRQ for master issued cmds */
#define NUM_RRQ_ENTRY	CXLFLASH_MAX_CMDS

/* SQ for master issued cmds */
#define NUM_SQ_ENTRY	CXLFLASH_MAX_CMDS

static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
}
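
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * because CXLFLASH_NUM_CMDS is a power of two, a command slot index can be
 * derived from a free-running tag with a mask rather than a modulo, which is
 * the "more efficient array index derivation" the comment above refers to.
 */
static inline u32 cxlflash_example_cmd_index(u64 tag)
{
	/* Equivalent to tag % CXLFLASH_NUM_CMDS when the count is a pow2 */
	return tag & (CXLFLASH_NUM_CMDS - 1);
}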
/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE	SIZE_4K

enum cxlflash_lr_state {
	LINK_RESET_INVALID,
	LINK_RESET_REQUIRED,
	LINK_RESET_COMPLETE
};

enum cxlflash_init_state {
	INIT_STATE_NONE,
	INIT_STATE_PCI,
	INIT_STATE_AFU,
	INIT_STATE_SCSI
};

enum cxlflash_state {
	STATE_NORMAL,	/* Normal running state, everything good */
	STATE_RESET,	/* Reset state, trying to reset/recover */
	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
};

/*
 * Each context has its own set of resource handles that is visible
 * only from that context.
 */

struct cxlflash_cfg {
	struct afu *afu;
	struct cxl_context *mcctx;

	struct pci_dev *dev;
	struct pci_device_id *dev_id;
	struct Scsi_Host *host;

	ulong cxlflash_regs_pci;

	struct work_struct work_q;
	enum cxlflash_init_state init_state;
	enum cxlflash_lr_state lr_state;
	int lr_port;
	atomic_t scan_host_needed;

	struct cxl_afu *cxl_afu;

	atomic_t recovery_threads;
	struct mutex ctx_recovery_mutex;
	struct mutex ctx_tbl_list_mutex;
	struct rw_semaphore ioctl_rwsem;
	struct ctx_info *ctx_tbl[MAX_CONTEXT];
	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
	struct file_operations cxl_fops;

	/* Parameters that are LUN table related */
	int last_lun_index[CXLFLASH_NUM_FC_PORTS];
	int promote_lun_index;
	struct list_head lluns; /* list of llun_info structs */

	wait_queue_head_t tmf_waitq;
	spinlock_t tmf_slock;
	bool tmf_active;
	wait_queue_head_t reset_waitq;
	enum cxlflash_state state;
};

struct afu_cmd {
	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
	struct afu *parent;
	struct scsi_cmnd *scp;
	struct completion cevent;

	u8 cmd_tmf:1;

	/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
	 * However for performance reasons the IOARCB/IOASA should be
	 * cache line aligned.
	 */
} __aligned(cache_line_size());

static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
{
	return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}

static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
{
	struct afu_cmd *afuc = sc_to_afuc(sc);

	memset(afuc, 0, sizeof(*afuc));
	return afuc;
}
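
/*
 * Illustrative sketch (hypothetical helper, not the driver's actual queuing
 * path): scsi_cmd_priv() only guarantees pointer alignment for the
 * per-command private area, so callers are expected to go through
 * sc_to_afuc()/sc_to_afucz() to obtain a correctly aligned (and, for the
 * latter, zeroed) struct afu_cmd before filling in the IOARCB.
 */
static inline struct afu_cmd *cxlflash_example_init_cmd(struct afu *afu,
							struct scsi_cmnd *scp)
{
	struct afu_cmd *cmd = sc_to_afucz(scp);

	/* Tie the private command back to its originators */
	cmd->scp = scp;
	cmd->parent = afu;

	return cmd;
}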
struct afu {
	/* Stuff requiring alignment goes first. */
	struct sisl_ioarcb sq[NUM_SQ_ENTRY];		/* 16K SQ */
	u64 rrq_entry[NUM_RRQ_ENTRY];			/* 2K RRQ */

	/* Beware of alignment till here. Preferably introduce new
	 * fields after this point
	 */

	int (*send_cmd)(struct afu *, struct afu_cmd *);
	void (*context_reset)(struct afu_cmd *);

	/* AFU HW */
	struct cxl_ioctl_start_work work;
	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
	struct sisl_host_map __iomem *host_map;		/* MC host map */
	struct sisl_ctrl_map __iomem *ctrl_map;		/* MC control map */

	struct kref mapcount;

	ctx_hndl_t ctx_hndl;	/* master's context handle */

	atomic_t hsq_credits;
	spinlock_t hsq_slock;
	struct sisl_ioarcb *hsq_start;
	struct sisl_ioarcb *hsq_end;
	struct sisl_ioarcb *hsq_curr;
	u64 *hrrq_start;
	u64 *hrrq_end;
	u64 *hrrq_curr;
	bool toggle;
	atomic_t cmds_active;	/* Number of currently active AFU commands */
	s64 room;
	spinlock_t rrin_slock;	/* Lock to rrin queuing and cmd_room updates */
	u64 hb;
	u32 internal_lun;	/* User-desired LUN mode for this AFU */

	char version[16];
	u64 interface_version;

	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
};

static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
{
	u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;

	return afu_cap & cmd_mode;
}

static inline bool afu_is_sq_cmd_mode(struct afu *afu)
{
	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
}

static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
{
	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
}

static inline u64 lun_to_lunid(u64 lun)
{
	__be64 lun_id;

	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
	return be64_to_cpu(lun_id);
}

int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
int cxlflash_ioctl(struct scsi_device *, int, void __user *);
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
void cxlflash_term_local_luns(struct cxlflash_cfg *);
void cxlflash_restore_luntable(struct cxlflash_cfg *);

#endif /* ifndef _CXLFLASH_COMMON_H */