// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed_nvmetcp_fw_funcs.h"

#define NVMETCP_NUM_SGES_IN_CACHE 0x4

bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}

void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
			   struct scsi_cached_sges *ctx_data_desc,
			   struct storage_sgl_task_params *sgl_params)
{
	u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
				   NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
	u8 sge_index;

	/* sgl params */
	ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
	ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
	ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);

	for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
		ctx_data_desc->sge[sge_index].sge_addr.lo =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
		ctx_data_desc->sge[sge_index].sge_addr.hi =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
		ctx_data_desc->sge[sge_index].sge_len =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
	}
}

static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
				    enum nvmetcp_task_type task_type)
{
	u32 io_size;

	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
		io_size = task_params->tx_io_size;
	else
		io_size = task_params->rx_io_size;

	if (unlikely(!io_size))
		return 0;

	return io_size;
}

static inline void init_sqe(struct nvmetcp_task_params *task_params,
			    struct storage_sgl_task_params *sgl_task_params,
			    enum nvmetcp_task_type task_type)
{
	if (!task_params->sqe)
		return;

	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
	task_params->sqe->task_id = cpu_to_le16(task_params->itid);

	switch (task_type) {
	case NVMETCP_TASK_TYPE_HOST_WRITE: {
		u32 buf_size = 0;
		u32 num_sges = 0;

		SET_FIELD(task_params->sqe->contlen_cdbsize,
			  NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_NORMAL);
		if (task_params->tx_io_size) {
			if (task_params->send_write_incapsule)
				buf_size = calc_rw_task_size(task_params, task_type);

			if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
						sgl_task_params->small_mid_sge))
				num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
			else
				num_sges = min((u16)sgl_task_params->num_sges,
					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
		}
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
		SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
	} break;

	case NVMETCP_TASK_TYPE_HOST_READ: {
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_NORMAL);
		SET_FIELD(task_params->sqe->contlen_cdbsize,
			  NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
	} break;

	case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_MIDDLE_PATH);

		if (task_params->tx_io_size) {
			SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
				  task_params->tx_io_size);
			SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
				  min((u16)sgl_task_params->num_sges,
				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
		}
	} break;

	case NVMETCP_TASK_TYPE_CLEANUP:
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_TASK_CLEANUP);
		break;

	default:
		break;
	}
}
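
/*
 * Note on the WQE SGE count set above: for write and connection-request
 * WQEs the count written to the SQE is capped at SCSI_NUM_SGES_SLOW_SGL_THR,
 * and SGLs classified as slow by nvmetcp_is_slow_sgl() are marked with
 * NVMETCP_WQE_NUM_SGES_SLOWIO instead of an actual count.
 */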

/* The following function initializes the NVMeTCP task params */
static inline void
init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
			 struct nvmetcp_task_params *task_params,
			 enum nvmetcp_task_type task_type)
{
	context->ystorm_st_context.state.cccid = task_params->host_cccid;
	SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
	context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
	context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
}

/* The following function initializes default values for all tasks */
static inline void
init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
			  void *pdu_header, void *nvme_cmd,
			  enum nvmetcp_task_type task_type)
{
	struct e5_nvmetcp_task_context *context = task_params->context;
	const u8 val_byte = context->mstorm_ag_context.cdu_validation;
	u8 dw_index;

	memset(context, 0, sizeof(*context));
	init_nvmetcp_task_params(context, task_params,
				 (enum nvmetcp_task_type)task_type);

	/* Swapping requirements used below, will be removed in future FW versions */
	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
	    task_type == NVMETCP_TASK_TYPE_HOST_READ) {
		for (dw_index = 0;
		     dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));

		for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
		     dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
	} else {
		for (dw_index = 0;
		     dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
	}

	/* M-Storm Context: */
	context->mstorm_ag_context.cdu_validation = val_byte;
	context->mstorm_st_context.task_type = (u8)(task_type);
	context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);

	/* Ustorm Context: */
	SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
	context->ustorm_st_context.task_type = (u8)(task_type);
	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
	context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}

/* The following function initializes the U-Storm Task Contexts */
static inline void
init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
			  struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
			  u32 remaining_recv_len,
			  u32 expected_data_transfer_len, u8 num_sges,
			  bool tx_dif_conn_err_en)
{
	/* Remaining data to be received in bytes. Used in validations */
	ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
	ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
	ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
	SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
	SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
		  tx_dif_conn_err_en ? 1 : 0);
}

/* The following function initializes the Local Completion Contexts: */
static inline void
set_local_completion_context(struct e5_nvmetcp_task_context *context)
{
	SET_FIELD(context->ystorm_st_context.state.flags,
		  YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
	SET_FIELD(context->ustorm_st_context.flags,
		  USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
}
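
/*
 * In the fast-path init below, a task with TX data programs its SGL into the
 * Y-Storm section of the task context, a task with RX data programs it into
 * the M-Storm section, and the U-Storm section carries the remaining/expected
 * transfer lengths that the FW uses for validation.
 */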

/* Common Fastpath task init function: */
static inline void
init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
		     enum nvmetcp_task_type task_type,
		     void *pdu_header, void *nvme_cmd,
		     struct storage_sgl_task_params *sgl_task_params)
{
	struct e5_nvmetcp_task_context *context = task_params->context;
	u32 task_size = calc_rw_task_size(task_params, task_type);
	bool slow_io = false;
	u8 num_sges = 0;

	init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);

	/* Tx/Rx: */
	if (task_params->tx_io_size) {
		/* if data to transmit: */
		init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
				      &context->ystorm_st_context.state.data_desc,
				      sgl_task_params);
		slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
					      sgl_task_params->small_mid_sge);
		num_sges =
			(u8)(!slow_io ? min((u32)sgl_task_params->num_sges,
					    (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
			     NVMETCP_WQE_NUM_SGES_SLOWIO);
		if (slow_io) {
			SET_FIELD(context->ystorm_st_context.state.flags,
				  YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
		}
	} else if (task_params->rx_io_size) {
		/* if data to receive: */
		init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
				      &context->mstorm_st_context.data_desc,
				      sgl_task_params);
		num_sges =
			(u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
						  sgl_task_params->small_mid_sge) ?
			     min((u32)sgl_task_params->num_sges,
				 (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
			     NVMETCP_WQE_NUM_SGES_SLOWIO);
		context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
	}

	/* Ustorm context: */
	init_ustorm_task_contexts(&context->ustorm_st_context,
				  &context->ustorm_ag_context,
				  /* Remaining Receive length is the Task Size */
				  task_size,
				  /* The size of the transmitted task */
				  task_size,
				  /* num_sges */
				  num_sges,
				  false);

	/* Set exp_data_acked */
	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
		if (task_params->send_write_incapsule)
			context->ustorm_ag_context.exp_data_acked = task_size;
		else
			context->ustorm_ag_context.exp_data_acked = 0;
	} else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
		context->ustorm_ag_context.exp_data_acked = 0;
	}

	context->ustorm_ag_context.exp_cont_len = 0;
	init_sqe(task_params, sgl_task_params, task_type);
}
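
/*
 * Usage sketch (illustrative only, not taken from this driver): a host
 * driver such as qedn is expected to fill struct nvmetcp_task_params and a
 * storage_sgl_task_params before calling the entry points below, roughly:
 *
 *	task_params.context = task_ctx;		// e5_nvmetcp_task_context buffer
 *	task_params.sqe = sqe;			// SQ element to be built
 *	task_params.itid = itid;
 *	task_params.conn_icid = icid;
 *	task_params.host_cccid = cpu_to_le16(cccid);
 *	task_params.cq_rss_number = cq_id;
 *	task_params.rx_io_size = data_len;	// host READ receives data
 *
 *	sgl_params.sgl = sge_array;		// scsi_sge entries
 *	sgl_params.sgl_phys_addr.lo = lower_32_bits(sgl_dma);
 *	sgl_params.sgl_phys_addr.hi = upper_32_bits(sgl_dma);
 *	sgl_params.total_buffer_size = data_len;
 *	sgl_params.num_sges = sge_count;
 *	sgl_params.small_mid_sge = false;
 *
 *	init_nvmetcp_host_read_task(&task_params, cmd_pdu, nvme_cmd, &sgl_params);
 */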

static void
init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
				struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				struct nvme_command *nvme_cmd,
				struct storage_sgl_task_params *sgl_task_params)
{
	init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
			     cmd_pdu_header, nvme_cmd, sgl_task_params);
}

void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
				 struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				 struct nvme_command *nvme_cmd,
				 struct storage_sgl_task_params *sgl_task_params)
{
	init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
					(void *)nvme_cmd, sgl_task_params);
}

static void
init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
				 struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				 struct nvme_command *nvme_cmd,
				 struct storage_sgl_task_params *sgl_task_params)
{
	init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
			     cmd_pdu_header, nvme_cmd, sgl_task_params);
}

void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
				  struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				  struct nvme_command *nvme_cmd,
				  struct storage_sgl_task_params *sgl_task_params)
{
	init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
					 (void *)nvme_cmd, sgl_task_params);
}

static void
init_common_login_request_task(struct nvmetcp_task_params *task_params,
			       void *login_req_pdu_header,
			       struct storage_sgl_task_params *tx_sgl_task_params,
			       struct storage_sgl_task_params *rx_sgl_task_params)
{
	struct e5_nvmetcp_task_context *context = task_params->context;

	init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
				  NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);

	/* Ustorm Context: */
	init_ustorm_task_contexts(&context->ustorm_st_context,
				  &context->ustorm_ag_context,

				  /* Remaining Receive length is the Task Size */
				  task_params->rx_io_size ?
				  rx_sgl_task_params->total_buffer_size : 0,

				  /* The size of the transmitted task */
				  task_params->tx_io_size ?
				  tx_sgl_task_params->total_buffer_size : 0,
				  0, /* num_sges */
				  0); /* tx_dif_conn_err_en */

	/* SGL context: */
	if (task_params->tx_io_size)
		init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
				      &context->ystorm_st_context.state.data_desc,
				      tx_sgl_task_params);
	if (task_params->rx_io_size)
		init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
				      &context->mstorm_st_context.data_desc,
				      rx_sgl_task_params);

	context->mstorm_st_context.rem_task_size =
		cpu_to_le32(task_params->rx_io_size ?
			    rx_sgl_task_params->total_buffer_size : 0);
	init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
}
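
/*
 * The connection request (ICReq) task is a middle-path task: the TX SGL
 * carries the ICReq PDU and the RX SGL receives the response, so both SGL
 * contexts may be programmed, while num_sges and tx_dif_conn_err_en are
 * passed as 0 above.
 */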

/* The following function initializes the Login task in Host mode: */
void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
				     struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
				     struct storage_sgl_task_params *tx_sgl_task_params,
				     struct storage_sgl_task_params *rx_sgl_task_params)
{
	init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
				       tx_sgl_task_params, rx_sgl_task_params);
}

/*
 * Cleanup tasks only post a TASK_CLEANUP WQE for an existing iTID; no task
 * context or SGL is touched, so only task_params->sqe and itid must be valid.
 */
void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
{
	init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
}