/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
		   u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
			 struct irdma_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
	}
}

/**
 * irdma_nop_hdr - format header section of noop WQE
 * @qp: hw qp ptr
 */
static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp) {
	return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	       FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
	       FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!qp->sq_ring.head)
		return EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	/* make sure WQE is written before valid bit is set */
	udma_to_device_barrier();

	set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void
irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	atomic_thread_fence(memory_order_seq_cst);

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			db_wr32(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		  u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}
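
/*
 * Note on SQ chunking (explanatory, with illustrative numbers not from the
 * original sources): the HW fetches SQ WQEs in chunks of
 * uk_attrs->max_hw_sq_chunk quanta, so irdma_qp_get_next_send_wqe() below
 * never lets a WR straddle a chunk boundary. As a worked example, assume
 * max_hw_sq_chunk is 32, the ring head sits at quanta index 30 and the WR
 * needs 4 quanta: only 2 quanta remain in the current chunk, so 2 NOP WQEs
 * are posted via irdma_nop_1() and the WR starts cleanly at index 32.
 */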

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: (in/out) ptr to size of WR in quanta. Modified in case pad is needed
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
			   u16 *quanta, u32 total_size,
			   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta, wqe_quanta = *quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
	     qp->uk_attrs->max_hw_sq_chunk);

	if (*quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (*quanta + avail_quanta >
		    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);

	irdma_clr_wqes(qp, *wqe_idx);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = htole64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

	return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *
irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}
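
/*
 * Note on valid-bit polarity (explanatory, not from the original sources):
 * instead of zeroing consumed WQEs, each ring tracks a polarity bit that
 * flips every time the head wraps back to index 0 (see the !*wqe_idx checks
 * above). HW treats a WQE as posted only when its IRDMAQPSQ_VALID bit
 * matches the current polarity, so stale entries left over from the
 * previous lap around the ring are ignored without any extra writes.
 */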

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		    bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		   bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	bool ord_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
		ord_fence = true;
		qp->ord_cnt = 0;
	}

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	addl_frag_cnt = op_info->num_lo_sges > 1 ?
	    (op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE,
		       (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE,
		       info->read_fence || ord_fence ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
	      bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void
irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
			    struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter (unused on GEN_1)
 */
static void
irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
			     u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	u32 i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}
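
/*
 * Layout note for the GEN_1 inline copy above (explanatory, derived from
 * the loop constants): the first 32-byte quantum carries 16 data bytes
 * (bytes 0-15; bytes 16-31 hold the remote address and header written
 * separately), and every following quantum is entirely data. Hence
 * irdma_inline_data_size_to_quanta_gen_1() below returns 1 quantum for up
 * to 16 inline bytes and 2 quanta otherwise (up to 16 + 32 = 48 bytes).
 */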

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void
irdma_set_mw_bind_wqe(__le64 *wqe,
		      struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
		       u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	u32 i;
	bool first_quanta = true;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size) {
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
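
/*
 * Worked example for the GEN_2 inline sizing above (explanatory, derived
 * from the copy loop): quantum 0 holds 8 data bytes (bytes 8-15; bytes 0-7
 * and 16-31 are immediate-data and header space), and each additional
 * 32-byte quantum holds 31 data bytes plus one valid byte at its end.
 * That yields the thresholds 8, 8 + 31 = 39, 8 + 62 = 70, ... up to
 * 8 + 186 = 194, after which 8 quanta (the maximum WQE size) are needed.
 */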

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			   struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	read_fence |= info->read_fence;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_send(struct irdma_qp_uk *qp,
		     struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info,
			       bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct irdma_sge sge = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return ENOSPC;

	sge.stag = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_mw_bind - bind Memory Window
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		 bool post_sq)
{
	__le64 *wqe;
	struct irdma_bind_window *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence;
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.bind_window;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return ENOSPC;

	qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
	    FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
		       ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
	    FIELD_PREP(IRDMAQPSQ_VABASEDTO,
		       (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
		       (op_info->mem_window_type_1 ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int
irdma_uk_post_receive(struct irdma_qp_uk *qp,
		      struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return ENOSPC;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void
irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void
irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}
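
/*
 * Note on CQ arming (explanatory summary of irdma_uk_cq_request_notification()
 * below): the CQ shadow area qword at byte offset 32 carries the arm
 * sequence number, the SW CQ select and the arm-next/arm-next-SE bits. The
 * helper bumps the sequence number, sets the requested arm bits, writes the
 * qword back and only then rings cqe_alloc_db, so the HW never observes a
 * doorbell ahead of its shadow state.
 */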

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void
irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				 enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);

	udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

	db_wr32(cq->cq_id, cq->cqe_alloc_db);
}

static void
irdma_copy_quanta(__le64 *dst, __le64 *src, u32 offset, bool flip,
		  bool barrier)
{
	__le64 val;

	get_64bit_val(src, offset, &val);
	set_64bit_val(dst, offset, val);

	get_64bit_val(src, offset + 8, &val);
	if (flip)
		val ^= IRDMAQPSQ_VALID;
	set_64bit_val(dst, offset + 8, val);

	get_64bit_val(src, offset + 24, &val);
	if (flip)
		val ^= IRDMAQPSQ_VALID;
	if (barrier)
		udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
	set_64bit_val(dst, offset + 24, val);
}

static void
irdma_copy_wqe(__le64 *dst, __le64 *src, u8 wqe_quanta,
	       bool flip_polarity)
{
	u32 offset;

	offset = 32;
	while (--wqe_quanta) {
		irdma_copy_quanta(dst, src, offset, flip_polarity, false);
		offset += 32;
	}

	irdma_copy_quanta(dst, src, 0, flip_polarity, true);
}

static void
irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
		     u32 end_idx)
{
	__le64 *dst_wqe, *src_wqe;
	u32 wqe_idx;
	u8 wqe_quanta = qp->rq_wqe_size_multiplier;
	bool flip_polarity;
	u64 val;

	libirdma_debug("reposting_wqes: from start_idx=%d to end_idx = %d\n", start_idx, end_idx);
	if (pthread_spin_lock(qp->lock))
		return;
	while (start_idx != end_idx) {
		IRDMA_RING_SET_TAIL(qp->rq_ring, start_idx + 1);
		src_wqe = qp->rq_base[start_idx * qp->rq_wqe_size_multiplier].elem;
		dst_wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);

		/* Check to see if polarity has changed */
		get_64bit_val(src_wqe, 24, &val);
		if (FIELD_GET(IRDMAQPSQ_VALID, val) != qp->rwqe_polarity)
			flip_polarity = true;
		else
			flip_polarity = false;

		qp->rq_wrid_array[wqe_idx] = qp->rq_wrid_array[start_idx];
		irdma_copy_wqe(dst_wqe, src_wqe, wqe_quanta, flip_polarity);

		start_idx = (start_idx + 1) % qp->rq_size;
	}

	pthread_spin_unlock(qp->lock);
}
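
/*
 * Note on out-of-order RQ completions (explanatory): when the
 * IRDMA_FEATURE_RELAX_RQ_ORDER feature flag is set, irdma_check_rq_cqe()
 * below tolerates a completion arriving for an RQ index other than the
 * expected one by reposting the skipped WQEs (irdma_repost_rq_wqes() above)
 * and accepting the completion. Without the feature flag, an unexpected
 * index is reported as an error completion at the expected index instead.
 */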

static int
irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
{
	u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;

	if (*array_idx != exp_idx) {
		if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RELAX_RQ_ORDER) {
			irdma_repost_rq_wqes(qp, exp_idx, *array_idx);
			qp->last_rx_cmpl_idx = *array_idx;

			return 0;
		}

		*array_idx = exp_idx;
		qp->last_rx_cmpl_idx = exp_idx;

		return -1;
	}

	qp->last_rx_cmpl_idx = *array_idx;

	return 0;
}

/**
 * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
 *
 * @ring: sq/rq ring
 * @flush_seen: information if flush for specific ring was already seen
 * @comp_status: completion status
 * @wqe_idx: new value of WQE index returned if there is more work on ring
 */
static inline int
irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
				enum irdma_cmpl_status comp_status,
				u32 *wqe_idx)
{
	if (flush_seen) {
		if (IRDMA_RING_MORE_WORK(ring))
			*wqe_idx = ring.tail;
		else
			return ENOENT;
	}

	return 0;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
		      struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	udma_from_device_barrier();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		udma_from_device_barrier();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		switch (info->major_err) {
		case IRDMA_FLUSH_MAJOR_ERR:
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
			}
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			break;
		default:
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
			break;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
	get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

	info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	if (!qp || qp->destroy_pending) {
		ret_code = EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
							   qp->rq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			info->signaled = 1;
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
			info->signaled = 1;
			if (irdma_check_rq_cqe(qp, &array_idx)) {
				info->wr_id = qp->rq_wrid_array[array_idx];
				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
				IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
				return 0;
			}
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
							   qp->sq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (pthread_spin_lock(qp->lock)) {
				ret_code = ENOENT;
				goto exit;
			}
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				pthread_spin_unlock(qp->lock);
				ret_code = ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, IRDMA_BYTE_24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->signaled = qp->sq_wrtrk_array[tail].signaled;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);

			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
			pthread_spin_unlock(qp->lock);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;
	}

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
	}

	return ret_code;
}

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int
irdma_round_up_wq(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
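
/*
 * Worked example for irdma_round_up_wq() above (explanatory): the
 * subtract-one/smear/add-one sequence rounds up to the next power of two.
 * For wqdepth = 300: 299 is 0b100101011; OR-ing in its right shifts by 1,
 * 2, 4, 8 and 16 smears the top set bit down to give 0b111111111 (511),
 * and the final increment yields 512. A value already a power of two is
 * returned unchanged.
 */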

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: maximum scatter-gather elements per wqe
 * @inline_data: maximum inline data size
 * @shift: returns the shift needed based on sge
 *
 * Shift is used to left-shift the wqe size based on the number of SGEs and
 * the inline data size.
 * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64 bytes).
 * For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128 bytes).
 * Otherwise shift = 3 (wqe size of 256 bytes).
 */
void
irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
		    u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*sqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return EINVAL;

	return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*rqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return EINVAL;

	return 0;
}
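
/*
 * Worked example for the depth helpers above (illustrative numbers):
 * with sq_size = 100 WQEs at shift = 1, the SQ needs 100 << 1 = 200 quanta
 * plus the IRDMA_SQ_RSVD reserved quanta, and irdma_round_up_wq() rounds
 * the sum up to 256. The result is then raised to at least
 * min_hw_wq_size << shift and rejected with EINVAL if it exceeds
 * max_hw_wq_quanta.
 */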

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void
irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
			    struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
 * @ukinfo: qp initialization info
 * @sq_shift: Returns shift of SQ
 * @rq_shift: Returns shift of RQ
 */
void
irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
		       u8 *rq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}
}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *sq_depth, u8 *sq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);
	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
				   *sq_shift, sq_depth);

	return status;
}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int
irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *rq_depth, u8 *rq_shift)
{
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}

	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
				   *rq_shift, rq_depth);

	return status;
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of wqe * the number of wqes should then be the
 * amount of memory allocated for sq and rq.
 */
int
irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return EINVAL;

	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->last_rx_cmpl_idx = 0xffffffff;
	qp->rd_fence_rate = info->rd_fence_rate;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << info->sq_shift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = info->rq_shift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
int
irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
int
irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
		if ((void *)(irdma_uintptr) comp_ctx == q)
			set_64bit_val(cqe, IRDMA_BYTE_8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
	return 0;
}
1857 { 1858 __le64 *wqe; 1859 u64 hdr; 1860 u32 wqe_idx; 1861 struct irdma_post_sq_info info = {0}; 1862 u16 quanta = IRDMA_QP_WQE_MIN_QUANTA; 1863 1864 info.push_wqe = qp->push_db ? true : false; 1865 info.wr_id = wr_id; 1866 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info); 1867 if (!wqe) 1868 return ENOSPC; 1869 1870 set_64bit_val(wqe, IRDMA_BYTE_0, 0); 1871 set_64bit_val(wqe, IRDMA_BYTE_8, 0); 1872 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1873 1874 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) | 1875 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | 1876 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); 1877 1878 udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */ 1879 1880 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1881 1882 if (info.push_wqe) 1883 irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq); 1884 else if (post_sq) 1885 irdma_uk_qp_post_wr(qp); 1886 1887 return 0; 1888 } 1889 1890 /** 1891 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ 1892 * @frag_cnt: number of fragments 1893 * @quanta: quanta for frag_cnt 1894 */ 1895 int 1896 irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) 1897 { 1898 switch (frag_cnt) { 1899 case 0: 1900 case 1: 1901 *quanta = IRDMA_QP_WQE_MIN_QUANTA; 1902 break; 1903 case 2: 1904 case 3: 1905 *quanta = 2; 1906 break; 1907 case 4: 1908 case 5: 1909 *quanta = 3; 1910 break; 1911 case 6: 1912 case 7: 1913 *quanta = 4; 1914 break; 1915 case 8: 1916 case 9: 1917 *quanta = 5; 1918 break; 1919 case 10: 1920 case 11: 1921 *quanta = 6; 1922 break; 1923 case 12: 1924 case 13: 1925 *quanta = 7; 1926 break; 1927 case 14: 1928 case 15: /* when immediate data is present */ 1929 *quanta = 8; 1930 break; 1931 default: 1932 return EINVAL; 1933 } 1934 1935 return 0; 1936 } 1937 1938 /** 1939 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ 1940 * @frag_cnt: number of fragments 1941 * @wqe_size: size in bytes given frag_cnt 1942 */ 1943 int 1944 irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) 1945 { 1946 switch (frag_cnt) { 1947 case 0: 1948 case 1: 1949 *wqe_size = 32; 1950 break; 1951 case 2: 1952 case 3: 1953 *wqe_size = 64; 1954 break; 1955 case 4: 1956 case 5: 1957 case 6: 1958 case 7: 1959 *wqe_size = 128; 1960 break; 1961 case 8: 1962 case 9: 1963 case 10: 1964 case 11: 1965 case 12: 1966 case 13: 1967 case 14: 1968 *wqe_size = 256; 1969 break; 1970 default: 1971 return EINVAL; 1972 } 1973 1974 return 0; 1975 } 1976