/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2026 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_ws.h"
#include "irdma_protos.h"

/**
 * irdma_qp_from_entry - given an entry, get to the qp structure
 * @entry: points to the list entry embedded in the qp structure
 */
static struct irdma_sc_qp *
irdma_qp_from_entry(struct list_head *entry)
{
	if (!entry)
		return NULL;

	return (struct irdma_sc_qp *)((char *)entry -
				      offsetof(struct irdma_sc_qp, list));
}

/**
 * irdma_get_qp_from_list - get next qp from a list
 * @head: list head of qps
 * @qp: current qp, or NULL to start from the head
 */
struct irdma_sc_qp *
irdma_get_qp_from_list(struct list_head *head,
		       struct irdma_sc_qp *qp)
{
	struct list_head *lastentry;
	struct list_head *entry = NULL;

	if (list_empty(head))
		return NULL;

	if (!qp) {
		entry = head->next;
	} else {
		lastentry = &qp->list;
		entry = lastentry->next;
		if (entry == head)
			return NULL;
	}

	return irdma_qp_from_entry(entry);
}
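/*
 * Usage sketch (illustrative only): all qps on a qos list can be
 * visited by feeding each result back in as the cursor. The caller
 * must hold the matching qos_mutex, as irdma_sc_suspend_resume_qps()
 * does below.
 *
 *	struct irdma_sc_qp *iter = NULL;
 *
 *	while ((iter = irdma_get_qp_from_list(&vsi->qos[i].qplist, iter)))
 *		;	// operate on iter here
 */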
/**
 * irdma_get_qp_qs - return qs_handle for the qp
 * @qp: qp for qset
 *
 * Returns the queue set that should be used for a given qp. The qos
 * mutex should be acquired before calling.
 */
static u16 irdma_get_qp_qs(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	return vsi->qos[qp->user_pri].qs_handle;
}

/**
 * irdma_sc_suspend_resume_qps - suspend/resume all qps on VSI
 * @vsi: the VSI struct pointer
 * @op: set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
 */
void
irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
{
	struct irdma_sc_qp *qp = NULL;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		mutex_lock(&vsi->qos[i].qos_mutex);
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (op == IRDMA_OP_RESUME) {
				if (!qp->suspended) {
					qp = irdma_get_qp_from_list(&vsi->qos[i].qplist,
								    qp);
					continue;
				}
				if (!qp->dev->ws_add(vsi, i)) {
					qp->qs_handle = irdma_get_qp_qs(qp);
					if (!irdma_cqp_qp_suspend_resume(qp, op))
						qp->suspended = false;
				} else {
					if (!irdma_cqp_qp_suspend_resume(qp, op))
						qp->suspended = false;
					irdma_modify_qp_to_err(qp);
				}
			} else if (op == IRDMA_OP_SUSPEND) {
				/* issue cqp suspend command */
				if ((qp->qp_state == IRDMA_QP_STATE_RTS ||
				     qp->qp_state == IRDMA_QP_STATE_RTR) &&
				    !irdma_cqp_qp_suspend_resume(qp, op)) {
					atomic_inc(&vsi->qp_suspend_reqs);
					qp->suspended = true;
				}
			}
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		}
		mutex_unlock(&vsi->qos[i].qos_mutex);
	}
}
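/**
 * irdma_set_qos_info - set qos fields on the VSI from l2 parameters
 * @vsi: RDMA VSI pointer
 * @l2p: l2 parameters supplying the qos configuration
 */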
static void
irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p)
{
	u8 i;

	vsi->qos_rel_bw = l2p->vsi_rel_bw;
	vsi->qos_prio_type = l2p->vsi_prio_type;
	vsi->dscp_mode = l2p->dscp_mode;
	if (l2p->dscp_mode) {
		irdma_memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
			l2p->up2tc[i] = i;
	}
	for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
		vsi->tc_print_warning[i] = true;
	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
			irdma_init_config_check(&vsi->cfg_check[i],
						l2p->up2tc[i], i,
						l2p->qs_handle_list[i]);
		vsi->qos[i].traffic_class = l2p->up2tc[i];
		vsi->qos[i].rel_bw =
		    l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
		vsi->qos[i].prio_type =
		    l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
		vsi->qos[i].valid = false;
	}
}

/**
 * irdma_change_l2params - given the new l2 parameters, change all qps
 * @vsi: RDMA VSI pointer
 * @l2params: new parameters from l2
 */
void
irdma_change_l2params(struct irdma_sc_vsi *vsi,
		      struct irdma_l2params *l2params)
{
	if (l2params->tc_changed) {
		vsi->tc_change_pending = false;
		irdma_set_qos_info(vsi, l2params);
		irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
	}
	if (l2params->mtu_changed) {
		vsi->mtu = l2params->mtu;
		if (vsi->ieq)
			irdma_reinitialize_ieq(vsi);
	}
}

/**
 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
void
irdma_qp_rem_qos(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
		    "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
		    qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
		    qp->on_qoslist);
	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
	if (qp->on_qoslist) {
		qp->on_qoslist = false;
		list_del(&qp->list);
	}
	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}

/**
 * irdma_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void
irdma_qp_add_qos(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
	if (!qp->on_qoslist) {
		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
		qp->on_qoslist = true;
		qp->qs_handle = irdma_get_qp_qs(qp);
		irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
			    "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
			    qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
			    qp->on_qoslist);
	}
	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}

/**
 * irdma_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 * @abi_ver: user/kernel ABI version
 */
void
irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		 int abi_ver)
{
	pd->pd_id = pd_id;
	pd->abi_ver = abi_ver;
	pd->dev = dev;
}
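/*
 * The CQP helpers below all follow the same posting pattern (sketch,
 * with a hypothetical fill step):
 *
 *	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 *	if (!wqe)
 *		return -ENOSPC;
 *	... fill the WQE quad words ...
 *	irdma_wmb();	// order WQE writes before the valid bit
 *	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);	// hdr carries the valid bit
 *	if (post_sq)
 *		irdma_sc_cqp_post_sq(cqp);
 *
 * scratch is returned in the CQP completion so the caller can match
 * requests to completions.
 */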
/**
 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_add_arp_cache_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);

	set_64bit_val(wqe, IRDMA_BYTE_16, irdma_mac_to_u64(info->mac_addr));

	hdr = info->arp_index |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	    FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, info->permanent) |
	    FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, true) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of the entry to delete
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 arp_index, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = arp_index |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
			    struct irdma_apbvt_info *info,
			    u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, info->port);

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
	    FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_APBVT WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
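/*
 * Usage sketch (illustrative, hypothetical port value): opening the
 * accelerated port bitmap for TCP port 6789 before a listen is set up:
 *
 *	struct irdma_apbvt_info info = { .port = 6789, .add = true };
 *
 *	irdma_sc_manage_apbvt_entry(cqp, &info, scratch, true);
 */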
/**
 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started.
 * For passive connections, when a listener is created, it will
 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive
 * connections) or sent (active connections), this routine is
 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
 * and the quad is passed in info.
 *
 * When the iwarp connection is done and its state moves to RTS, the
 * quad hash entry in the hardware will point to the iwarp qp
 * number and requires no calls from the driver.
 */
static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
				  struct irdma_qhash_table_info *info,
				  u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;
	u16 qs_handle;
	struct irdma_sc_vsi *vsi = info->vsi;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_0, irdma_mac_to_u64(info->mac_addr));

	qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
	if (info->ipv4_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
	} else {
		set_64bit_val(wqe, IRDMA_BYTE_56,
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));

		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
	}

	qs_handle = vsi->qos[info->user_pri].qs_handle;

	qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE, qs_handle);
	if (info->vlan_valid)
		qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
	set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
	if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe, IRDMA_BYTE_40,
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
			set_64bit_val(wqe, IRDMA_BYTE_32,
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
		} else {
			set_64bit_val(wqe, IRDMA_BYTE_32,
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
		}
	}

	set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
	temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
		       IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, temp);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_QHASH WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
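/*
 * Bring-up sketch (illustrative): callers typically take a qp to
 * hardware in three steps -- initialize the SC qp, write its host
 * context, then issue the CQP create. init_info, ctx_info,
 * create_info and scratch are hypothetical caller-owned values:
 *
 *	if (irdma_sc_qp_init(qp, &init_info))
 *		return -EINVAL;
 *	irdma_sc_qp_setctx_roce(qp, qp->hw_host_ctx, &ctx_info);
 *	if (irdma_sc_qp_create(qp, &create_info, scratch, true))
 *		return -ENOSPC;
 */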
/**
 * irdma_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 */
int
irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
	int ret_code;
	u32 pble_obj_cnt;
	u16 wqe_size;

	if (info->qp_uk_init_info.max_sq_frag_cnt >
	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
	    info->qp_uk_init_info.max_rq_frag_cnt >
	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
		return -EINVAL;

	qp->dev = info->pd->dev;
	qp->vsi = info->vsi;
	qp->ieq_qp = info->vsi->exception_lan_q;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;
	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
	ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;

	qp->virtual_map = info->virtual_map;
	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;

	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
		return -EINVAL;

	qp->llp_stream_handle = (void *)(-1);
	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    IRDMA_QUEUE_TYPE_SQ_RQ);
	irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
		    "hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size,
		    qp->qp_uk.sq_ring.size);
	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
		wqe_size = IRDMA_WQE_SIZE_128;
	else
		ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
						       &wqe_size);
	if (ret_code)
		return ret_code;

	qp->hw_rq_size =
	    irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
				       (wqe_size / IRDMA_QP_WQE_MIN_SIZE),
				       IRDMA_QUEUE_TYPE_SQ_RQ);
	irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
		    "hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);

	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;

	return 0;
}

/**
 * irdma_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
		   u64 scratch, bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;

	cqp = qp->dev->cqp;
	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
	    qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
		return -EINVAL;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
		       info->arp_cache_idx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_CREATE WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
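/*
 * Usage sketch (illustrative): pushing a qp to the error state through
 * the CQP with irdma_sc_qp_modify() below; m_info and scratch are
 * hypothetical caller-owned values, and IRDMA_QP_STATE_ERROR is the
 * assumed error-state define:
 *
 *	struct irdma_modify_qp_info m_info = {0};
 *
 *	m_info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
 *	irdma_sc_qp_modify(qp, &m_info, scratch, true);
 */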
/**
 * irdma_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
		   u64 scratch, bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;
	u8 term_actions = 0;
	u8 term_len = 0;

	cqp = qp->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
		if (info->dont_send_fin)
			term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
		if (info->dont_send_term)
			term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
		if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
		    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
			term_len = info->termlen;
	}

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
		      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
		       info->cached_var_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
		       info->remove_hash_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
		       info->arp_cache_idx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_MODIFY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag if to remove hash idx
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
		    bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;

	cqp = qp->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_DESTROY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_get_encoded_ird_size - get encoded IRD size
 * @ird_size: IRD size
 *
 * The ird from the connection is rounded to a supported HW setting and
 * then encoded for the ird_size field of qp_ctx. Consumers are expected
 * to provide a valid ird size based on hardware attributes. IRD size
 * defaults to a value of 4 in case of invalid input.
 */
static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
{
	switch (ird_size ?
		roundup_pow_of_two(2 * ird_size) : 4) {
	case 256:
		return IRDMA_IRD_HW_SIZE_256;
	case 128:
		return IRDMA_IRD_HW_SIZE_128;
	case 64:
	case 32:
		return IRDMA_IRD_HW_SIZE_64;
	case 16:
	case 8:
		return IRDMA_IRD_HW_SIZE_16;
	case 4:
	default:
		break;
	}

	return IRDMA_IRD_HW_SIZE_4;
}
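/*
 * Worked example for the rounding above (illustrative values): an
 * incoming ird_size of 10 doubles to 20, rounds up to the power of two
 * 32, and encodes as IRDMA_IRD_HW_SIZE_64; an ird_size of 0 falls back
 * to 4 and encodes as IRDMA_IRD_HW_SIZE_4.
 */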
/**
 * irdma_sc_qp_setctx_roce - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
void
irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
			struct irdma_qp_host_ctx_info *info)
{
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp;
	u8 push_mode_en;
	u32 push_idx;

	roce_info = info->roce_info;
	udp = info->udp_info;

	qp->user_pri = info->user_pri;
	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
		push_mode_en = 0;
		push_idx = 0;
	} else {
		push_mode_en = 1;
		push_idx = qp->push_idx;
	}
	set_64bit_val(qp_ctx, IRDMA_BYTE_0,
		      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
		      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
		      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
		      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
		      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
		      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
		      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
		      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
		      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
		      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
		      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
		      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
		      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
	if (roce_info->dcqcn_en || roce_info->dctcp_en) {
		udp->tos &= ~ECN_CODE_PT_MASK;
		udp->tos |= ECN_CODE_PT_VAL;
	}

	set_64bit_val(qp_ctx, IRDMA_BYTE_24,
		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
		      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) |
		      FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
		      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
		      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
	set_64bit_val(qp_ctx, IRDMA_BYTE_32,
		      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
		      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_40,
		      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
		      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_48,
		      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
		      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
		      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_56,
		      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
		      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
		      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
		      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
	set_64bit_val(qp_ctx, IRDMA_BYTE_64,
		      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
		      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
	set_64bit_val(qp_ctx, IRDMA_BYTE_80,
		      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
		      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
	set_64bit_val(qp_ctx, IRDMA_BYTE_88,
		      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
	set_64bit_val(qp_ctx, IRDMA_BYTE_96,
		      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
		      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
	set_64bit_val(qp_ctx, IRDMA_BYTE_112,
		      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
	set_64bit_val(qp_ctx, IRDMA_BYTE_128,
		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
		      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
		      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
		      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
	set_64bit_val(qp_ctx, IRDMA_BYTE_144,
		      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_152,
		      FIELD_PREP(IRDMAQPC_MACADDRESS,
				 irdma_mac_to_u64(roce_info->mac_addr)));
	set_64bit_val(qp_ctx, IRDMA_BYTE_160,
		      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
		      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
		      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
		      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
		      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
		      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
		      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
		      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
		      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
		      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
		      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
	set_64bit_val(qp_ctx, IRDMA_BYTE_184,
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_192,
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_200,
		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
	set_64bit_val(qp_ctx, IRDMA_BYTE_208,
		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}
/**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			       bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ALLOCATE_LOCAL_MAC WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_add_local_mac_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_local_mac_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_32, irdma_mac_to_u64(info->mac_addr));

	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ADD_LOCAL_MAC WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac entry delete
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 entry_idx, u8 ignore_ref_count,
			     bool post_sq)
{
	__le64 *wqe;
	u64 header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
	    FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * irdma_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
void
irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
		   struct irdma_qp_host_ctx_info *info)
{
	struct irdma_iwarp_offload_info *iw;
	struct irdma_tcp_offload_info *tcp;
	struct irdma_sc_dev *dev;
	u8 push_mode_en;
	u32 push_idx;
	u64 qw0, qw3, qw7 = 0, qw16 = 0;
	u64 mac = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	dev = qp->dev;
	if (iw->rcv_mark_en) {
		qp->pfpdu.marker_len = 4;
		qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
	}
	qp->user_pri = info->user_pri;
	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
		push_mode_en = 0;
		push_idx = 0;
	} else {
		push_mode_en = 1;
		push_idx = qp->push_idx;
	}
	qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
	    FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
	    FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
	    FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
	    FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
	    FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
	    FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);

	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);

	qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
	    FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
				  qp->src_mac_addr_idx);
	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
		      FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
	if (info->iwarp_info_valid) {
		qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
		    FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
		    FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
		    FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
		    FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
		    FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
		    FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
			       iw->err_rq_idx_valid);
		qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
		qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
		    FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
		set_64bit_val(qp_ctx, IRDMA_BYTE_144,
			      FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
			      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));

		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
			mac = FIELD_PREP(IRDMAQPC_MACADDRESS,
					 irdma_mac_to_u64(iw->mac_addr));

		set_64bit_val(qp_ctx, IRDMA_BYTE_152,
			      mac | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
		set_64bit_val(qp_ctx, IRDMA_BYTE_160,
			      FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
			      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
			      FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
			      FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
			      FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
			      FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
			      FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
			      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
			      FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
			      FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
			      FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
			      FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
			      FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset) |
			      FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset) |
			      FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
	}
	if (info->tcp_info_valid) {
		qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
		    FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
		    FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
			       tcp->insert_vlan_tag) |
		    FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
		    FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
		    FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
		    FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);

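		/*
		 * With DCTCP/ECN enabled, advertise an ECN-capable
		 * transport by rewriting the ECN codepoint bits of the
		 * TOS byte (illustrative note added for clarity).
		 */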
		if (iw->ecn_en || iw->dctcp_en) {
			tcp->tos &= ~ECN_CODE_PT_MASK;
			tcp->tos |= ECN_CODE_PT_VAL;
		}

		qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
		    FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
		    FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
		    FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
		    FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
			qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);

			qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
		}
		set_64bit_val(qp_ctx, IRDMA_BYTE_32,
			      FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_40,
			      FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
			      FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
			      FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
			      FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
		qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
		    FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
		    FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
			       tcp->ignore_tcp_opt) |
		    FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
			       tcp->ignore_tcp_uns_opt) |
		    FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
		    FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
		    FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
		set_64bit_val(qp_ctx, IRDMA_BYTE_72,
			      FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
			      FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
		set_64bit_val(qp_ctx, IRDMA_BYTE_80,
			      FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
			      FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
		set_64bit_val(qp_ctx, IRDMA_BYTE_88,
			      FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
			      FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
		set_64bit_val(qp_ctx, IRDMA_BYTE_96,
			      FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
			      FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
		set_64bit_val(qp_ctx, IRDMA_BYTE_104,
			      FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
			      FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
		set_64bit_val(qp_ctx, IRDMA_BYTE_112,
			      FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
			      FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
		set_64bit_val(qp_ctx, IRDMA_BYTE_120,
			      FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
			      FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
		qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
		    FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
		set_64bit_val(qp_ctx, IRDMA_BYTE_184,
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_192,
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_200,
			      FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
			      FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
		set_64bit_val(qp_ctx, IRDMA_BYTE_208,
			      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
	}

	set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0);
	set_64bit_val(qp_ctx, IRDMA_BYTE_24, qw3);
	set_64bit_val(qp_ctx, IRDMA_BYTE_56, qw7);
	set_64bit_val(qp_ctx, IRDMA_BYTE_128, qw16);

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}

/**
 * irdma_sc_alloc_stag - mr stag alloc
 * @dev: sc device struct
 * @info: stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
		    struct irdma_allocate_stag_info *info,
		    u64 scratch, bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;
	enum irdma_page_size page_size;

	if (!info->total_len && !info->all_memory)
		return -EINVAL;

	if (info->page_size == 0x40000000)
		page_size = IRDMA_PAGE_SIZE_1G;
	else if (info->page_size == 0x200000)
		page_size = IRDMA_PAGE_SIZE_2M;
	else
		page_size = IRDMA_PAGE_SIZE_4K;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
	set_64bit_val(wqe, IRDMA_BYTE_40,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));

	if (info->chunk_size)
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	/* for FNIC, a PF can send this WQE for a VF */
	hdr |= FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "ALLOC_STAG WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
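/*
 * Worked example for the fbo computation in irdma_sc_mr_reg_non_shared()
 * below (illustrative, hypothetical values): with info->page_size =
 * 0x200000 (2M) and info->va = 0x7f0000203000, fbo = va & (page_size - 1)
 * = 0x3000, i.e. the first-byte offset of the region within its first
 * huge page.
 */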
/**
 * irdma_sc_mr_reg_non_shared - non-shared mr registration
 * @dev: sc device struct
 * @info: mr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
			   struct irdma_reg_ns_stag_info *info,
			   u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 fbo;
	struct irdma_sc_cqp *cqp;
	u64 hdr;
	u32 pble_obj_cnt;
	bool remote_access;
	u8 addr_type;
	enum irdma_page_size page_size;

	if (!info->total_len && !info->all_memory)
		return -EINVAL;

	if (info->page_size == 0x40000000)
		page_size = IRDMA_PAGE_SIZE_1G;
	else if (info->page_size == 0x200000)
		page_size = IRDMA_PAGE_SIZE_2M;
	else if (info->page_size == 0x1000)
		page_size = IRDMA_PAGE_SIZE_4K;
	else
		return -EINVAL;

	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;

	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
		return -EINVAL;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	fbo = info->va & (info->page_size - 1);

	set_64bit_val(wqe, IRDMA_BYTE_0,
		      (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
		       info->va : fbo));
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
	if (!info->chunk_size)
		set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa);
	else
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));

	hdr = info->hmc_fcn_index;
	set_64bit_val(wqe, IRDMA_BYTE_40, hdr);

	addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MR_REG_NS WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_dealloc_stag - deallocate stag
 * @dev: sc device struct
 * @info: dealloc stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
		      struct irdma_dealloc_stag_info *info,
		      u64 scratch, bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_SKIPFLUSH, info->skip_flush_markers) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "DEALLOC_STAG WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_mw_alloc - mw allocate
 * @dev: sc device struct
 * @info: memory window allocation information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
		  struct irdma_mw_alloc_info *info, u64 scratch,
		  bool post_sq)
{
	u64 hdr;
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
		       info->mw1_bind_dont_vldt_key) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MW_ALLOC WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
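/*
 * Note: unlike the CQP-based stag operations above,
 * irdma_sc_mr_fast_register() below builds its WQE directly on the
 * qp's send queue, so completion is reported through the qp's CQ
 * rather than a CQP completion.
 */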
/**
 * irdma_sc_mr_fast_register - posts RDMA fast register mr WR to iwarp qp
 * @qp: sc qp struct
 * @info: fast mr info
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
			  struct irdma_fast_reg_stag_info *info,
			  bool post_sq)
{
	u64 temp, hdr;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
	enum irdma_page_size page_size;
	struct irdma_post_sq_info sq_info = {0};

	if (info->page_size == 0x40000000)
		page_size = IRDMA_PAGE_SIZE_1G;
	else if (info->page_size == 0x200000)
		page_size = IRDMA_PAGE_SIZE_2M;
	else
		page_size = IRDMA_PAGE_SIZE_4K;

	sq_info.wr_id = info->wr_id;
	sq_info.signaled = info->signaled;
	sq_info.push_wqe = info->push_wqe;

	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, &quanta, 0, &sq_info);
	if (!wqe)
		return -ENOSPC;

	qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	irdma_debug(qp->dev, IRDMA_DEBUG_MR,
		    "wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
		    (unsigned long long)info->wr_id, wqe_idx,
		    &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);

	temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
	    (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, IRDMA_BYTE_0, temp);

	temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
			 info->first_pm_pbl_index >> 16);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
		      FIELD_PREP(IRDMAQPSQ_PBLADDR, info->reg_addr_pa >> IRDMA_HW_PAGE_SHIFT));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      info->total_len |
		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));

	hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
	    FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
	    FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
	    FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
	    FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
	    FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe,
			quanta * IRDMA_QP_WQE_MIN_SIZE);
	if (sq_info.push_wqe)
		irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(&qp->qp_uk);

	return 0;
}

/**
 * irdma_sc_gen_rts_ae - request AE generated after RTS
 * @qp: sc qp struct
 */
static void
irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
{
	__le64 *wqe;
	u64 hdr;
	struct irdma_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;

	wqe = qp_uk->sq_base[1].elem;

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "NOP W/LOCAL FENCE WQE", wqe,
			IRDMA_QP_WQE_MIN_SIZE);

	wqe = qp_uk->sq_base[2].elem;
	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe,
			IRDMA_QP_WQE_MIN_SIZE);
	if (qp->qp_uk.start_wqe_idx) {
		wqe = qp_uk->sq_base[3].elem;
		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
		    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
		    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
		irdma_wmb();	/* make sure WQE is written before valid bit is set */

		set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	}
}
/**
 * irdma_sc_send_lsmm - send last streaming mode message
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 * @stag: stag of lsmm buffer
 */
void
irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
		   irdma_stag stag)
{
	__le64 *wqe;
	u64 hdr;
	struct irdma_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
		set_64bit_val(wqe, IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
	} else {
		set_64bit_val(wqe, IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
	}
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
	    FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
	    FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM WQE", wqe,
			IRDMA_QP_WQE_MIN_SIZE);

	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
		irdma_sc_gen_rts_ae(qp);
}

/**
 * irdma_sc_send_rtt - send last read0 or write0
 * @qp: sc qp struct
 * @read: do read0 or write0
 */
void
irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
{
	__le64 *wqe;
	u64 hdr;
	struct irdma_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	if (read) {
		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
			set_64bit_val(wqe, IRDMA_BYTE_8,
				      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
		} else {
			set_64bit_val(wqe, IRDMA_BYTE_8,
				      (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID,
							       qp->qp_uk.swqe_polarity));
		}
		hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
		    FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
		    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);

	} else {
		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
			set_64bit_val(wqe, IRDMA_BYTE_8, 0);
		} else {
			set_64bit_val(wqe, IRDMA_BYTE_8,
				      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
		}
		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
		    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	}

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "RTR WQE", wqe,
			IRDMA_QP_WQE_MIN_SIZE);

	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
		irdma_sc_gen_rts_ae(qp);
}

/**
 * irdma_iwarp_opcode - determine the iwarp opcode of an incoming packet
 * @info: aeq info for the packet
 * @pkt: packet for error
 */
static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
{
	BE16 *mpa;
	u32 opcode = 0xffffffff;

	if (info->q2_data_written) {
		mpa = (BE16 *)pkt;
		opcode = IRDMA_NTOHS(mpa[1]) & 0xf;
	}

	return opcode;
}

/**
 * irdma_locate_mpa - return pointer to mpa in the pkt
 * @pkt: packet with data
 */
static u8 *irdma_locate_mpa(u8 *pkt)
{
	/* skip over ethernet header */
	pkt += IRDMA_MAC_HLEN;

	/* skip over IP and TCP headers */
	pkt += 4 * (pkt[0] & 0x0f);
	pkt += 4 * ((pkt[12] >> 4) & 0x0f);

	return pkt;
}
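/*
 * Worked example for the header walk above (illustrative values):
 * pkt[0] & 0x0f is the IP header length in 32-bit words (5 -> 20 bytes
 * without options), and pkt[12] >> 4 is the TCP data offset in 32-bit
 * words (8 -> 32 bytes with options), leaving pkt at the MPA header.
 */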
1676 hdr->hdrct = DDP_LEN_FLAG; 1677 if (pkt[2] & 0x80) { 1678 *is_tagged = 1; 1679 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { 1680 *copy_len += TERM_DDP_LEN_TAGGED; 1681 hdr->hdrct |= DDP_HDR_FLAG; 1682 } 1683 } else { 1684 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { 1685 *copy_len += TERM_DDP_LEN_UNTAGGED; 1686 hdr->hdrct |= DDP_HDR_FLAG; 1687 } 1688 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) && 1689 ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) { 1690 *copy_len += TERM_RDMA_LEN; 1691 hdr->hdrct |= RDMA_HDR_FLAG; 1692 } 1693 } 1694 } 1695 } 1696 1697 /** 1698 * irdma_bld_terminate_hdr - build terminate message header 1699 * @qp: qp associated with received terminate AE 1700 * @info: the struct containing AE information 1701 */ 1702 static int 1703 irdma_bld_terminate_hdr(struct irdma_sc_qp *qp, 1704 struct irdma_aeqe_info *info) 1705 { 1706 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; 1707 int copy_len = 0; 1708 u8 is_tagged = 0; 1709 u32 opcode; 1710 struct irdma_terminate_hdr *termhdr; 1711 1712 termhdr = (struct irdma_terminate_hdr *)qp->q2_buf; 1713 memset(termhdr, 0, Q2_BAD_FRAME_OFFSET); 1714 1715 if (info->q2_data_written) { 1716 pkt = irdma_locate_mpa(pkt); 1717 irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged); 1718 } 1719 1720 opcode = irdma_iwarp_opcode(info, pkt); 1721 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; 1722 qp->sq_flush_code = info->sq; 1723 qp->rq_flush_code = info->rq; 1724 1725 switch (info->ae_id) { 1726 case IRDMA_AE_AMP_UNALLOCATED_STAG: 1727 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1728 if (opcode == IRDMA_OP_TYPE_RDMA_WRITE) 1729 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, 1730 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1731 DDP_TAGGED_INV_STAG); 1732 else 1733 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1734 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1735 RDMAP_INV_STAG); 1736 break; 1737 case IRDMA_AE_AMP_BOUNDS_VIOLATION: 1738 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1739 if (info->q2_data_written) 1740 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, 1741 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1742 DDP_TAGGED_BOUNDS); 1743 else 1744 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1745 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1746 RDMAP_INV_BOUNDS); 1747 break; 1748 case IRDMA_AE_AMP_BAD_PD: 1749 switch (opcode) { 1750 case IRDMA_OP_TYPE_RDMA_WRITE: 1751 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, 1752 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1753 DDP_TAGGED_UNASSOC_STAG); 1754 break; 1755 case IRDMA_OP_TYPE_SEND_INV: 1756 case IRDMA_OP_TYPE_SEND_SOL_INV: 1757 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1758 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1759 RDMAP_CANT_INV_STAG); 1760 break; 1761 default: 1762 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1763 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1764 RDMAP_UNASSOC_STAG); 1765 } 1766 break; 1767 case IRDMA_AE_AMP_INVALID_STAG: 1768 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1769 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1770 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1771 RDMAP_INV_STAG); 1772 break; 1773 case IRDMA_AE_AMP_BAD_QP: 1774 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR, 1775 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1776 DDP_UNTAGGED_INV_QN); 1777 break; 1778 case IRDMA_AE_AMP_BAD_STAG_KEY: 1779 case IRDMA_AE_AMP_BAD_STAG_INDEX: 1780 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1781 switch (opcode) { 1782 case IRDMA_OP_TYPE_SEND_INV: 1783 case IRDMA_OP_TYPE_SEND_SOL_INV: 1784
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, 1785 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1786 RDMAP_CANT_INV_STAG); 1787 break; 1788 default: 1789 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1790 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1791 RDMAP_INV_STAG); 1792 } 1793 break; 1794 case IRDMA_AE_AMP_RIGHTS_VIOLATION: 1795 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: 1796 case IRDMA_AE_PRIV_OPERATION_DENIED: 1797 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1798 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1799 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1800 RDMAP_ACCESS); 1801 break; 1802 case IRDMA_AE_AMP_TO_WRAP: 1803 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1804 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1805 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1806 RDMAP_TO_WRAP); 1807 break; 1808 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 1809 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1810 (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); 1811 break; 1812 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 1813 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, 1814 (LAYER_DDP << 4) | DDP_CATASTROPHIC, 1815 DDP_CATASTROPHIC_LOCAL); 1816 break; 1817 case IRDMA_AE_LCE_QP_CATASTROPHIC: 1818 case IRDMA_AE_DDP_NO_L_BIT: 1819 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, 1820 (LAYER_DDP << 4) | DDP_CATASTROPHIC, 1821 DDP_CATASTROPHIC_LOCAL); 1822 break; 1823 case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN: 1824 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1825 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1826 DDP_UNTAGGED_INV_MSN_RANGE); 1827 break; 1828 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 1829 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1830 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, 1831 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1832 DDP_UNTAGGED_INV_TOO_LONG); 1833 break; 1834 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: 1835 if (is_tagged) 1836 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1837 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1838 DDP_TAGGED_INV_DDP_VER); 1839 else 1840 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1841 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1842 DDP_UNTAGGED_INV_DDP_VER); 1843 break; 1844 case IRDMA_AE_DDP_UBE_INVALID_MO: 1845 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1846 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1847 DDP_UNTAGGED_INV_MO); 1848 break; 1849 case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: 1850 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, 1851 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1852 DDP_UNTAGGED_INV_MSN_NO_BUF); 1853 break; 1854 case IRDMA_AE_DDP_UBE_INVALID_QN: 1855 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1856 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1857 DDP_UNTAGGED_INV_QN); 1858 break; 1859 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: 1860 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1861 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1862 RDMAP_INV_RDMAP_VER); 1863 break; 1864 default: 1865 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, 1866 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1867 RDMAP_UNSPECIFIED); 1868 break; 1869 } 1870 1871 if (copy_len) 1872 irdma_memcpy(termhdr + 1, pkt, copy_len); 1873 1874 return sizeof(*termhdr) + copy_len; 1875 } 1876 1877 /** 1878 * irdma_terminate_send_fin() - Send fin for terminate message 1879 * @qp: qp associated with received terminate AE 1880 */ 1881 void 1882 irdma_terminate_send_fin(struct irdma_sc_qp *qp) 1883 { 1884 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, 1885 
IRDMAQP_TERM_SEND_FIN_ONLY, 0); 1886 } 1887 1888 /** 1889 * irdma_terminate_connection() - handle bad AE and send terminate to remote QP 1890 * @qp: qp associated with received terminate AE 1891 * @info: the struct containing AE information 1892 */ 1893 void 1894 irdma_terminate_connection(struct irdma_sc_qp *qp, 1895 struct irdma_aeqe_info *info) 1896 { 1897 u8 termlen = 0; 1898 1899 if (qp->term_flags & IRDMA_TERM_SENT) 1900 return; 1901 1902 termlen = irdma_bld_terminate_hdr(qp, info); 1903 irdma_terminate_start_timer(qp); 1904 qp->term_flags |= IRDMA_TERM_SENT; 1905 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, 1906 IRDMAQP_TERM_SEND_TERM_ONLY, termlen); 1907 } 1908 1909 /** 1910 * irdma_terminate_received - handle terminate received AE 1911 * @qp: qp associated with received terminate AE 1912 * @info: the struct containing AE information 1913 */ 1914 void 1915 irdma_terminate_received(struct irdma_sc_qp *qp, 1916 struct irdma_aeqe_info *info) 1917 { 1918 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; 1919 BE32 *mpa; 1920 u8 ddp_ctl; 1921 u8 rdma_ctl; 1922 u16 aeq_id = 0; 1923 struct irdma_terminate_hdr *termhdr; 1924 1925 mpa = (BE32 *) irdma_locate_mpa(pkt); 1926 if (info->q2_data_written) { 1927 /* did not validate the frame - do it now */ 1928 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; 1929 rdma_ctl = ntohl(mpa[0]) & 0xff; 1930 if ((ddp_ctl & 0xc0) != 0x40) 1931 aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC; 1932 else if ((ddp_ctl & 0x03) != 1) 1933 aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION; 1934 else if (ntohl(mpa[2]) != 2) 1935 aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN; 1936 else if (ntohl(mpa[3]) != 1) 1937 aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN; 1938 else if (ntohl(mpa[4]) != 0) 1939 aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO; 1940 else if ((rdma_ctl & 0xc0) != 0x40) 1941 aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; 1942 1943 info->ae_id = aeq_id; 1944 if (info->ae_id) { 1945 /* Bad terminate recvd - send back a terminate */ 1946 irdma_terminate_connection(qp, info); 1947 return; 1948 } 1949 } 1950 1951 qp->term_flags |= IRDMA_TERM_RCVD; 1952 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; 1953 termhdr = (struct irdma_terminate_hdr *)&mpa[5]; 1954 if (termhdr->layer_etype == RDMAP_REMOTE_PROT || 1955 termhdr->layer_etype == RDMAP_REMOTE_OP) { 1956 irdma_terminate_done(qp, 0); 1957 } else { 1958 irdma_terminate_start_timer(qp); 1959 irdma_terminate_send_fin(qp); 1960 } 1961 } 1962 1963 static int 1964 irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) 1965 { 1966 return 0; 1967 } 1968 1969 static void 1970 irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri) 1971 { 1972 /* do nothing */ 1973 } 1974 1975 static void 1976 irdma_null_ws_reset(struct irdma_sc_vsi *vsi) 1977 { 1978 /* do nothing */ 1979 } 1980 1981 /** 1982 * irdma_sc_vsi_init - Init the vsi structure 1983 * @vsi: pointer to vsi structure to initialize 1984 * @info: the info used to initialize the vsi struct 1985 */ 1986 void 1987 irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, 1988 struct irdma_vsi_init_info *info) 1989 { 1990 u8 i; 1991 1992 vsi->dev = info->dev; 1993 vsi->back_vsi = info->back_vsi; 1994 vsi->register_qset = info->register_qset; 1995 vsi->unregister_qset = info->unregister_qset; 1996 vsi->mtu = info->params->mtu; 1997 vsi->exception_lan_q = info->exception_lan_q; 1998 vsi->vsi_idx = info->pf_data_vsi_num; 1999 2000 irdma_set_qos_info(vsi, info->params); 2001 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 2002 mutex_init(&vsi->qos[i].qos_mutex); 2003 INIT_LIST_HEAD(&vsi->qos[i].qplist); 2004 } 2005 if
(vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) { 2006 vsi->dev->ws_add = irdma_ws_add; 2007 vsi->dev->ws_remove = irdma_ws_remove; 2008 vsi->dev->ws_reset = irdma_ws_reset; 2009 } else { 2010 vsi->dev->ws_add = irdma_null_ws_add; 2011 vsi->dev->ws_remove = irdma_null_ws_remove; 2012 vsi->dev->ws_reset = irdma_null_ws_reset; 2013 } 2014 } 2015 2016 /** 2017 * irdma_vsi_stats_init - Initialize the vsi statistics 2018 * @vsi: pointer to the vsi structure 2019 * @info: The info structure used for initialization 2020 */ 2021 int 2022 irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, 2023 struct irdma_vsi_stats_info *info) 2024 { 2025 struct irdma_dma_mem *stats_buff_mem; 2026 2027 vsi->pestat = info->pestat; 2028 vsi->pestat->hw = vsi->dev->hw; 2029 vsi->pestat->vsi = vsi; 2030 2031 stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem; 2032 stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2; 2033 stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw, 2034 stats_buff_mem, 2035 stats_buff_mem->size, 1); 2036 if (!stats_buff_mem->va) 2037 return -ENOMEM; 2038 2039 vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va; 2040 vsi->pestat->gather_info.last_gather_stats_va = 2041 (void *)((uintptr_t)stats_buff_mem->va + 2042 IRDMA_GATHER_STATS_BUF_SIZE); 2043 2044 irdma_hw_stats_start_timer(vsi); 2045 2046 /* when stat allocation is not required default to fcn_id. */ 2047 vsi->stats_idx = info->fcn_id; 2048 2049 return 0; 2050 } 2051 2052 /** 2053 * irdma_vsi_stats_free - Free the vsi stats 2054 * @vsi: pointer to the vsi structure 2055 */ 2056 void 2057 irdma_vsi_stats_free(struct irdma_sc_vsi *vsi) 2058 { 2059 2060 if (!vsi->pestat) 2061 return; 2062 2063 irdma_hw_stats_stop_timer(vsi); 2064 irdma_free_dma_mem(vsi->pestat->hw, 2065 &vsi->pestat->gather_info.stats_buff_mem); 2066 } 2067 2068 /** 2069 * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size 2070 * @wqsize: size of the wq (sq, rq) to encode 2071 * @queue_type: queue type selected for the calculation algorithm 2072 */ 2073 u8 2074 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type) 2075 { 2076 u8 encoded_size = 0; 2077 2078 /* 2079 * the cqp sq's hw encoded value starts from 1 for a size of 4, while for a qp's wq it starts from 0. 2080 */
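/*
 * Worked example (a sketch of the encoding math only): a QP WQ of 256
 * quanta gives 256 >> 2 = 64, which takes six right shifts to reach
 * zero, so the encoded size is 6; a CQP SQ of the same depth starts
 * the count at 1 and encodes as 7.
 */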
2081 if (queue_type == IRDMA_QUEUE_TYPE_CQP) 2082 encoded_size = 1; 2083 wqsize >>= 2; 2084 while (wqsize >>= 1) 2085 encoded_size++; 2086 2087 return encoded_size; 2088 } 2089 2090 /** 2091 * irdma_sc_gather_stats - collect the statistics 2092 * @cqp: struct for cqp hw 2093 * @info: gather stats info structure 2094 * @scratch: u64 saved to be used during cqp completion 2095 */ 2096 static int 2097 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp, 2098 struct irdma_stats_gather_info *info, 2099 u64 scratch) 2100 { 2101 __le64 *wqe; 2102 u64 temp; 2103 2104 if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE) 2105 return -ENOSPC; 2106 2107 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2108 if (!wqe) 2109 return -ENOSPC; 2110 2111 set_64bit_val(wqe, IRDMA_BYTE_40, 2112 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index)); 2113 set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa); 2114 2115 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) | 2116 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) | 2117 FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, 2118 info->stats_inst_index) | 2119 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX, 2120 info->use_hmc_fcn_index) | 2121 FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS); 2122 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2123 2124 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2125 2126 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_STATS, "GATHER_STATS WQE", wqe, 2127 IRDMA_CQP_WQE_SIZE * 8); 2128 2129 irdma_sc_cqp_post_sq(cqp); 2130 return 0; 2131 } 2132 2133 /** 2134 * irdma_sc_set_up_map - set the up map table 2135 * @cqp: struct for cqp hw 2136 * @info: User priority map info 2137 * @scratch: u64 saved to be used during cqp completion 2138 */ 2139 static int 2140 irdma_sc_set_up_map(struct irdma_sc_cqp *cqp, 2141 struct irdma_up_info *info, u64 scratch) 2142 { 2143 __le64 *wqe; 2144 u64 temp = 0; 2145 int i; 2146 2147 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2148 if (!wqe) 2149 return -ENOSPC; 2150 2151 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) 2152 temp |= (u64)info->map[i] << (i * 8); 2153 2154 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 2155 set_64bit_val(wqe, IRDMA_BYTE_40, 2156 FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) | 2157 FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx)); 2158 2159 temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) | 2160 FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) | 2161 FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE, 2162 info->use_cnp_up_override) | 2163 FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP); 2164 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2165 2166 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2167 2168 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPMAP WQE", wqe, 2169 IRDMA_CQP_WQE_SIZE * 8); 2170 irdma_sc_cqp_post_sq(cqp); 2171 2172 return 0; 2173 } 2174 2175 /** 2176 * irdma_sc_manage_ws_node - create/modify/destroy WS node 2177 * @cqp: struct for cqp hw 2178 * @info: node info structure 2179 * @node_op: 0 for add, 1 for modify, 2 for delete 2180 * @scratch: u64 saved to be used during cqp completion 2181 */ 2182 static int 2183 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp, 2184 struct irdma_ws_node_info *info, 2185 enum irdma_ws_node_op node_op, u64 scratch) 2186 { 2187 __le64 *wqe; 2188 u64 temp = 0; 2189 2190 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2191 if (!wqe) 2192 return -ENOSPC; 2193 2194 set_64bit_val(wqe, IRDMA_BYTE_32, 2195
FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) | 2196 FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight)); 2197 2198 temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) | 2199 FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) | 2200 FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) | 2201 FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) | 2202 FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) | 2203 FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) | 2204 FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) | 2205 FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) | 2206 FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id); 2207 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2208 2209 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2210 2211 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_WS WQE", wqe, 2212 IRDMA_CQP_WQE_SIZE * 8); 2213 irdma_sc_cqp_post_sq(cqp); 2214 2215 return 0; 2216 } 2217 2218 /** 2219 * irdma_sc_qp_flush_wqes - flush qp's wqe 2220 * @qp: sc qp 2221 * @info: flush information 2222 * @scratch: u64 saved to be used during cqp completion 2223 * @post_sq: flag for cqp db to ring 2224 */ 2225 int 2226 irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, 2227 struct irdma_qp_flush_info *info, u64 scratch, 2228 bool post_sq) 2229 { 2230 u64 temp = 0; 2231 __le64 *wqe; 2232 struct irdma_sc_cqp *cqp; 2233 u64 hdr; 2234 bool flush_sq = false, flush_rq = false; 2235 2236 if (info->rq && !qp->flush_rq) 2237 flush_rq = true; 2238 if (info->sq && !qp->flush_sq) 2239 flush_sq = true; 2240 qp->flush_sq |= flush_sq; 2241 qp->flush_rq |= flush_rq; 2242 2243 if (!flush_sq && !flush_rq) { 2244 irdma_debug(qp->dev, IRDMA_DEBUG_CQP, 2245 "Additional flush request ignored for qp %x\n", 2246 qp->qp_uk.qp_id); 2247 return -EALREADY; 2248 } 2249 2250 cqp = qp->pd->dev->cqp; 2251 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2252 if (!wqe) 2253 return -ENOSPC; 2254 2255 if (info->userflushcode) { 2256 if (flush_rq) 2257 temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR, 2258 info->rq_minor_code) | 2259 FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR, 2260 info->rq_major_code); 2261 if (flush_sq) 2262 temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR, 2263 info->sq_minor_code) | 2264 FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR, 2265 info->sq_major_code); 2266 } 2267 set_64bit_val(wqe, IRDMA_BYTE_16, temp); 2268
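/*
 * When the caller requests an AE, stamp the AE code and source into
 * the WQE so the flush also raises an asynchronous event for this QP.
 */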
2269 temp = (info->generate_ae) ? info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE, 2271 info->ae_src) : 0; 2272 set_64bit_val(wqe, IRDMA_BYTE_8, temp); 2273 hdr = qp->qp_uk.qp_id | 2274 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) | 2275 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) | 2276 FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) | 2277 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) | 2278 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) | 2279 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2280 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2281 2282 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2283 2284 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_FLUSH WQE", wqe, 2285 IRDMA_CQP_WQE_SIZE * 8); 2286 if (post_sq) 2287 irdma_sc_cqp_post_sq(cqp); 2288 2289 return 0; 2290 } 2291 2292 /** 2293 * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP 2294 * @qp: sc qp 2295 * @info: gen ae information 2296 * @scratch: u64 saved to be used during cqp completion 2297 * @post_sq: flag for cqp db to ring 2298 */ 2299 static int 2300 irdma_sc_gen_ae(struct irdma_sc_qp *qp, 2301 struct irdma_gen_ae_info *info, u64 scratch, 2302 bool post_sq) 2303 { 2304 u64 temp; 2305 __le64 *wqe; 2306 struct irdma_sc_cqp *cqp; 2307 u64 hdr; 2308 2309 cqp = qp->pd->dev->cqp; 2310 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2311 if (!wqe) 2312 return -ENOSPC; 2313 2314 temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE, 2315 info->ae_src); 2316 set_64bit_val(wqe, IRDMA_BYTE_8, temp); 2317 2318 hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, 2319 IRDMA_CQP_OP_GEN_AE) | 2320 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) | 2321 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2322 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2323 2324 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2325 2326 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "GEN_AE WQE", wqe, 2327 IRDMA_CQP_WQE_SIZE * 8); 2328 if (post_sq) 2329 irdma_sc_cqp_post_sq(cqp); 2330 2331 return 0; 2332 } 2333 2334 /** irdma_sc_qp_upload_context - upload qp's context 2335 * @dev: sc device struct 2336 * @info: upload context info ptr for return 2337 * @scratch: u64 saved to be used during cqp completion 2338 * @post_sq: flag for cqp db to ring 2339 */ 2340 static int 2341 irdma_sc_qp_upload_context(struct irdma_sc_dev *dev, 2342 struct irdma_upload_context_info *info, 2343 u64 scratch, bool post_sq) 2344 { 2345 __le64 *wqe; 2346 struct irdma_sc_cqp *cqp; 2347 u64 hdr; 2348 2349 cqp = dev->cqp; 2350 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2351 if (!wqe) 2352 return -ENOSPC; 2353 2354 set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa); 2355 2356 hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) | 2357 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) | 2358 FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) | 2359 FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) | 2360 FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) | 2361 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2362 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2363 2364 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2365 2366 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QP_UPLOAD_CTX WQE", wqe, 2367 IRDMA_CQP_WQE_SIZE * 8); 2368 if (post_sq) 2369 irdma_sc_cqp_post_sq(cqp); 2370 2371 return 0; 2372 } 2373 2374 /** 2375 * irdma_sc_manage_push_page - Handle push page 2376 * @cqp: struct for cqp hw 2377 * @info: push page info 2378 * @scratch: u64 saved to be used during cqp completion 2379 *
@post_sq: flag for cqp db to ring 2380 */ 2381 static int 2382 irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp, 2383 struct irdma_cqp_manage_push_page_info *info, 2384 u64 scratch, bool post_sq) 2385 { 2386 __le64 *wqe; 2387 u64 hdr; 2388 2389 if (info->free_page && 2390 info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages) 2391 return -EINVAL; 2392 2393 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2394 if (!wqe) 2395 return -ENOSPC; 2396 2397 set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle); 2398 hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) | 2399 FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) | 2400 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) | 2401 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) | 2402 FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page); 2403 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2404 2405 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2406 2407 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", wqe, 2408 IRDMA_CQP_WQE_SIZE * 8); 2409 if (post_sq) 2410 irdma_sc_cqp_post_sq(cqp); 2411 2412 return 0; 2413 } 2414 2415 /** 2416 * irdma_sc_suspend_qp - suspend qp for param change 2417 * @cqp: struct for cqp hw 2418 * @qp: sc qp struct 2419 * @scratch: u64 saved to be used during cqp completion 2420 */ 2421 static int 2422 irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, 2423 u64 scratch) 2424 { 2425 u64 hdr; 2426 __le64 *wqe; 2427 2428 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2429 if (!wqe) 2430 return -ENOSPC; 2431 2432 hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) | 2433 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) | 2434 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2435 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2436 2437 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2438 2439 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SUSPEND_QP WQE", wqe, 2440 IRDMA_CQP_WQE_SIZE * 8); 2441 irdma_sc_cqp_post_sq(cqp); 2442 2443 return 0; 2444 } 2445 2446 /** 2447 * irdma_sc_resume_qp - resume qp after suspend 2448 * @cqp: struct for cqp hw 2449 * @qp: sc qp struct 2450 * @scratch: u64 saved to be used during cqp completion 2451 */ 2452 static int 2453 irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, 2454 u64 scratch) 2455 { 2456 u64 hdr; 2457 __le64 *wqe; 2458 2459 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2460 if (!wqe) 2461 return -ENOSPC; 2462 2463 set_64bit_val(wqe, IRDMA_BYTE_16, 2464 FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle)); 2465 2466 hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) | 2467 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) | 2468 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2469 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2470 2471 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2472 2473 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "RESUME_QP WQE", wqe, 2474 IRDMA_CQP_WQE_SIZE * 8); 2475 irdma_sc_cqp_post_sq(cqp); 2476 2477 return 0; 2478 } 2479 2480 /** 2481 * irdma_sc_cq_ack - acknowledge completion q 2482 * @cq: cq struct 2483 */ 2484 static inline void 2485 irdma_sc_cq_ack(struct irdma_sc_cq *cq) 2486 { 2487 db_wr32(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db); 2488 } 2489 2490 /** 2491 * irdma_sc_cq_init - initialize completion q 2492 * @cq: cq struct 2493 * @info: cq initialization info 2494 */ 2495 int 2496 irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info) 2497 { 2498 int ret_code; 
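/*
 * pble_obj_cnt below is the HMC PBLE object count; a virtually
 * mapped CQ must keep first_pm_pbl_idx inside that range.
 */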
2499 u32 pble_obj_cnt; 2500 2501 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 2502 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 2503 return -EINVAL; 2504 2505 cq->cq_pa = info->cq_base_pa; 2506 cq->dev = info->dev; 2507 cq->ceq_id = info->ceq_id; 2508 info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db; 2509 info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db; 2510 ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info); 2511 if (ret_code) 2512 return ret_code; 2513 2514 cq->virtual_map = info->virtual_map; 2515 cq->pbl_chunk_size = info->pbl_chunk_size; 2516 cq->ceqe_mask = info->ceqe_mask; 2517 cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP; 2518 cq->shadow_area_pa = info->shadow_area_pa; 2519 cq->shadow_read_threshold = info->shadow_read_threshold; 2520 cq->ceq_id_valid = info->ceq_id_valid; 2521 cq->tph_en = info->tph_en; 2522 cq->tph_val = info->tph_val; 2523 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 2524 cq->vsi = info->vsi; 2525 2526 return 0; 2527 } 2528 2529 /** 2530 * irdma_sc_cq_create - create completion q 2531 * @cq: cq struct 2532 * @scratch: u64 saved to be used during cqp completion 2533 * @check_overflow: flag for overflow check 2534 * @post_sq: flag for cqp db to ring 2535 */ 2536 static int 2537 irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch, 2538 bool check_overflow, bool post_sq) 2539 { 2540 __le64 *wqe; 2541 struct irdma_sc_cqp *cqp; 2542 u64 hdr; 2543 struct irdma_sc_ceq *ceq; 2544 int ret_code = 0; 2545 2546 cqp = cq->dev->cqp; 2547 if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1)) 2548 return -EINVAL; 2549 2550 if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1)) 2551 return -EINVAL; 2552 2553 ceq = cq->dev->ceq[cq->ceq_id]; 2554 if (ceq && ceq->reg_cq) { 2555 ret_code = irdma_sc_add_cq_ctx(ceq, cq); 2556 if (ret_code) 2557 return ret_code; 2558 } 2559 2560 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2561 if (!wqe) { 2562 if (ceq && ceq->reg_cq) 2563 irdma_sc_remove_cq_ctx(ceq, cq); 2564 return -ENOSPC; 2565 } 2566 2567 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size); 2568 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2569 set_64bit_val(wqe, IRDMA_BYTE_16, 2570 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, 2571 cq->shadow_read_threshold)); 2572 set_64bit_val(wqe, IRDMA_BYTE_32, cq->virtual_map ? 0 : cq->cq_pa); 2573 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2574 set_64bit_val(wqe, IRDMA_BYTE_48, 2575 FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, 2576 cq->virtual_map ? cq->first_pm_pbl_idx : 0)); 2577 set_64bit_val(wqe, IRDMA_BYTE_56, 2578 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | 2579 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); 2580 hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) | 2581 FLD_LS_64(cq->dev, cq->ceq_id_valid ? 
cq->ceq_id : 0, 2582 IRDMA_CQPSQ_CQ_CEQID) | 2583 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) | 2584 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | 2585 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) | 2586 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) | 2587 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | 2588 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) | 2589 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | 2590 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, 2591 cq->cq_uk.avoid_mem_cflct) | 2592 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2593 2594 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2595 2596 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2597 2598 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_CREATE WQE", wqe, 2599 IRDMA_CQP_WQE_SIZE * 8); 2600 if (post_sq) 2601 irdma_sc_cqp_post_sq(cqp); 2602 2603 return 0; 2604 } 2605 2606 /** 2607 * irdma_sc_cq_destroy - destroy completion q 2608 * @cq: cq struct 2609 * @scratch: u64 saved to be used during cqp completion 2610 * @post_sq: flag for cqp db to ring 2611 */ 2612 int 2613 irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq) 2614 { 2615 struct irdma_sc_cqp *cqp; 2616 __le64 *wqe; 2617 u64 hdr; 2618 struct irdma_sc_ceq *ceq; 2619 2620 cqp = cq->dev->cqp; 2621 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2622 if (!wqe) 2623 return -ENOSPC; 2624 2625 ceq = cq->dev->ceq[cq->ceq_id]; 2626 if (ceq && ceq->reg_cq) 2627 irdma_sc_remove_cq_ctx(ceq, cq); 2628 2629 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size); 2630 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2631 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2632 set_64bit_val(wqe, IRDMA_BYTE_48, 2633 (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); 2634 2635 hdr = cq->cq_uk.cq_id | 2636 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? 
cq->ceq_id : 0), 2637 IRDMA_CQPSQ_CQ_CEQID) | 2638 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) | 2639 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | 2640 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) | 2641 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | 2642 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) | 2643 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | 2644 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) | 2645 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2646 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2647 2648 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2649 2650 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_DESTROY WQE", wqe, 2651 IRDMA_CQP_WQE_SIZE * 8); 2652 if (post_sq) 2653 irdma_sc_cqp_post_sq(cqp); 2654 2655 return 0; 2656 } 2657 2658 /** 2659 * irdma_sc_cq_resize - set resized cq buffer info 2660 * @cq: resized cq 2661 * @info: resized cq buffer info 2662 */ 2663 void 2664 irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info) 2665 { 2666 cq->virtual_map = info->virtual_map; 2667 cq->cq_pa = info->cq_pa; 2668 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 2669 cq->pbl_chunk_size = info->pbl_chunk_size; 2670 irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size); 2671 } 2672 2673 /** 2674 * irdma_sc_cq_modify - modify a Completion Queue 2675 * @cq: cq struct 2676 * @info: modification info struct 2677 * @scratch: u64 saved to be used during cqp completion 2678 * @post_sq: flag to post to sq 2679 */ 2680 static int 2681 irdma_sc_cq_modify(struct irdma_sc_cq *cq, 2682 struct irdma_modify_cq_info *info, u64 scratch, 2683 bool post_sq) 2684 { 2685 struct irdma_sc_cqp *cqp; 2686 __le64 *wqe; 2687 u64 hdr; 2688 u32 pble_obj_cnt; 2689 2690 pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 2691 if (info->cq_resize && info->virtual_map && 2692 info->first_pm_pbl_idx >= pble_obj_cnt) 2693 return -EINVAL; 2694 2695 cqp = cq->dev->cqp; 2696 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2697 if (!wqe) 2698 return -ENOSPC; 2699 2700 set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size); 2701 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2702 set_64bit_val(wqe, IRDMA_BYTE_16, 2703 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold)); 2704 set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa); 2705 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2706 set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx); 2707 set_64bit_val(wqe, IRDMA_BYTE_56, 2708 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | 2709 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); 2710 2711 hdr = cq->cq_uk.cq_id | 2712 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) | 2713 FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) | 2714 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) | 2715 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) | 2716 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) | 2717 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | 2718 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | 2719 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, 2720 cq->cq_uk.avoid_mem_cflct) | 2721 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2722 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2723 2724 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2725 2726 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_MODIFY WQE", wqe, 2727 IRDMA_CQP_WQE_SIZE * 8); 2728 if (post_sq) 2729 irdma_sc_cqp_post_sq(cqp); 2730 2731 return 0; 
2732 } 2733 2734 /** 2735 * irdma_check_cqp_progress - check cqp processing progress 2736 * @timeout: timeout info struct 2737 * @dev: sc device struct 2738 */ 2739 void 2740 irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, 2741 struct irdma_sc_dev *dev) 2742 { 2743 u64 completed_ops = atomic64_read(&dev->cqp->completed_ops); 2744 2745 if (timeout->compl_cqp_cmds != completed_ops) { 2746 timeout->compl_cqp_cmds = completed_ops; 2747 timeout->count = 0; 2748 } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) { 2749 timeout->count++; 2750 } 2751 } 2752 2753 /** 2754 * irdma_get_cqp_reg_info - get head and tail for cqp using registers 2755 * @cqp: struct for cqp hw 2756 * @val: cqp tail register value 2757 * @tail: wqtail register value 2758 * @error: cqp processing err 2759 */ 2760 static inline void 2761 irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val, 2762 u32 *tail, u32 *error) 2763 { 2764 *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]); 2765 *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val); 2766 *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val); 2767 } 2768 2769 /** 2770 * irdma_cqp_poll_registers - poll cqp registers 2771 * @cqp: struct for cqp hw 2772 * @tail: wqtail register value 2773 * @count: how many times to try for completion 2774 */ 2775 static int 2776 irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail, 2777 u32 count) 2778 { 2779 u32 i = 0; 2780 u32 newtail, error, val; 2781 2782 while (i++ < count) { 2783 irdma_get_cqp_reg_info(cqp, &val, &newtail, &error); 2784 if (error) { 2785 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 2786 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP, 2787 "CQPERRCODES error_code[x%08X]\n", error); 2788 return -EIO; 2789 } 2790 if (newtail != tail) { 2791 /* SUCCESS */ 2792 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 2793 atomic64_inc(&cqp->completed_ops); 2794 return 0; 2795 } 2796 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 2797 } 2798 2799 return -ETIMEDOUT; 2800 } 2801 2802 /** 2803 * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base 2804 * @dev: sc device struct 2805 * @buf: pointer to commit buffer 2806 * @buf_idx: buffer index 2807 * @obj_info: object info pointer 2808 * @rsrc_idx: index of memory resource 2809 */ 2810 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf, 2811 u32 buf_idx, struct irdma_hmc_obj_info *obj_info, 2812 u32 rsrc_idx){ 2813 u64 temp; 2814 2815 get_64bit_val(buf, buf_idx, &temp); 2816 2817 switch (rsrc_idx) { 2818 case IRDMA_HMC_IW_QP: 2819 obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp); 2820 break; 2821 case IRDMA_HMC_IW_CQ: 2822 obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT); 2823 break; 2824 case IRDMA_HMC_IW_APBVT_ENTRY: 2825 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) 2826 obj_info[rsrc_idx].cnt = 1; 2827 else 2828 obj_info[rsrc_idx].cnt = 0; 2829 break; 2830 default: 2831 obj_info[rsrc_idx].cnt = (u32)temp; 2832 break; 2833 } 2834 2835 obj_info[rsrc_idx].base = (u64)RS_64_1(temp, IRDMA_COMMIT_FPM_BASE_S) * 512; 2836 2837 return temp; 2838 } 2839 2840 /** 2841 * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer 2842 * @dev: pointer to dev struct 2843 * @buf: ptr to fpm commit buffer 2844 * @info: ptr to irdma_hmc_obj_info struct 2845 * @sd: number of SDs for HMC objects 2846 * 2847 * parses fpm commit info and copies base values 2848 * of hmc objects in hmc_info 2849 */ 2850 static void 2851 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf, 2852 struct irdma_hmc_obj_info *info, 2853 u32 *sd) 2854 {
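/*
 * Each 64-bit word of the commit buffer carries the committed count
 * and the base (in 512-byte units) for one HMC object type; the
 * calls below decode them at their fixed offsets.
 */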
2855 u64 size; 2856 u32 i; 2857 u64 max_base = 0; 2858 u32 last_hmc_obj = 0; 2859 2860 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_0, info, 2861 IRDMA_HMC_IW_QP); 2862 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_8, info, 2863 IRDMA_HMC_IW_CQ); 2864 /* skipping RSRVD */ 2865 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_24, info, 2866 IRDMA_HMC_IW_HTE); 2867 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_32, info, 2868 IRDMA_HMC_IW_ARP); 2869 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_40, info, 2870 IRDMA_HMC_IW_APBVT_ENTRY); 2871 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_48, info, 2872 IRDMA_HMC_IW_MR); 2873 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_56, info, 2874 IRDMA_HMC_IW_XF); 2875 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_64, info, 2876 IRDMA_HMC_IW_XFFL); 2877 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_72, info, 2878 IRDMA_HMC_IW_Q1); 2879 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_80, info, 2880 IRDMA_HMC_IW_Q1FL); 2881 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_88, info, 2882 IRDMA_HMC_IW_TIMER); 2883 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_112, info, 2884 IRDMA_HMC_IW_PBLE); 2885 /* skipping RSVD. */ 2886 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { 2887 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_96, info, 2888 IRDMA_HMC_IW_FSIMC); 2889 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_104, info, 2890 IRDMA_HMC_IW_FSIAV); 2891 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_128, info, 2892 IRDMA_HMC_IW_RRF); 2893 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_136, info, 2894 IRDMA_HMC_IW_RRFFL); 2895 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_144, info, 2896 IRDMA_HMC_IW_HDR); 2897 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info, 2898 IRDMA_HMC_IW_MD); 2899 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) { 2900 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info, 2901 IRDMA_HMC_IW_OOISC); 2902 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info, 2903 IRDMA_HMC_IW_OOISCFFL); 2904 } 2905 } 2906 2907 /* searching for the last object in HMC to find the size of the HMC area. */
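/*
 * The object with the highest base plus its footprint bounds the HMC
 * area; one SD spans 2MB (1 << 21), so the size is rounded up to a
 * whole number of SDs.
 */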
2908 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) { 2909 if (info[i].base > max_base && info[i].cnt) { 2910 max_base = info[i].base; 2911 last_hmc_obj = i; 2912 } 2913 } 2914 2915 size = info[last_hmc_obj].cnt * info[last_hmc_obj].size + 2916 info[last_hmc_obj].base; 2917 2918 if (size & 0x1FFFFF) 2919 *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */ 2920 else 2921 *sd = (u32)(size >> 21); 2922 2923 } 2924 2925 /** 2926 * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size 2927 * @buf: ptr to fpm query buffer 2928 * @buf_idx: index into buf 2929 * @obj_info: ptr to irdma_hmc_obj_info struct 2930 * @rsrc_idx: resource index into info 2931 * 2932 * Decode a 64 bit value from fpm query buffer into max count and size 2933 */ 2934 static u64 irdma_sc_decode_fpm_query(__le64 * buf, u32 buf_idx, 2935 struct irdma_hmc_obj_info *obj_info, 2936 u32 rsrc_idx){ 2937 u64 temp; 2938 u32 size; 2939 2940 get_64bit_val(buf, buf_idx, &temp); 2941 obj_info[rsrc_idx].max_cnt = (u32)temp; 2942 size = (u32)RS_64_1(temp, 32); 2943 obj_info[rsrc_idx].size = LS_64_1(1, size); 2944 2945 return temp; 2946 } 2947 2948 /** 2949 * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer 2950 * @dev: ptr to shared code device 2951 * @buf: ptr to fpm query buffer 2952 * @hmc_info: ptr to irdma_hmc_obj_info struct 2953 * @hmc_fpm_misc: ptr to fpm data 2954 * 2955 * parses fpm query buffer and copies max_cnt and 2956 * size values of hmc objects in hmc_info 2957 */ 2958 static int 2959 irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf, 2960 struct irdma_hmc_info *hmc_info, 2961 struct irdma_hmc_fpm_misc *hmc_fpm_misc) 2962 { 2963 struct irdma_hmc_obj_info *obj_info; 2964 u64 temp; 2965 u32 size; 2966 u16 max_pe_sds; 2967 2968 obj_info = hmc_info->hmc_obj; 2969 2970 get_64bit_val(buf, IRDMA_BYTE_0, &temp); 2971 hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp); 2972 max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp); 2973 2974 hmc_fpm_misc->max_sds = max_pe_sds; 2975 hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; 2976 get_64bit_val(buf, 8, &temp); 2977 obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp); 2978 size = (u32)RS_64_1(temp, 32); 2979 obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size); 2980 2981 get_64bit_val(buf, 16, &temp); 2982 obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp); 2983 size = (u32)RS_64_1(temp, 32); 2984 obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size); 2985 2986 irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE); 2987 irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP); 2988 2989 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192; 2990 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1; 2991 2992 irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR); 2993 irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF); 2994 2995 get_64bit_val(buf, 64, &temp); 2996 obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp; 2997 obj_info[IRDMA_HMC_IW_XFFL].size = 4; 2998 hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp); 2999 if (!hmc_fpm_misc->xf_block_size) 3000 return -EINVAL; 3001 3002 irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1); 3003 get_64bit_val(buf, 80, &temp); 3004 obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp; 3005 obj_info[IRDMA_HMC_IW_Q1FL].size = 4; 3006 3007 hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp); 3008 if
(!hmc_fpm_misc->q1_block_size) 3009 return -EINVAL; 3010 3011 irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER); 3012 3013 get_64bit_val(buf, 112, &temp); 3014 obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp; 3015 obj_info[IRDMA_HMC_IW_PBLE].size = 8; 3016 3017 get_64bit_val(buf, 120, &temp); 3018 hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp); 3019 hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp); 3020 hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp); 3021 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 3022 return 0; 3023 irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC); 3024 irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV); 3025 irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF); 3026 3027 get_64bit_val(buf, IRDMA_BYTE_136, &temp); 3028 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp; 3029 obj_info[IRDMA_HMC_IW_RRFFL].size = 4; 3030 hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp); 3031 if (!hmc_fpm_misc->rrf_block_size && 3032 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt) 3033 return -EINVAL; 3034 3035 irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR); 3036 irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD); 3037 3038 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) { 3039 irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC); 3040 3041 get_64bit_val(buf, IRDMA_BYTE_168, &temp); 3042 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp; 3043 obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4; 3044 hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp); 3045 if (!hmc_fpm_misc->ooiscf_block_size && 3046 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt) 3047 return -EINVAL; 3048 } 3049 3050 return 0; 3051 } 3052 3053 /** 3054 * irdma_sc_find_reg_cq - find cq ctx index 3055 * @ceq: ceq sc structure 3056 * @cq: cq sc structure 3057 */ 3058 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq, 3059 struct irdma_sc_cq *cq){ 3060 u32 i; 3061 3062 for (i = 0; i < ceq->reg_cq_size; i++) { 3063 if (cq == ceq->reg_cq[i]) 3064 return i; 3065 } 3066 3067 return IRDMA_INVALID_CQ_IDX; 3068 } 3069 3070 /** 3071 * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq 3072 * @ceq: ceq sc structure 3073 * @cq: cq sc structure 3074 */ 3075 int 3076 irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) 3077 { 3078 unsigned long flags; 3079 3080 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3081 3082 if (ceq->reg_cq_size == ceq->elem_cnt) { 3083 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3084 return -ENOSPC; 3085 } 3086 3087 ceq->reg_cq[ceq->reg_cq_size++] = cq; 3088 3089 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3090 3091 return 0; 3092 } 3093 3094 /** 3095 * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq 3096 * @ceq: ceq sc structure 3097 * @cq: cq sc structure 3098 */ 3099 void 3100 irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) 3101 { 3102 unsigned long flags; 3103 u32 cq_ctx_idx; 3104 3105 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3106 cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq); 3107 if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX) 3108 goto exit; 3109 3110 ceq->reg_cq_size--; 3111 if (cq_ctx_idx != ceq->reg_cq_size) 3112 ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size]; 3113 ceq->reg_cq[ceq->reg_cq_size] = NULL; 3114 3115 exit: 3116 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3117 } 3118 3119 /** 3120 * irdma_sc_cqp_init - 
Initialize buffers for a control Queue Pair 3121 * @cqp: IWARP control queue pair pointer 3122 * @info: IWARP control queue pair init info pointer 3123 * 3124 * Initializes the object and context buffers for a control Queue Pair. 3125 */ 3126 int 3127 irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, 3128 struct irdma_cqp_init_info *info) 3129 { 3130 u8 hw_sq_size; 3131 3132 if (info->sq_size > IRDMA_CQP_SW_SQSIZE_MAX || 3133 info->sq_size < IRDMA_CQP_SW_SQSIZE_MIN || 3134 ((info->sq_size & (info->sq_size - 1)))) 3135 return -EINVAL; 3136 3137 hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size, 3138 IRDMA_QUEUE_TYPE_CQP); 3139 cqp->size = sizeof(*cqp); 3140 cqp->sq_size = info->sq_size; 3141 cqp->hw_sq_size = hw_sq_size; 3142 cqp->sq_base = info->sq; 3143 cqp->host_ctx = info->host_ctx; 3144 cqp->sq_pa = info->sq_pa; 3145 cqp->host_ctx_pa = info->host_ctx_pa; 3146 cqp->dev = info->dev; 3147 cqp->struct_ver = info->struct_ver; 3148 cqp->hw_maj_ver = info->hw_maj_ver; 3149 cqp->hw_min_ver = info->hw_min_ver; 3150 cqp->scratch_array = info->scratch_array; 3151 cqp->polarity = 0; 3152 cqp->en_datacenter_tcp = info->en_datacenter_tcp; 3153 cqp->ena_vf_count = info->ena_vf_count; 3154 cqp->hmc_profile = info->hmc_profile; 3155 cqp->ceqs_per_vf = info->ceqs_per_vf; 3156 cqp->disable_packed = info->disable_packed; 3157 cqp->rocev2_rto_policy = info->rocev2_rto_policy; 3158 cqp->protocol_used = info->protocol_used; 3159 irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params)); 3160 cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk; 3161 cqp->timer_slots = info->timer_slots; 3162 info->dev->cqp = cqp; 3163 3164 IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size); 3165 cqp->requested_ops = 0; 3166 atomic64_set(&cqp->completed_ops, 0); 3167 /* for the cqp commands backlog. 
*/ 3168 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); 3169 3170 writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]); 3171 writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]); 3172 writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3173 3174 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3175 "sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n", 3176 cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, 3177 (unsigned long long)cqp->sq_pa, cqp, cqp->polarity); 3178 return 0; 3179 } 3180 3181 /** 3182 * irdma_sc_cqp_create - create cqp during bringup 3183 * @cqp: struct for cqp hw 3184 * @maj_err: If error, major err number 3185 * @min_err: If error, minor err number 3186 */ 3187 int 3188 irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err) 3189 { 3190 u64 temp; 3191 u8 hw_rev; 3192 u32 cnt = 0, p1, p2, val = 0, err_code; 3193 int ret_code; 3194 3195 hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev; 3196 cqp->sdbuf.size = IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size; 3197 cqp->sdbuf.va = irdma_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf, 3198 cqp->sdbuf.size, 3199 IRDMA_SD_BUF_ALIGNMENT); 3200 if (!cqp->sdbuf.va) 3201 return -ENOMEM; 3202 3203 spin_lock_init(&cqp->dev->cqp_lock); 3204 3205 temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) | 3206 FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) | 3207 FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) | 3208 FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf); 3209 if (hw_rev >= IRDMA_GEN_2) { 3210 temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY, 3211 cqp->rocev2_rto_policy) | 3212 FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED, 3213 cqp->protocol_used); 3214 } 3215 3216 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp); 3217 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa); 3218 3219 temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) | 3220 FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile); 3221 3222 if (hw_rev == IRDMA_GEN_2) 3223 temp |= FIELD_PREP(IRDMA_CQPHC_TMR_SLOT, cqp->timer_slots); 3224 if (hw_rev >= IRDMA_GEN_2) 3225 temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK, 3226 cqp->en_rem_endpoint_trk); 3227 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp); 3228 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp); 3229 temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) | 3230 FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver); 3231 if (hw_rev >= IRDMA_GEN_2) { 3232 temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) | 3233 FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor); 3234 } 3235 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp); 3236 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0); 3237 temp = 0; 3238 if (hw_rev >= IRDMA_GEN_2) { 3239 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) | 3240 FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) | 3241 FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor); 3242 } 3243 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp); 3244 temp = 0; 3245 if (hw_rev >= IRDMA_GEN_2) { 3246 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) | 3247 FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) | 3248 FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) | 3249 FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod); 3250 } 3251 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp); 3252 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE", 3253 cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8); 3254 p1 = RS_32_1(cqp->host_ctx_pa, 32); 
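/*
 * Hand the host context PA to the device: high 32 bits via CCQPHIGH,
 * low 32 bits via CCQPLOW, then poll CCQPSTATUS until the CQP comes
 * up or the retry budget (max_done_count) is exhausted.
 */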
3255 p2 = (u32)cqp->host_ctx_pa; 3256 3257 writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); 3258 writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]); 3259 3260 do { 3261 if (cnt++ > cqp->dev->hw_attrs.max_done_count) { 3262 ret_code = -ETIMEDOUT; 3263 goto err; 3264 } 3265 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3266 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3267 } while (!val); 3268 3269 if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) { 3270 ret_code = -EOPNOTSUPP; 3271 goto err; 3272 } 3273 3274 cqp->process_cqp_sds = irdma_update_sds_noccq; 3275 return 0; 3276 3277 err: 3278 spin_lock_destroy(&cqp->dev->cqp_lock); 3279 irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); 3280 err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 3281 *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code); 3282 *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code); 3283 return ret_code; 3284 } 3285 3286 /** 3287 * irdma_sc_cqp_post_sq - post of cqp's sq 3288 * @cqp: struct for cqp hw 3289 */ 3290 void 3291 irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp) 3292 { 3293 db_wr32(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db); 3294 3295 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3296 "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, 3297 cqp->sq_ring.tail, cqp->sq_ring.size); 3298 } 3299 3300 /** 3301 * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq 3302 * and pass back index 3303 * @cqp: CQP HW structure 3304 * @scratch: private data for CQP WQE 3305 * @wqe_idx: WQE index of CQP SQ 3306 */ 3307 __le64 * 3308 irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, 3309 u32 *wqe_idx) 3310 { 3311 __le64 *wqe = NULL; 3312 int ret_code; 3313 3314 if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) { 3315 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3316 "CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n", 3317 cqp->sq_ring.head, cqp->sq_ring.tail, 3318 cqp->sq_ring.size); 3319 return NULL; 3320 } 3321 IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); 3322 if (ret_code) 3323 return NULL; 3324 3325 cqp->requested_ops++; 3326 if (!*wqe_idx) 3327 cqp->polarity = !cqp->polarity; 3328 wqe = cqp->sq_base[*wqe_idx].elem; 3329 cqp->scratch_array[*wqe_idx] = scratch; 3330 3331 memset(&wqe[0], 0, 24); 3332 memset(&wqe[4], 0, 32); 3333 3334 return wqe; 3335 } 3336 3337 /** 3338 * irdma_sc_cqp_destroy - destroy cqp during close 3339 * @cqp: struct for cqp hw 3340 * @free_hwcqp: true for regular cqp destroy; false for reset path 3341 */ 3342 int 3343 irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp) 3344 { 3345 u32 cnt = 0, val; 3346 int ret_code = 0; 3347 3348 if (free_hwcqp) { 3349 writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); 3350 writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]); 3351 do { 3352 if (cnt++ > cqp->dev->hw_attrs.max_done_count) { 3353 ret_code = -ETIMEDOUT; 3354 break; 3355 } 3356 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3357 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3358 } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE)); 3359 } 3360 irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); 3361 spin_lock_destroy(&cqp->dev->cqp_lock); 3362 return ret_code; 3363 } 3364 3365 /** 3366 * irdma_sc_ccq_arm - enable intr for control cq 3367 * @ccq: ccq sc struct 3368 */ 3369 void 3370 irdma_sc_ccq_arm(struct irdma_sc_cq *ccq) 3371 { 3372 unsigned long flags; 3373 u64 temp_val; 3374 u16 sw_cq_sel; 3375 u8 arm_next_se; 3376 u8 arm_seq_num; 3377 3378 spin_lock_irqsave(&ccq->dev->cqp_lock, flags); 3379 
get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val); 3380 sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); 3381 arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); 3382 arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); 3383 arm_seq_num++; 3384 temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | 3385 FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | 3386 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | 3387 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1); 3388 set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val); 3389 spin_unlock_irqrestore(&ccq->dev->cqp_lock, flags); 3390 3391 irdma_wmb(); /* make sure shadow area is updated before arming */ 3392 3393 db_wr32(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db); 3394 } 3395 3396 /** 3397 * irdma_sc_ccq_get_cqe_info - get ccq's cq entry 3398 * @ccq: ccq sc struct 3399 * @info: completion q entry to return 3400 */ 3401 int 3402 irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, 3403 struct irdma_ccq_cqe_info *info) 3404 { 3405 u64 qp_ctx, temp, temp1; 3406 __le64 *cqe; 3407 struct irdma_sc_cqp *cqp; 3408 u32 wqe_idx; 3409 u32 error; 3410 u8 polarity; 3411 int ret_code = 0; 3412 unsigned long flags; 3413 3414 if (ccq->cq_uk.avoid_mem_cflct) 3415 cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk); 3416 else 3417 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk); 3418 3419 get_64bit_val(cqe, IRDMA_BYTE_24, &temp); 3420 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp); 3421 if (polarity != ccq->cq_uk.polarity) 3422 return -ENOENT; 3423 3424 /* Ensure CEQE contents are read after valid bit is checked */ 3425 rmb(); 3426 3427 get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx); 3428 cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx; 3429 info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp); 3430 info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR; 3431 info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp); 3432 if (info->error) { 3433 info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp); 3434 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 3435 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP, 3436 "CQPERRCODES error_code[x%08X]\n", error); 3437 } 3438 3439 wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp); 3440 info->scratch = cqp->scratch_array[wqe_idx]; 3441 3442 get_64bit_val(cqe, IRDMA_BYTE_16, &temp1); 3443 info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1); 3444 3445 get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1); 3446 info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1); 3447 info->cqp = cqp; 3448 3449 /* move the head for cq */ 3450 IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); 3451 if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)) 3452 ccq->cq_uk.polarity ^= 1; 3453 3454 /* update cq tail in cq shadow memory also */ 3455 IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); 3456 set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_0, 3457 IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)); 3458 3459 irdma_wmb(); /* make sure shadow area is updated before moving tail */ 3460 3461 spin_lock_irqsave(&cqp->dev->cqp_lock, flags); 3462 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 3463 spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags); 3464 atomic64_inc(&cqp->completed_ops); 3465 3466 return ret_code; 3467 } 3468 3469 /** 3470 * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ 3471 * @cqp: struct for cqp hw 3472 * @op_code: cqp opcode for completion 3473 * @compl_info: completion q entry to return 3474 */ 3475 int 3476 irdma_sc_poll_for_cqp_op_done(struct 
/**
 * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
int
irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
			      struct irdma_ccq_cqe_info *compl_info)
{
	struct irdma_ccq_cqe_info info = {0};
	struct irdma_sc_cq *ccq;
	int ret_code = 0;
	u32 cnt = 0;

	ccq = cqp->dev->ccq;
	while (1) {
		if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
			return -ETIMEDOUT;

		if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
			irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
			continue;
		}
		if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
			ret_code = -EIO;
			break;
		}
		/* make sure op code matches */
		if (op_code == info.op_code)
			break;
		irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
			    "opcode mismatch for my op code 0x%x, returned opcode %x\n",
			    op_code, info.op_code);
	}

	if (compl_info)
		irdma_memcpy(compl_info, &info, sizeof(*compl_info));

	return ret_code;
}

/**
 * irdma_sc_manage_hmc_pm_func_table - manage function table
 * @cqp: struct for cqp hw
 * @info: info for the manage function table operation
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
				  struct irdma_hmc_fcn_info *info,
				  u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE,
			"MANAGE_HMC_PM_FUNC_TABLE WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_commit_fpm_val_done - wait for cqp eqe completion
 * for fpm commit
 * @cqp: struct for cqp hw
 */
static int
irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
{
	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
					     NULL);
}
/**
 * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: Memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static int
irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
			u16 hmc_fn_id,
			struct irdma_dma_mem *commit_fpm_mem,
			bool post_sq, u8 wait_type)
{
	__le64 *wqe;
	u64 hdr;
	u32 tail, val, error;
	int ret_code = 0;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
	set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa);

	hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "COMMIT_FPM_VAL WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (post_sq) {
		irdma_sc_cqp_post_sq(cqp);
		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
			ret_code = irdma_cqp_poll_registers(cqp, tail,
							    cqp->dev->hw_attrs.max_done_count);
		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
			ret_code = irdma_sc_commit_fpm_val_done(cqp);
	}

	return ret_code;
}

/**
 * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
 * query fpm
 * @cqp: struct for cqp hw
 */
static int
irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
{
	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
					     NULL);
}

/**
 * irdma_sc_query_fpm_val - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static int
irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
		       u16 hmc_fn_id,
		       struct irdma_dma_mem *query_fpm_mem,
		       bool post_sq, u8 wait_type)
{
	__le64 *wqe;
	u64 hdr;
	u32 tail, val, error;
	int ret_code = 0;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
	set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa);

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY_FPM WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (post_sq) {
		irdma_sc_cqp_post_sq(cqp);
		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
			ret_code = irdma_cqp_poll_registers(cqp, tail,
							    cqp->dev->hw_attrs.max_done_count);
		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
			ret_code = irdma_sc_query_fpm_val_done(cqp);
	}

	return ret_code;
}
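/*
 * Illustrative sketch of how the two FPM commands above pair up during
 * bring-up (see irdma_sc_init_iw_hmc() and irdma_sc_cfg_iw_fpm() below
 * for the real callers):
 *
 *	irdma_sc_query_fpm_val(cqp, 0, fn_id, &query_mem, true, wait);
 *	irdma_sc_parse_fpm_query_buf(dev, query_mem.va, hmc_info, misc);
 *	... trim hmc_obj[] counts until everything fits (irdma_cfg_fpm_val) ...
 *	irdma_sc_commit_fpm_val(cqp, 0, fn_id, &commit_mem, true, wait);
 */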
/**
 * irdma_sc_ceq_init - initialize ceq
 * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
int
irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		  struct irdma_ceq_init_info *info)
{
	u32 pble_obj_cnt;

	if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
	    info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
		return -EINVAL;

	if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
		return -EINVAL;
	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;

	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
		return -EINVAL;

	ceq->size = sizeof(*ceq);
	ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
	ceq->ceq_id = info->ceq_id;
	ceq->dev = info->dev;
	ceq->elem_cnt = info->elem_cnt;
	ceq->ceq_elem_pa = info->ceqe_pa;
	ceq->virtual_map = info->virtual_map;
	ceq->itr_no_expire = info->itr_no_expire;
	ceq->reg_cq = info->reg_cq;
	ceq->reg_cq_size = 0;
	spin_lock_init(&ceq->req_cq_lock);
	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
	ceq->tph_en = info->tph_en;
	ceq->tph_val = info->tph_val;
	ceq->vsi = info->vsi;
	ceq->polarity = 1;
	IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
	ceq->dev->ceq[info->ceq_id] = ceq;

	return 0;
}

/**
 * irdma_sc_ceq_create - create ceq wqe
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
		    bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;

	cqp = ceq->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
	set_64bit_val(wqe, IRDMA_BYTE_32,
		      (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
	set_64bit_val(wqe, IRDMA_BYTE_48,
		      (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, IRDMA_BYTE_56,
		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
	hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
	    FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_CREATE WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 */
static int
irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
{
	struct irdma_sc_cqp *cqp;

	cqp = ceq->dev->cqp;
	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
					     NULL);
}

/**
 * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
 * @ceq: ceq sc structure
 */
int
irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
{
	struct irdma_sc_cqp *cqp;

	if (ceq->reg_cq)
		irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
	cqp = ceq->dev->cqp;
	cqp->process_cqp_sds = irdma_update_sds_noccq;

	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
					     NULL);
}

/**
 * irdma_sc_cceq_create - create cceq
 * @ceq: ceq sc structure
 */
int
irdma_sc_cceq_create(struct irdma_sc_ceq *ceq)
{
	int ret_code;
	struct irdma_sc_dev *dev = ceq->dev;

	dev->ccq->vsi = ceq->vsi;
	if (ceq->reg_cq) {
		ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
		if (ret_code)
			return ret_code;
	}
	ret_code = irdma_sc_ceq_create(ceq, 0, true);
	if (!ret_code)
		return irdma_sc_cceq_create_done(ceq);

	return ret_code;
}
/**
 * irdma_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;

	cqp = ceq->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
	set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx);
	hdr = ceq->ceq_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
	    FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_DESTROY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	ceq->dev->ceq[ceq->ceq_id] = NULL;
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 *
 * It is expected caller serializes this function with cleanup_ceqes()
 * because these functions manipulate the same ceq
 */
void *
irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
{
	u64 temp;
	__le64 *ceqe;
	struct irdma_sc_cq *cq = NULL;
	struct irdma_sc_cq *temp_cq;
	u8 polarity;
	u32 cq_idx;
	unsigned long flags;

	do {
		cq_idx = 0;
		ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
		get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
		if (polarity != ceq->polarity)
			return NULL;

		temp_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
		if (!temp_cq) {
			cq_idx = IRDMA_INVALID_CQ_IDX;
			IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);

			if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
				ceq->polarity ^= 1;
			continue;
		}

		cq = temp_cq;
		if (ceq->reg_cq) {
			spin_lock_irqsave(&ceq->req_cq_lock, flags);
			cq_idx = irdma_sc_find_reg_cq(ceq, cq);
			spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
		}
		IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
		if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
			ceq->polarity ^= 1;
	} while (cq_idx == IRDMA_INVALID_CQ_IDX);

	if (cq)
		irdma_sc_cq_ack(cq);
	return cq;
}
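/*
 * Illustrative sketch of the interrupt-side consumer of the routine above
 * ("handle_cq_completions" is a stand-in for the caller's own per-CQ
 * handler, not a driver symbol):
 *
 *	struct irdma_sc_cq *cq;
 *
 *	while ((cq = irdma_sc_process_ceq(dev, ceq)))
 *		handle_cq_completions(cq);
 */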
/**
 * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
 * @cq: cq for which the ceqes need to be cleaned up
 * @ceq: ceq ptr
 *
 * The function is called after the cq is destroyed to cleanup
 * its pending ceqe entries. It is expected caller serializes this
 * function with process_ceq() in interrupt context.
 */
void
irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
{
	struct irdma_sc_cq *next_cq;
	u8 ceq_polarity = ceq->polarity;
	__le64 *ceqe;
	u8 polarity;
	u64 temp;
	int next;
	u32 i;

	next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);

	for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
		ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);

		get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
		if (polarity != ceq_polarity)
			return;

		next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
		if (cq == next_cq)
			set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID);

		next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
		if (!next)
			ceq_polarity ^= 1;
	}
}

/**
 * irdma_sc_aeq_init - initialize aeq
 * @aeq: aeq structure ptr
 * @info: aeq initialization info
 */
int
irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		  struct irdma_aeq_init_info *info)
{
	u32 pble_obj_cnt;

	if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
	    info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
		return -EINVAL;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;

	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
		return -EINVAL;

	aeq->size = sizeof(*aeq);
	aeq->polarity = 1;
	aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
	aeq->dev = info->dev;
	aeq->elem_cnt = info->elem_cnt;
	aeq->aeq_elem_pa = info->aeq_elem_pa;
	IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
	aeq->virtual_map = info->virtual_map;
	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
	aeq->msix_idx = info->msix_idx;
	info->dev->aeq = aeq;

	return 0;
}
/**
 * irdma_sc_aeq_create - create aeq
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
		    bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;

	cqp = aeq->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
	set_64bit_val(wqe, IRDMA_BYTE_32,
		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
	set_64bit_val(wqe, IRDMA_BYTE_48,
		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
	    FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "AEQ_CREATE WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_aeq_destroy - destroy aeq during close
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_dev *dev;
	u64 hdr;

	dev = aeq->dev;

	if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
		writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
	set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx);
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
	    FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "AEQ_DESTROY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 */
int
irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
		       struct irdma_aeqe_info *info)
{
	u64 temp, compl_ctx;
	__le64 *aeqe;
	u8 ae_src;
	u8 polarity;

	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
	get_64bit_val(aeqe, IRDMA_BYTE_8, &temp);
	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);

	if (aeq->polarity != polarity)
		return -ENOENT;

	/* Ensure AEQE contents are read after valid bit is checked */
	rmb();

	get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);

	irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16);

	ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
	info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
	info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
	    ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
	info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
	info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
	info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
	info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
	info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);

	info->ae_src = ae_src;
	switch (info->ae_id) {
	case IRDMA_AE_PRIV_OPERATION_DENIED:
	case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
	case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
	case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
	case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
	case IRDMA_AE_UDA_XMIT_BAD_PD:
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
	case IRDMA_AE_BAD_CLOSE:
	case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
	case IRDMA_AE_STAG_ZERO_INVALID:
	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
	case IRDMA_AE_IB_INVALID_REQUEST:
	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
	case IRDMA_AE_IB_REMOTE_OP_ERROR:
	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
	case IRDMA_AE_DDP_UBE_INVALID_MO:
	case IRDMA_AE_DDP_UBE_INVALID_QN:
	case IRDMA_AE_DDP_NO_L_BIT:
	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
	case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
	case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
	case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
	case IRDMA_AE_ROCE_REQ_LENGTH_ERROR:
	case IRDMA_AE_INVALID_ARP_ENTRY:
	case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
	case IRDMA_AE_STALE_ARP_ENTRY:
	case IRDMA_AE_INVALID_AH_ENTRY:
	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
	case IRDMA_AE_LCE_QP_CATASTROPHIC:
	case IRDMA_AE_LLP_DOUBT_REACHABILITY:
	case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
	case IRDMA_AE_LLP_TOO_MANY_RNRS:
	case IRDMA_AE_RESET_SENT:
	case IRDMA_AE_TERMINATE_SENT:
	case IRDMA_AE_RESET_NOT_SENT:
	case IRDMA_AE_QP_SUSPEND_COMPLETE:
	case IRDMA_AE_UDA_L4LEN_INVALID:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		break;
	case IRDMA_AE_LCE_CQ_CATASTROPHIC:
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		ae_src = IRDMA_AE_SOURCE_RSVD;
		break;
	case IRDMA_AE_ROCE_EMPTY_MCG:
	case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
	case IRDMA_AE_ROCE_BAD_MC_QPID:
	case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
		/* fallthrough */
	case IRDMA_AE_LLP_CONNECTION_RESET:
	case IRDMA_AE_LLP_SYN_RECEIVED:
	case IRDMA_AE_LLP_FIN_RECEIVED:
	case IRDMA_AE_LLP_CLOSE_COMPLETE:
	case IRDMA_AE_LLP_TERMINATE_RECEIVED:
	case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
		ae_src = IRDMA_AE_SOURCE_RSVD;
		info->qp = true;
		info->compl_ctx = compl_ctx;
		break;
	case IRDMA_AE_RESOURCE_EXHAUSTION:
		/*
		 * ae_src contains the exhausted resource with a unique
		 * decoding. Set RSVD here to prevent matching with a CQ or
		 * QP.
		 */
		ae_src = IRDMA_AE_SOURCE_RSVD;
		break;
	default:
		break;
	}

	switch (ae_src) {
	case IRDMA_AE_SOURCE_RQ:
	case IRDMA_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->rq = true;
		info->compl_ctx = compl_ctx;
		info->err_rq_idx_valid = true;
		break;
	case IRDMA_AE_SOURCE_CQ:
	case IRDMA_AE_SOURCE_CQ_0110:
	case IRDMA_AE_SOURCE_CQ_1010:
	case IRDMA_AE_SOURCE_CQ_1110:
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case IRDMA_AE_SOURCE_SQ:
	case IRDMA_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->compl_ctx = compl_ctx;
		break;
	case IRDMA_AE_SOURCE_IN_WR:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case IRDMA_AE_SOURCE_IN_RR:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case IRDMA_AE_SOURCE_OUT_RR:
	case IRDMA_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	case IRDMA_AE_SOURCE_RSVD:
	default:
		break;
	}

	IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
	if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
		aeq->polarity ^= 1;

	return 0;
}

/**
 * irdma_sc_repost_aeq_entries - repost completed aeq entries
 * @dev: sc device struct
 * @count: allocate count
 */
void
irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
{
	db_wr32(count, dev->aeq_alloc_db);
}

/**
 * irdma_sc_ccq_init - initialize control cq
 * @cq: sc's cq struct
 * @info: info for control cq initialization
 */
int
irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
{
	u32 pble_obj_cnt;

	if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
	    info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
		return -EINVAL;

	if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
		return -EINVAL;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;

	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
		return -EINVAL;

	cq->cq_pa = info->cq_pa;
	cq->cq_uk.cq_base = info->cq_base;
	cq->shadow_area_pa = info->shadow_area_pa;
	cq->cq_uk.shadow_area = info->shadow_area;
	cq->shadow_read_threshold = info->shadow_read_threshold;
	cq->dev = info->dev;
	cq->ceq_id = info->ceq_id;
	cq->cq_uk.cq_size = info->num_elem;
	cq->cq_type = IRDMA_CQ_TYPE_CQP;
	cq->ceqe_mask = info->ceqe_mask;
	IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
	cq->cq_uk.cq_id = 0;	/* control cq is id 0 always */
	cq->ceq_id_valid = info->ceq_id_valid;
	cq->tph_en = info->tph_en;
	cq->tph_val = info->tph_val;
	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
	cq->pbl_list = info->pbl_list;
	cq->virtual_map = info->virtual_map;
	cq->pbl_chunk_size = info->pbl_chunk_size;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
	cq->cq_uk.polarity = true;
	cq->vsi = info->vsi;
	cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;

	/* Only applicable to CQs other than CCQ so initialize to zero */
	cq->cq_uk.cqe_alloc_db = NULL;

	info->dev->ccq = cq;
	return 0;
}
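/*
 * Illustrative sketch of AEQ draining with irdma_sc_get_next_aeqe() and
 * irdma_sc_repost_aeq_entries() above; the surrounding dispatch logic
 * lives in the OS-specific layer and "handle_ae" is a stand-in name:
 *
 *	struct irdma_aeqe_info ae_info;
 *	u32 processed = 0;
 *
 *	while (!irdma_sc_get_next_aeqe(aeq, &ae_info)) {
 *		handle_ae(&ae_info);
 *		processed++;
 *	}
 *	if (processed)
 *		irdma_sc_repost_aeq_entries(dev, processed);
 */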
/**
 * irdma_sc_ccq_create_done - poll cqp for ccq create
 * @ccq: ccq sc struct
 */
static inline int
irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
{
	struct irdma_sc_cqp *cqp;

	cqp = ccq->dev->cqp;

	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
}

/**
 * irdma_sc_ccq_create - create control cq
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: overflow flag for ccq
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
		    bool check_overflow, bool post_sq)
{
	int ret_code;

	ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
	if (ret_code)
		return ret_code;

	if (post_sq) {
		ret_code = irdma_sc_ccq_create_done(ccq);
		if (ret_code)
			return ret_code;
	}
	ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;

	return 0;
}
/**
 * irdma_sc_ccq_destroy - destroy ccq during close
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;
	int ret_code = 0;
	u32 tail, val, error;
	struct irdma_sc_dev *dev;

	cqp = ccq->dev->cqp;
	dev = ccq->dev;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_0, ccq->cq_uk.cq_size);
	set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, IRDMA_BYTE_40, ccq->shadow_area_pa);

	hdr = ccq->cq_uk.cq_id |
	    FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
		      IRDMA_CQPSQ_CQ_CEQID) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
	    FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
	    FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
	    FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CCQ_DESTROY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (post_sq) {
		irdma_sc_cqp_post_sq(cqp);
		ret_code = irdma_cqp_poll_registers(cqp, tail,
						    dev->hw_attrs.max_done_count);
	}

	cqp->process_cqp_sds = irdma_update_sds_noccq;
	dev->ccq = NULL;

	return ret_code;
}

/**
 * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev: ptr to irdma_dev struct
 * @hmc_fn_id: hmc function id
 */
int
irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id)
{
	struct irdma_hmc_info *hmc_info;
	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
	struct irdma_dma_mem query_fpm_mem;
	int ret_code = 0;
	u8 wait_type;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;
	query_fpm_mem.pa = dev->fpm_query_buf_pa;
	query_fpm_mem.va = dev->fpm_query_buf;
	hmc_info->hmc_fn_id = hmc_fn_id;
	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;

	ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
					  &query_fpm_mem, true, wait_type);
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
						hmc_fpm_misc);

	irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE);
	return ret_code;
}
/**
 * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
 * command and populates fpm base address in hmc_info
 * @dev: ptr to irdma_dev struct
 * @hmc_fn_id: hmc function id
 */
static int
irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u16 hmc_fn_id)
{
	struct irdma_hmc_obj_info *obj_info;
	__le64 *buf;
	struct irdma_hmc_info *hmc_info;
	struct irdma_dma_mem commit_fpm_mem;
	int ret_code = 0;
	u8 wait_type;

	hmc_info = dev->hmc_info;
	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	set_64bit_val(buf, IRDMA_BYTE_0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
	set_64bit_val(buf, IRDMA_BYTE_8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
	set_64bit_val(buf, IRDMA_BYTE_16, (u64)0);	/* RSRVD */
	set_64bit_val(buf, IRDMA_BYTE_24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
	set_64bit_val(buf, IRDMA_BYTE_32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
	set_64bit_val(buf, IRDMA_BYTE_40, (u64)0);	/* RSVD */
	set_64bit_val(buf, IRDMA_BYTE_48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
	set_64bit_val(buf, IRDMA_BYTE_56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
	set_64bit_val(buf, IRDMA_BYTE_64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
	set_64bit_val(buf, IRDMA_BYTE_72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
	set_64bit_val(buf, IRDMA_BYTE_80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
	set_64bit_val(buf, IRDMA_BYTE_88,
		      (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
	set_64bit_val(buf, IRDMA_BYTE_96,
		      (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
	set_64bit_val(buf, IRDMA_BYTE_104,
		      (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
	set_64bit_val(buf, IRDMA_BYTE_112,
		      (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
	set_64bit_val(buf, IRDMA_BYTE_120, (u64)0);	/* RSVD */
	set_64bit_val(buf, IRDMA_BYTE_128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
	set_64bit_val(buf, IRDMA_BYTE_136,
		      (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
	set_64bit_val(buf, IRDMA_BYTE_144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
	set_64bit_val(buf, IRDMA_BYTE_152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
	set_64bit_val(buf, IRDMA_BYTE_160,
		      (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
	set_64bit_val(buf, IRDMA_BYTE_168,
		      (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;

	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
	irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);
	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
					   &commit_fpm_mem, true, wait_type);
	if (!ret_code)
		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
					      hmc_info->hmc_obj,
					      &hmc_info->sd_table.sd_cnt);
	irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 */
static int
cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
		 struct irdma_update_sds_info *info, u64 scratch)
{
	u64 data;
	u64 hdr;
	__le64 *wqe;
	int mem_entries, wqe_entries;
	struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
	u64 offset = 0;
	u32 wqe_idx;

	wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
	if (!wqe)
		return -ENOSPC;

	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	if (mem_entries) {
		offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
		irdma_memcpy(((char *)sdbuf->va + offset), &info->entry[3],
			     mem_entries << 4);

		data = (u64)sdbuf->pa + offset;
	} else {
		data = 0;
	}
	data |= FLD_LS_64(cqp->dev, info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID);
	set_64bit_val(wqe, IRDMA_BYTE_16, data);

	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));

		set_64bit_val(wqe, IRDMA_BYTE_56, info->entry[2].data);
		/* fallthrough */
	case 2:
		set_64bit_val(wqe, IRDMA_BYTE_32,
			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));

		set_64bit_val(wqe, IRDMA_BYTE_40, info->entry[1].data);
		/* fallthrough */
	case 1:
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));

		set_64bit_val(wqe, IRDMA_BYTE_8, info->entry[0].data);
		break;
	default:
		break;
	}

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
	    FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (mem_entries)
		irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE,
				"UPDATE_PE_SDS WQE Buffer",
				(char *)sdbuf->va + offset, mem_entries << 4);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);

	return 0;
}

/**
 * irdma_update_pe_sds - cqp wqe for sd
 * @dev: ptr to irdma_dev struct
 * @info: sd info for sd's
 * @scratch: u64 saved to be used during cqp completion
 */
static int
irdma_update_pe_sds(struct irdma_sc_dev *dev,
		    struct irdma_update_sds_info *info, u64 scratch)
{
	struct irdma_sc_cqp *cqp = dev->cqp;
	int ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
	if (!ret_code)
		irdma_sc_cqp_post_sq(cqp);

	return ret_code;
}

/**
 * irdma_update_sds_noccq - update sd before ccq created
 * @dev: sc device struct
 * @info: sd info for sd's
 */
int
irdma_update_sds_noccq(struct irdma_sc_dev *dev,
		       struct irdma_update_sds_info *info)
{
	u32 error, val, tail;
	struct irdma_sc_cqp *cqp = dev->cqp;
	int ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
	if (ret_code)
		return ret_code;

	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);

	irdma_sc_cqp_post_sq(cqp);
	return irdma_cqp_poll_registers(cqp, tail,
					cqp->dev->hw_attrs.max_done_count);
}
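/*
 * Worked example for cqp_sds_wqe_fill() above: with info->cnt == 5, the
 * first three SD entries ride inline in the WQE (cmd/data pairs at bytes
 * 0/8, 32/40 and 48/56) and the remaining two (mem_entries == 2) are
 * copied into the sdbuf scratch area at wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE,
 * i.e. 2 << 4 == 32 bytes, with that buffer's physical address written at
 * byte 16 of the WQE and the spillover count carried in UPESD_ENTRY_COUNT.
 */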
/**
 * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
int
irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
				    u16 hmc_fn_id, bool post_sq,
				    bool poll_registers)
{
	u64 hdr;
	__le64 *wqe;
	u32 tail, val, error;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);
	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (post_sq) {
		irdma_sc_cqp_post_sq(cqp);
		if (poll_registers)
			/* check for cqp sq tail update */
			return irdma_cqp_poll_registers(cqp, tail,
							cqp->dev->hw_attrs.max_done_count);
		else
			return irdma_sc_poll_for_cqp_op_done(cqp,
							     IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
							     NULL);
	}

	return 0;
}

/**
 * irdma_cqp_ring_full - check if cqp ring is full
 * @cqp: struct for cqp hw
 */
static bool
irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
{
	return IRDMA_RING_FULL_ERR(cqp->sq_ring);
}

/**
 * irdma_est_sd - returns approximate number of SDs for HMC
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 */
static u32
irdma_est_sd(struct irdma_sc_dev *dev,
	     struct irdma_hmc_info *hmc_info)
{
	struct irdma_hmc_obj_info *pble_info;
	u64 size = 0;
	u64 sd;
	int i;

	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
		if (i != IRDMA_HMC_IW_PBLE)
			size += round_up(hmc_info->hmc_obj[i].cnt *
					 hmc_info->hmc_obj[i].size, 512);
	}

	pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
	size += round_up(pble_info->cnt * pble_info->size, 512);
	if (size & 0x1FFFFF)
		sd = (size >> 21) + 1;	/* add 1 for remainder */
	else
		sd = size >> 21;
	if (sd > 0xFFFFFFFF) {
		irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd);
		sd = 0xFFFFFFFE;
	}

	return (u32)sd;
}

/**
 * irdma_sc_query_rdma_features - query RDMA features and FW ver
 * @cqp: struct for cqp hw
 * @buf: buffer to hold query info
 * @scratch: u64 saved to be used during cqp completion
 */
static int
irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
			     struct irdma_dma_mem *buf, u64 scratch)
{
	__le64 *wqe;
	u64 temp;
	u32 tail, val, error;
	int status;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	temp = buf->pa;
	set_64bit_val(wqe, IRDMA_BYTE_32, temp);

	temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
			  cqp->polarity) |
	    FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
	    FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, temp);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);

	irdma_sc_cqp_post_sq(cqp);
	status = irdma_cqp_poll_registers(cqp, tail,
					  cqp->dev->hw_attrs.max_done_count);
	if (error || status)
		status = -EIO;

	return status;
}
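/*
 * Worked example for irdma_est_sd() above: each SD covers a 2MB page,
 * hence the shift by 21. If the rounded-up object sizes total 5MB plus
 * a few KB, size >> 21 == 2 with a nonzero remainder, so 3 SDs are
 * reported; an exact 4MB total would report 2.
 */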
/**
 * irdma_get_rdma_features - get RDMA features
 * @dev: sc device struct
 */
int
irdma_get_rdma_features(struct irdma_sc_dev *dev)
{
	struct irdma_dma_mem feat_buf;
	u16 feat_cnt;
	u16 feat_idx;
	u8 feat_type;
	int ret_code;
	u64 temp;

	feat_buf.size = IRDMA_FEATURE_BUF_SIZE;
	feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size,
					     IRDMA_FEATURE_BUF_ALIGNMENT);
	if (!feat_buf.va)
		return -ENOMEM;

	ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
	if (ret_code)
		goto exit;

	get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
	feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
	if (feat_cnt < IRDMA_MIN_FEATURES) {
		ret_code = -EINVAL;
		goto exit;
	} else if (feat_cnt > IRDMA_MAX_FEATURES) {
		irdma_debug(dev, IRDMA_DEBUG_DEV,
			    "feature buf size insufficient, retrying with larger buffer\n");
		irdma_free_dma_mem(dev->hw, &feat_buf);
		feat_buf.size = 8 * feat_cnt;
		feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf,
						     feat_buf.size,
						     IRDMA_FEATURE_BUF_ALIGNMENT);
		if (!feat_buf.va)
			return -ENOMEM;

		ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
		if (ret_code)
			goto exit;

		get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
		feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
		if (feat_cnt < IRDMA_MIN_FEATURES) {
			ret_code = -EINVAL;
			goto exit;
		}
	}

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES",
			feat_buf.va, feat_cnt * 8);

	for (feat_idx = 0; feat_idx < feat_cnt; feat_idx++) {
		get_64bit_val(feat_buf.va, feat_idx * 8, &temp);
		feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);

		if (feat_type >= IRDMA_MAX_FEATURES) {
			irdma_debug(dev, IRDMA_DEBUG_DEV,
				    "unknown feature type %u\n", feat_type);
			continue;
		}
		dev->feature_info[feat_type] = temp;
	}
exit:
	irdma_free_dma_mem(dev->hw, &feat_buf);
	return ret_code;
}

static u32
irdma_q1_cnt(struct irdma_sc_dev *dev,
	     struct irdma_hmc_info *hmc_info, u32 qpwanted)
{
	u32 q1_cnt;

	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
		q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
	} else {
		if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
			q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
		else
			q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
	}

	return q1_cnt;
}

static void
cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
		    struct irdma_hmc_info *hmc_info, u32 qpwanted)
{
	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
	    roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
}
static void
cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
		    struct irdma_hmc_info *hmc_info, u32 qpwanted)
{
	struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;

	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
	    4 * hmc_fpm_misc->xf_block_size * qpwanted;

	hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;

	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
		hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
		hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
		    hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
		    hmc_fpm_misc->rrf_block_size;
	if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
		if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
		if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
			    hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
			    hmc_fpm_misc->ooiscf_block_size;
	}
}

/**
 * irdma_cfg_sd_mem - allocate sd memory
 * @dev: sc device struct
 * @hmc_info: ptr to irdma_hmc_obj_info struct
 */
static int
irdma_cfg_sd_mem(struct irdma_sc_dev *dev,
		 struct irdma_hmc_info *hmc_info)
{
	struct irdma_virt_mem virt_mem;
	u32 mem_size;

	mem_size = sizeof(struct irdma_hmc_sd_entry) * hmc_info->sd_table.sd_cnt;
	virt_mem.size = mem_size;
	virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
	if (!virt_mem.va)
		return -ENOMEM;
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return 0;
}

/**
 * irdma_cfg_fpm_val - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 */
int
irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
	u32 qpwanted, mrwanted, pblewanted;
	u32 hte, i;
	u32 sd_needed;
	u32 sd_diff;
	u32 loop_count = 0;
	struct irdma_hmc_info *hmc_info;
	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
	int ret_code = 0;
	u32 max_sds;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;
	ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "irdma_sc_init_iw_hmc returned error_code = %d\n",
			    ret_code);
		return ret_code;
	}

	max_sds = hmc_fpm_misc->max_sds;

	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;

	sd_needed = irdma_est_sd(dev, hmc_info);
	irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %u where max sd is %u\n",
		    hmc_info->sd_table.sd_cnt, max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
	if (qpwanted != 0)
		qpwanted = rounddown_pow_of_two(qpwanted);

	mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;

	irdma_debug(dev, IRDMA_DEBUG_HMC,
		    "req_qp=%d max_sd=%u, max_qp = %u, max_cq=%u, max_mr=%u, max_pble=%u, mc=%d, av=%u\n",
		    qp_count, max_sds,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
	    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
	    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
	hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
	    hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
	hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;

	while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
		qpwanted /= 2;

	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
		cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
		while (hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt > hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt) {
			qpwanted /= 2;
			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
		}
	}

	do {
		++loop_count;
		hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
		    min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0;	/* Reserved */
		hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;

		hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
		hte = roundup_pow_of_two(hte);
		hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
		    hte * hmc_fpm_misc->ht_multiplier;
		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
		else
			cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);

		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
		hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
		    hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
		    hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
		    (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;

		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
		sd_needed = irdma_est_sd(dev, hmc_info);
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "sd_needed = %d, max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
			    sd_needed, max_sds, mrwanted, pblewanted, qpwanted);

		/* Do not reduce resources further. All objects fit with max SDs */
		if (sd_needed <= max_sds)
			break;

		sd_diff = sd_needed - max_sds;
		if (sd_diff > 128) {
			if (!(loop_count % 2) && qpwanted > 128) {
				qpwanted /= 2;
			} else {
				pblewanted /= 2;
				mrwanted /= 2;
			}
			continue;
		}

		if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
		    pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
			pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
			continue;
		} else if (pblewanted > 100 * FPM_MULTIPLIER) {
			pblewanted -= 10 * FPM_MULTIPLIER;
		} else if (pblewanted > 16 * FPM_MULTIPLIER) {
			pblewanted -= FPM_MULTIPLIER;
		} else if (qpwanted <= 128) {
			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
		}
		if (mrwanted > FPM_MULTIPLIER)
			mrwanted -= FPM_MULTIPLIER;
		if (!(loop_count % 10) && qpwanted > 128) {
			qpwanted /= 2;
			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
		}
	} while (loop_count < 2000);

	if (sd_needed > max_sds) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "cfg_fpm failed loop_cnt=%u, sd_needed=%u, max sd count %u\n",
			    loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
		return -EINVAL;
	}

	if (loop_count > 1 && sd_needed < max_sds) {
		pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
		sd_needed = irdma_est_sd(dev, hmc_info);
	}

	irdma_debug(dev, IRDMA_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
		    hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);

	ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "cfg_iw_fpm returned error_code[x%08X]\n",
			    readl(dev->hw_regs[IRDMA_CQPERRCODES]));
		return ret_code;
	}

	return irdma_cfg_sd_mem(dev, hmc_info);
}
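/*
 * Note on the convergence loop in irdma_cfg_fpm_val() above: while the
 * estimate exceeds max_sds by more than 128 SDs, the loop halves QPs
 * (every other pass) or PBLEs and MRs together; for smaller overshoots
 * it shaves PBLEs in FPM_MULTIPLIER-sized steps, so it converges well
 * within the 2000-iteration bound in practice.
 */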
IRDMA_OP_SUSPEND: 5160 status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp, 5161 pcmdinfo->in.u.suspend_resume.qp, 5162 pcmdinfo->in.u.suspend_resume.scratch); 5163 break; 5164 case IRDMA_OP_RESUME: 5165 status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp, 5166 pcmdinfo->in.u.suspend_resume.qp, 5167 pcmdinfo->in.u.suspend_resume.scratch); 5168 break; 5169 case IRDMA_OP_QUERY_FPM_VAL: 5170 val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa; 5171 val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va; 5172 status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp, 5173 pcmdinfo->in.u.query_fpm_val.scratch, 5174 pcmdinfo->in.u.query_fpm_val.hmc_fn_id, 5175 &val_mem, true, IRDMA_CQP_WAIT_EVENT); 5176 break; 5177 case IRDMA_OP_COMMIT_FPM_VAL: 5178 val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa; 5179 val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va; 5180 status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp, 5181 pcmdinfo->in.u.commit_fpm_val.scratch, 5182 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id, 5183 &val_mem, 5184 true, 5185 IRDMA_CQP_WAIT_EVENT); 5186 break; 5187 case IRDMA_OP_STATS_GATHER: 5188 status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp, 5189 &pcmdinfo->in.u.stats_gather.info, 5190 pcmdinfo->in.u.stats_gather.scratch); 5191 break; 5192 case IRDMA_OP_WS_MODIFY_NODE: 5193 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 5194 &pcmdinfo->in.u.ws_node.info, 5195 IRDMA_MODIFY_NODE, 5196 pcmdinfo->in.u.ws_node.scratch); 5197 break; 5198 case IRDMA_OP_WS_DELETE_NODE: 5199 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 5200 &pcmdinfo->in.u.ws_node.info, 5201 IRDMA_DEL_NODE, 5202 pcmdinfo->in.u.ws_node.scratch); 5203 break; 5204 case IRDMA_OP_WS_ADD_NODE: 5205 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 5206 &pcmdinfo->in.u.ws_node.info, 5207 IRDMA_ADD_NODE, 5208 pcmdinfo->in.u.ws_node.scratch); 5209 break; 5210 case IRDMA_OP_SET_UP_MAP: 5211 status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp, 5212 &pcmdinfo->in.u.up_map.info, 5213 pcmdinfo->in.u.up_map.scratch); 5214 break; 5215 case IRDMA_OP_QUERY_RDMA_FEATURES: 5216 status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp, 5217 &pcmdinfo->in.u.query_rdma.query_buff_mem, 5218 pcmdinfo->in.u.query_rdma.scratch); 5219 break; 5220 case IRDMA_OP_DELETE_ARP_CACHE_ENTRY: 5221 status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp, 5222 pcmdinfo->in.u.del_arp_cache_entry.scratch, 5223 pcmdinfo->in.u.del_arp_cache_entry.arp_index, 5224 pcmdinfo->post_sq); 5225 break; 5226 case IRDMA_OP_MANAGE_APBVT_ENTRY: 5227 status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp, 5228 &pcmdinfo->in.u.manage_apbvt_entry.info, 5229 pcmdinfo->in.u.manage_apbvt_entry.scratch, 5230 pcmdinfo->post_sq); 5231 break; 5232 case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY: 5233 status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp, 5234 &pcmdinfo->in.u.manage_qhash_table_entry.info, 5235 pcmdinfo->in.u.manage_qhash_table_entry.scratch, 5236 pcmdinfo->post_sq); 5237 break; 5238 case IRDMA_OP_QP_MODIFY: 5239 status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp, 5240 &pcmdinfo->in.u.qp_modify.info, 5241 pcmdinfo->in.u.qp_modify.scratch, 5242 pcmdinfo->post_sq); 5243 break; 5244 case IRDMA_OP_QP_CREATE: 5245 status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp, 5246 &pcmdinfo->in.u.qp_create.info, 5247 pcmdinfo->in.u.qp_create.scratch, 5248 pcmdinfo->post_sq); 5249 break; 5250 case 
	case IRDMA_OP_QP_DESTROY:
		status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
					     pcmdinfo->in.u.qp_destroy.scratch,
					     pcmdinfo->in.u.qp_destroy.remove_hash_idx,
					     pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ALLOC_STAG:
		status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
					     &pcmdinfo->in.u.alloc_stag.info,
					     pcmdinfo->in.u.alloc_stag.scratch,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MR_REG_NON_SHARED:
		status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
						    &pcmdinfo->in.u.mr_reg_non_shared.info,
						    pcmdinfo->in.u.mr_reg_non_shared.scratch,
						    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_DEALLOC_STAG:
		status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
					       &pcmdinfo->in.u.dealloc_stag.info,
					       pcmdinfo->in.u.dealloc_stag.scratch,
					       pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MW_ALLOC:
		status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
					   &pcmdinfo->in.u.mw_alloc.info,
					   pcmdinfo->in.u.mw_alloc.scratch,
					   pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
		status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
						      &pcmdinfo->in.u.add_arp_cache_entry.info,
						      pcmdinfo->in.u.add_arp_cache_entry.scratch,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
		status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
							pcmdinfo->in.u.alloc_local_mac_entry.scratch,
							pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
		status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
						      &pcmdinfo->in.u.add_local_mac_entry.info,
						      pcmdinfo->in.u.add_local_mac_entry.scratch,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
		status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
						      pcmdinfo->in.u.del_local_mac_entry.scratch,
						      pcmdinfo->in.u.del_local_mac_entry.entry_idx,
						      pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_AH_CREATE:
		status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
					    &pcmdinfo->in.u.ah_create.info,
					    pcmdinfo->in.u.ah_create.scratch);
		break;
	case IRDMA_OP_AH_DESTROY:
		status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
					     &pcmdinfo->in.u.ah_destroy.info,
					     pcmdinfo->in.u.ah_destroy.scratch);
		break;
	case IRDMA_OP_MC_CREATE:
		status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
						   &pcmdinfo->in.u.mc_create.info,
						   pcmdinfo->in.u.mc_create.scratch);
		break;
	case IRDMA_OP_MC_DESTROY:
		status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
						    &pcmdinfo->in.u.mc_destroy.info,
						    pcmdinfo->in.u.mc_destroy.scratch);
		break;
	case IRDMA_OP_MC_MODIFY:
		status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
						   &pcmdinfo->in.u.mc_modify.info,
						   pcmdinfo->in.u.mc_modify.scratch);
		break;
	default:
		status = -EOPNOTSUPP;
		break;
	}

	return status;
}
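/*
 * Usage sketch (illustrative only, not driver code): a caller builds a
 * struct cqp_cmds_info for the opcode it needs and submits it through
 * irdma_process_cqp_cmd() below; only the union member matching the
 * opcode is examined by the dispatcher above. The scratch cookie shown
 * here is a hypothetical value a caller might use to locate its
 * request on completion.
 *
 *	struct cqp_cmds_info info = {0};
 *
 *	info.cqp_cmd = IRDMA_OP_CEQ_CREATE;
 *	info.post_sq = 1;
 *	info.in.u.ceq_create.ceq = ceq;
 *	info.in.u.ceq_create.scratch = (u64)(uintptr_t)ceq;
 *	if (irdma_process_cqp_cmd(dev, &info))
 *		goto err;	// inline execution failed
 */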
/**
 * irdma_process_cqp_cmd - process all cqp commands
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
int
irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
		      struct cqp_cmds_info *pcmdinfo)
{
	int status = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
	else
		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
	pcmdinfo->cqp_cmd_exec_status = status;
	spin_unlock_irqrestore(&dev->cqp_lock, flags);

	return status;
}

/**
 * irdma_process_bh - called from tasklet for cqp list
 * @dev: sc device struct
 */
void
irdma_process_bh(struct irdma_sc_dev *dev)
{
	int status = 0;
	struct cqp_cmds_info *pcmdinfo;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	while (!list_empty(&dev->cqp_cmd_head) &&
	       !irdma_cqp_ring_full(dev->cqp)) {
		pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
		if (status)
			pcmdinfo->cqp_cmd_exec_status = status;
	}
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
}
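/*
 * Flow sketch (assumed integration, not driver code): note that
 * irdma_process_cqp_cmd() above returns 0 both when the command ran
 * inline and when it was backlogged, so a 0 return does not by itself
 * mean the command has been posted to the CQP ring. The backlog is
 * drained from the bottom half; the handler name below is
 * hypothetical:
 *
 *	static void irdma_cqp_tasklet(unsigned long data)
 *	{
 *		struct irdma_sc_dev *dev = (struct irdma_sc_dev *)data;
 *
 *		// retry queued commands now that ring slots may be free
 *		irdma_process_bh(dev);
 *	}
 */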
/**
 * irdma_cfg_aeq - Configure AEQ interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 * @enable: true to enable, false to disable
 */
void
irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
{
	u32 reg_val;

	reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
	    FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
	    FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);

	writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
}

/**
 * sc_vsi_update_stats - Update statistics
 * @vsi: sc_vsi instance to update
 */
void
sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
	struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
	struct irdma_gather_stats *gather_stats =
	    vsi->pestat->gather_info.gather_stats_va;
	struct irdma_gather_stats *last_gather_stats =
	    vsi->pestat->gather_info.last_gather_stats_va;
	const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
	u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;

	irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
			   map, max_stat_idx);
}

/**
 * irdma_wait_pe_ready - Check if firmware is ready
 * @dev: provides access to registers
 */
static int
irdma_wait_pe_ready(struct irdma_sc_dev *dev)
{
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
		statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
		statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
		if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
		    statuscpu2 == 0x80)
			return 0;
		mdelay(100);
	} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);

	return -1;
}

static inline void
irdma_sc_init_hw(struct irdma_sc_dev *dev)
{
	switch (dev->hw_attrs.uk_attrs.hw_rev) {
	case IRDMA_GEN_2:
		icrdma_init_hw(dev);
		break;
	}
}

/**
 * irdma_sc_dev_init - Initialize control part of device
 * @dev: Device pointer
 * @info: Device init info
 */
int
irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
{
	u32 val;
	int ret_code = 0;
	u8 db_size;

	INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for CQP command backlog */
	mutex_init(&dev->ws_mutex);
	dev->debug_mask = info->debug_mask;
	dev->hmc_fn_id = info->hmc_fn_id;
	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
	dev->fpm_query_buf = info->fpm_query_buf;
	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
	dev->fpm_commit_buf = info->fpm_commit_buf;
	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;
	/* Set up the hardware limits; the HMC may limit them further */
	dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
	dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
	dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
	dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
	dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
	dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
	dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
	dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
	dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
	dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
	dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
	dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
	dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);

	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
	dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
	dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;

	dev->hw_attrs.max_pe_ready_count = 14;
	dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
	dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
	dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;

	irdma_sc_init_hw(dev);

	if (irdma_wait_pe_ready(dev))
		return -ETIMEDOUT;

	val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
	db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
	if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
		irdma_debug(dev, IRDMA_DEBUG_DEV,
			    "RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
			    val, db_size);
		return -ENODEV;
	}

	return ret_code;
}

/**
 * irdma_stat_val - Extract HW counter value from statistics buffer
 * @stats_val: pointer to statistics buffer
 * @byteoff: byte offset of counter value in the buffer (8B-aligned)
 * @bitoff: bit offset of counter value within 8B entry
 * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
 */
static inline u64
irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff, u64 bitmask)
{
	u16 idx = byteoff / sizeof(*stats_val);

	return (stats_val[idx] >> bitoff) & bitmask;
}
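/*
 * Worked example (hypothetical values): with byteoff = 24, the counter
 * lives in stats_val[3]; bitoff = 8 and bitmask = 0xffffff then select
 * a 24-bit field from that 64-bit entry:
 *
 *	u64 buf[4] = { 0, 0, 0, 0x00000000ab123456ULL };
 *
 *	irdma_stat_val(buf, 24, 8, 0xffffff);
 *	// (0xab123456 >> 8) & 0xffffff == 0xab1234
 */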
/**
 * irdma_stat_delta - Calculate counter delta
 * @new_val: updated counter value
 * @old_val: last counter value
 * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
 */
static inline u64
irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
{
	if (new_val >= old_val)
		return new_val - old_val;

	/* roll-over case */
	return max_val - old_val + new_val + 1;
}

/**
 * irdma_update_stats - Update statistics
 * @hw_stats: hw_stats instance to update
 * @gather_stats: updated stat counters
 * @last_gather_stats: last stat counters
 * @map: HW stat map (hw_stats => gather_stats)
 * @max_stat_idx: number of HW stats
 */
void
irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
		   struct irdma_gather_stats *gather_stats,
		   struct irdma_gather_stats *last_gather_stats,
		   const struct irdma_hw_stat_map *map,
		   u16 max_stat_idx)
{
	u64 *stats_val = hw_stats->stats_val;
	u16 i;

	for (i = 0; i < max_stat_idx; i++) {
		u64 new_val = irdma_stat_val(gather_stats->val,
					     map[i].byteoff, map[i].bitoff,
					     map[i].bitmask);
		u64 last_val = irdma_stat_val(last_gather_stats->val,
					      map[i].byteoff, map[i].bitoff,
					      map[i].bitmask);

		stats_val[i] += irdma_stat_delta(new_val, last_val,
						 map[i].bitmask);
	}

	irdma_memcpy(last_gather_stats, gather_stats,
		     sizeof(*last_gather_stats));
}
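/*
 * Rollover example (hypothetical values) for irdma_stat_delta() above:
 * a 24-bit counter last sampled at 0xfffff0 that now reads 0x10 has
 * wrapped once, so the accumulated delta is
 *
 *	(0xffffff - 0xfffff0) + 0x10 + 1 = 0x20	(32 events)
 *
 * rather than the negative difference a plain subtraction would give.
 * irdma_update_stats() relies on this when it folds each gather cycle
 * into the running 64-bit totals in hw_stats->stats_val[].
 */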