/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_ws.h"
#include "irdma_protos.h"

/**
 * irdma_qp_from_entry - Given entry, get to the qp structure
 * @entry: Points to list of qp structure
 */
static struct irdma_sc_qp *
irdma_qp_from_entry(struct list_head *entry)
{
	if (!entry)
		return NULL;

	return (struct irdma_sc_qp *)((char *)entry -
				      offsetof(struct irdma_sc_qp, list));
}

/**
 * irdma_get_qp_from_list - get next qp from a list
 * @head: Listhead of qp's
 * @qp: current qp
 */
struct irdma_sc_qp *
irdma_get_qp_from_list(struct list_head *head,
		       struct irdma_sc_qp *qp)
{
	struct list_head *lastentry;
	struct list_head *entry = NULL;

	if (list_empty(head))
		return NULL;

	if (!qp) {
		entry = (head)->next;
	} else {
		lastentry = &qp->list;
		entry = (lastentry)->next;
		if (entry == head)
			return NULL;
	}

	return irdma_qp_from_entry(entry);
}

/**
 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
 * @vsi: the VSI struct pointer
 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
 */
void
irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
{
	struct irdma_sc_qp *qp = NULL;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		mutex_lock(&vsi->qos[i].qos_mutex);
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (op == IRDMA_OP_RESUME) {
				if (!qp->dev->ws_add(vsi, i)) {
					qp->qs_handle =
					    vsi->qos[qp->user_pri].qs_handle;
					irdma_cqp_qp_suspend_resume(qp, op);
				} else {
					irdma_cqp_qp_suspend_resume(qp, op);
					irdma_modify_qp_to_err(qp);
				}
			} else if (op == IRDMA_OP_SUSPEND) {
				/* issue cqp suspend command */
				if (!irdma_cqp_qp_suspend_resume(qp, op))
					atomic_inc(&vsi->qp_suspend_reqs);
			}
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		}
		mutex_unlock(&vsi->qos[i].qos_mutex);
	}
}
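/*
 * Usage note (illustrative, not part of the driver): irdma_get_qp_from_list()
 * is a cursor-style iterator -- pass NULL to fetch the first QP on a
 * per-priority list, and the previously returned QP to fetch the next one.
 * Callers are expected to hold the matching qos_mutex for the whole walk, as
 * irdma_sc_suspend_resume_qps() does above.  A hypothetical helper counting
 * the QPs on one priority would look roughly like this:
 *
 *	static u32 count_qps_on_prio(struct irdma_sc_vsi *vsi, u8 prio)
 *	{
 *		struct irdma_sc_qp *qp = NULL;
 *		u32 cnt = 0;
 *
 *		mutex_lock(&vsi->qos[prio].qos_mutex);
 *		while ((qp = irdma_get_qp_from_list(&vsi->qos[prio].qplist, qp)))
 *			cnt++;
 *		mutex_unlock(&vsi->qos[prio].qos_mutex);
 *
 *		return cnt;
 *	}
 */

static void
irdma_set_qos_info(struct 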
irdma_sc_vsi *vsi, struct irdma_l2params *l2p) 120 { 121 u8 i; 122 123 vsi->qos_rel_bw = l2p->vsi_rel_bw; 124 vsi->qos_prio_type = l2p->vsi_prio_type; 125 vsi->dscp_mode = l2p->dscp_mode; 126 if (l2p->dscp_mode) { 127 irdma_memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map)); 128 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) 129 l2p->up2tc[i] = i; 130 } 131 for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++) 132 vsi->tc_print_warning[i] = true; 133 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 134 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 135 vsi->qos[i].qs_handle = l2p->qs_handle_list[i]; 136 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) 137 irdma_init_config_check(&vsi->cfg_check[i], 138 l2p->up2tc[i], 139 l2p->qs_handle_list[i]); 140 vsi->qos[i].traffic_class = l2p->up2tc[i]; 141 vsi->qos[i].rel_bw = 142 l2p->tc_info[vsi->qos[i].traffic_class].rel_bw; 143 vsi->qos[i].prio_type = 144 l2p->tc_info[vsi->qos[i].traffic_class].prio_type; 145 vsi->qos[i].valid = false; 146 } 147 } 148 149 /** 150 * irdma_change_l2params - given the new l2 parameters, change all qp 151 * @vsi: RDMA VSI pointer 152 * @l2params: New parameters from l2 153 */ 154 void 155 irdma_change_l2params(struct irdma_sc_vsi *vsi, 156 struct irdma_l2params *l2params) 157 { 158 if (l2params->tc_changed) { 159 vsi->tc_change_pending = false; 160 irdma_set_qos_info(vsi, l2params); 161 irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME); 162 } 163 if (l2params->mtu_changed) { 164 vsi->mtu = l2params->mtu; 165 if (vsi->ieq) 166 irdma_reinitialize_ieq(vsi); 167 } 168 } 169 170 /** 171 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp 172 * @qp: qp to be removed from qos 173 */ 174 void 175 irdma_qp_rem_qos(struct irdma_sc_qp *qp) 176 { 177 struct irdma_sc_vsi *vsi = qp->vsi; 178 179 irdma_debug(qp->dev, IRDMA_DEBUG_DCB, 180 "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n", 181 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist); 182 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); 183 if (qp->on_qoslist) { 184 qp->on_qoslist = false; 185 list_del(&qp->list); 186 } 187 mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); 188 } 189 190 /** 191 * irdma_qp_add_qos - called during setctx for qp to be added to qos 192 * @qp: qp to be added to qos 193 */ 194 void 195 irdma_qp_add_qos(struct irdma_sc_qp *qp) 196 { 197 struct irdma_sc_vsi *vsi = qp->vsi; 198 199 irdma_debug(qp->dev, IRDMA_DEBUG_DCB, 200 "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n", 201 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist); 202 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); 203 if (!qp->on_qoslist) { 204 list_add(&qp->list, &vsi->qos[qp->user_pri].qplist); 205 qp->on_qoslist = true; 206 qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; 207 } 208 mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); 209 } 210 211 /** 212 * irdma_sc_pd_init - initialize sc pd struct 213 * @dev: sc device struct 214 * @pd: sc pd ptr 215 * @pd_id: pd_id for allocated pd 216 * @abi_ver: User/Kernel ABI version 217 */ 218 void 219 irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id, 220 int abi_ver) 221 { 222 pd->pd_id = pd_id; 223 pd->abi_ver = abi_ver; 224 pd->dev = dev; 225 } 226 227 /** 228 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry 229 * @cqp: struct for cqp hw 230 * @info: arp entry information 231 * @scratch: u64 saved to be used during cqp completion 232 * @post_sq: flag for cqp db to ring 233 */ 234 static int 235 irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp, 236 
			     struct irdma_add_arp_cache_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 temp, hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);

	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
	set_64bit_val(wqe, IRDMA_BYTE_16, temp);

	hdr = info->arp_index |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	    FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, info->permanent) |
	    FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, true) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 arp_index, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = arp_index |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
			    struct irdma_apbvt_info *info,
			    u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, info->port);

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
	    FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_APBVT WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
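/*
 * Note on the CQP WQE pattern (informational comment only): every helper in
 * this file builds its control WQE the same way -- reserve a slot with
 * irdma_sc_cqp_get_next_send_wqe(), fill the operand quadwords with
 * set_64bit_val(), build the header with the opcode and the current polarity
 * (valid) bit, issue irdma_wmb() so the operands are globally visible before
 * the valid bit, write the header at IRDMA_BYTE_24, then ring the doorbell
 * when @post_sq is set.  A hypothetical caller enabling an accelerated TCP
 * port via the APBVT table might look roughly like this (caller-side names
 * are illustrative and completion handling is omitted):
 *
 *	struct irdma_apbvt_info info = { .port = tcp_port, .add = true };
 *	int ret;
 *
 *	ret = irdma_sc_manage_apbvt_entry(dev->cqp, &info, scratch, true);
 *	if (ret)
 *		return ret;
 *	// wait for the CQP completion tagged with @scratch
 */

/**
 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started.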
346 * For passive connections, when listener is created, it will 347 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local 348 * ip address and tcp port. When SYN is received (passive 349 * connections) or sent (active connections), this routine is 350 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED 351 * and quad is passed in info. 352 * 353 * When iwarp connection is done and its state moves to RTS, the 354 * quad hash entry in the hardware will point to iwarp's qp 355 * number and requires no calls from the driver. 356 */ 357 static int 358 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp, 359 struct irdma_qhash_table_info *info, 360 u64 scratch, bool post_sq) 361 { 362 __le64 *wqe; 363 u64 qw1 = 0; 364 u64 qw2 = 0; 365 u64 temp; 366 struct irdma_sc_vsi *vsi = info->vsi; 367 368 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 369 if (!wqe) 370 return -ENOSPC; 371 temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) | 372 LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) | 373 LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40); 374 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 375 376 qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) | 377 FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port); 378 if (info->ipv4_valid) { 379 set_64bit_val(wqe, IRDMA_BYTE_48, 380 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0])); 381 } else { 382 set_64bit_val(wqe, IRDMA_BYTE_56, 383 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) | 384 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1])); 385 386 set_64bit_val(wqe, IRDMA_BYTE_48, 387 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) | 388 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3])); 389 } 390 qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE, 391 vsi->qos[info->user_pri].qs_handle); 392 if (info->vlan_valid) 393 qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id); 394 set_64bit_val(wqe, IRDMA_BYTE_16, qw2); 395 if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) { 396 qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port); 397 if (!info->ipv4_valid) { 398 set_64bit_val(wqe, IRDMA_BYTE_40, 399 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) | 400 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1])); 401 set_64bit_val(wqe, IRDMA_BYTE_32, 402 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) | 403 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3])); 404 } else { 405 set_64bit_val(wqe, IRDMA_BYTE_32, 406 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0])); 407 } 408 } 409 410 set_64bit_val(wqe, IRDMA_BYTE_8, qw1); 411 temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) | 412 FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE, 413 IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) | 414 FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) | 415 FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) | 416 FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) | 417 FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type); 418 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 419 420 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 421 422 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_QHASH WQE", wqe, 423 IRDMA_CQP_WQE_SIZE * 8); 424 if (post_sq) 425 irdma_sc_cqp_post_sq(cqp); 426 427 return 0; 428 } 429 430 /** 431 * irdma_sc_qp_init - initialize qp 432 * @qp: sc qp 433 * @info: initialization qp info 434 */ 435 int 436 irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info) 437 { 438 int ret_code; 439 u32 
pble_obj_cnt; 440 u16 wqe_size; 441 442 if (info->qp_uk_init_info.max_sq_frag_cnt > 443 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags || 444 info->qp_uk_init_info.max_rq_frag_cnt > 445 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) 446 return -EINVAL; 447 448 qp->dev = info->pd->dev; 449 qp->vsi = info->vsi; 450 qp->ieq_qp = info->vsi->exception_lan_q; 451 qp->sq_pa = info->sq_pa; 452 qp->rq_pa = info->rq_pa; 453 qp->hw_host_ctx_pa = info->host_ctx_pa; 454 qp->q2_pa = info->q2_pa; 455 qp->shadow_area_pa = info->shadow_area_pa; 456 qp->q2_buf = info->q2; 457 qp->pd = info->pd; 458 qp->hw_host_ctx = info->host_ctx; 459 info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db; 460 ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info); 461 if (ret_code) 462 return ret_code; 463 464 qp->virtual_map = info->virtual_map; 465 pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 466 467 if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) || 468 (info->virtual_map && info->rq_pa >= pble_obj_cnt)) 469 return -EINVAL; 470 471 qp->llp_stream_handle = (void *)(-1); 472 qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, 473 IRDMA_QUEUE_TYPE_SQ_RQ); 474 irdma_debug(qp->dev, IRDMA_DEBUG_WQE, 475 "hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size, 476 qp->qp_uk.sq_ring.size); 477 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) 478 wqe_size = IRDMA_WQE_SIZE_128; 479 else 480 ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, 481 &wqe_size); 482 if (ret_code) 483 return ret_code; 484 485 qp->hw_rq_size = 486 irdma_get_encoded_wqe_size(qp->qp_uk.rq_size * 487 (wqe_size / IRDMA_QP_WQE_MIN_SIZE), 488 IRDMA_QUEUE_TYPE_SQ_RQ); 489 irdma_debug(qp->dev, IRDMA_DEBUG_WQE, 490 "hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n", 491 qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size); 492 493 qp->sq_tph_val = info->sq_tph_val; 494 qp->rq_tph_val = info->rq_tph_val; 495 qp->sq_tph_en = info->sq_tph_en; 496 qp->rq_tph_en = info->rq_tph_en; 497 qp->rcv_tph_en = info->rcv_tph_en; 498 qp->xmit_tph_en = info->xmit_tph_en; 499 qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq; 500 qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle; 501 502 return 0; 503 } 504 505 /** 506 * irdma_sc_qp_create - create qp 507 * @qp: sc qp 508 * @info: qp create info 509 * @scratch: u64 saved to be used during cqp completion 510 * @post_sq: flag for cqp db to ring 511 */ 512 int 513 irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info, 514 u64 scratch, bool post_sq) 515 { 516 struct irdma_sc_cqp *cqp; 517 __le64 *wqe; 518 u64 hdr; 519 520 cqp = qp->dev->cqp; 521 if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id || 522 qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1)) 523 return -EINVAL; 524 525 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 526 if (!wqe) 527 return -ENOSPC; 528 529 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa); 530 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa); 531 532 hdr = qp->qp_uk.qp_id | 533 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) | 534 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) | 535 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) | 536 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) | 537 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) | 538 FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) | 539 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) | 540 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) | 
541 FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID, 542 info->arp_cache_idx_valid) | 543 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) | 544 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 545 546 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 547 548 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 549 550 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_CREATE WQE", wqe, 551 IRDMA_CQP_WQE_SIZE * 8); 552 if (post_sq) 553 irdma_sc_cqp_post_sq(cqp); 554 555 return 0; 556 } 557 558 /** 559 * irdma_sc_qp_modify - modify qp cqp wqe 560 * @qp: sc qp 561 * @info: modify qp info 562 * @scratch: u64 saved to be used during cqp completion 563 * @post_sq: flag for cqp db to ring 564 */ 565 int 566 irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info, 567 u64 scratch, bool post_sq) 568 { 569 __le64 *wqe; 570 struct irdma_sc_cqp *cqp; 571 u64 hdr; 572 u8 term_actions = 0; 573 u8 term_len = 0; 574 575 cqp = qp->dev->cqp; 576 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 577 if (!wqe) 578 return -ENOSPC; 579 580 if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) { 581 if (info->dont_send_fin) 582 term_actions += IRDMAQP_TERM_SEND_TERM_ONLY; 583 if (info->dont_send_term) 584 term_actions += IRDMAQP_TERM_SEND_FIN_ONLY; 585 if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN || 586 term_actions == IRDMAQP_TERM_SEND_TERM_ONLY) 587 term_len = info->termlen; 588 } 589 590 set_64bit_val(wqe, IRDMA_BYTE_8, 591 FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) | 592 FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len)); 593 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa); 594 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa); 595 596 hdr = qp->qp_uk.qp_id | 597 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) | 598 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) | 599 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) | 600 FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID, 601 info->cached_var_valid) | 602 FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) | 603 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) | 604 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) | 605 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) | 606 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) | 607 FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) | 608 FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, 609 info->remove_hash_idx) | 610 FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) | 611 FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) | 612 FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID, 613 info->arp_cache_idx_valid) | 614 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) | 615 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 616 617 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 618 619 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 620 621 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_MODIFY WQE", wqe, 622 IRDMA_CQP_WQE_SIZE * 8); 623 if (post_sq) 624 irdma_sc_cqp_post_sq(cqp); 625 626 return 0; 627 } 628 629 /** 630 * irdma_sc_qp_destroy - cqp destroy qp 631 * @qp: sc qp 632 * @scratch: u64 saved to be used during cqp completion 633 * @remove_hash_idx: flag if to remove hash idx 634 * @ignore_mw_bnd: memory window bind flag 635 * @post_sq: flag for cqp db to ring 636 */ 637 int 638 irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch, 639 bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq) 640 { 641 __le64 *wqe; 642 struct irdma_sc_cqp *cqp; 643 u64 hdr; 644 645 cqp = qp->dev->cqp; 646 wqe = 
irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 647 if (!wqe) 648 return -ENOSPC; 649 650 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa); 651 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa); 652 653 hdr = qp->qp_uk.qp_id | 654 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) | 655 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) | 656 FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) | 657 FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) | 658 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 659 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 660 661 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 662 663 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_DESTROY WQE", wqe, 664 IRDMA_CQP_WQE_SIZE * 8); 665 if (post_sq) 666 irdma_sc_cqp_post_sq(cqp); 667 668 return 0; 669 } 670 671 /** 672 * irdma_sc_get_encoded_ird_size - 673 * @ird_size: IRD size 674 * The ird from the connection is rounded to a supported HW setting and then encoded 675 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based 676 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input 677 */ 678 static u8 irdma_sc_get_encoded_ird_size(u16 ird_size) { 679 switch (ird_size ? 680 roundup_pow_of_two(2 * ird_size) : 4) { 681 case 256: 682 return IRDMA_IRD_HW_SIZE_256; 683 case 128: 684 return IRDMA_IRD_HW_SIZE_128; 685 case 64: 686 case 32: 687 return IRDMA_IRD_HW_SIZE_64; 688 case 16: 689 case 8: 690 return IRDMA_IRD_HW_SIZE_16; 691 case 4: 692 default: 693 break; 694 } 695 696 return IRDMA_IRD_HW_SIZE_4; 697 } 698 699 /** 700 * irdma_sc_qp_setctx_roce - set qp's context 701 * @qp: sc qp 702 * @qp_ctx: context ptr 703 * @info: ctx info 704 */ 705 void 706 irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx, 707 struct irdma_qp_host_ctx_info *info) 708 { 709 struct irdma_roce_offload_info *roce_info; 710 struct irdma_udp_offload_info *udp; 711 u8 push_mode_en; 712 u32 push_idx; 713 u64 mac; 714 715 roce_info = info->roce_info; 716 udp = info->udp_info; 717 718 mac = LS_64_1(roce_info->mac_addr[5], 16) | 719 LS_64_1(roce_info->mac_addr[4], 24) | 720 LS_64_1(roce_info->mac_addr[3], 32) | 721 LS_64_1(roce_info->mac_addr[2], 40) | 722 LS_64_1(roce_info->mac_addr[1], 48) | 723 LS_64_1(roce_info->mac_addr[0], 56); 724 725 qp->user_pri = info->user_pri; 726 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) { 727 push_mode_en = 0; 728 push_idx = 0; 729 } else { 730 push_mode_en = 1; 731 push_idx = qp->push_idx; 732 } 733 set_64bit_val(qp_ctx, IRDMA_BYTE_0, 734 FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) | 735 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) | 736 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) | 737 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) | 738 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) | 739 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) | 740 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) | 741 FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) | 742 FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) | 743 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) | 744 FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) | 745 FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) | 746 FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) | 747 FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag)); 748 set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa); 749 set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa); 750 if (roce_info->dcqcn_en || roce_info->dctcp_en) { 751 udp->tos &= ~ECN_CODE_PT_MASK; 752 udp->tos |= 
ECN_CODE_PT_VAL; 753 } 754 755 set_64bit_val(qp_ctx, IRDMA_BYTE_24, 756 FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) | 757 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) | 758 FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) | 759 FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) | 760 FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port)); 761 set_64bit_val(qp_ctx, IRDMA_BYTE_32, 762 FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) | 763 FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3])); 764 set_64bit_val(qp_ctx, IRDMA_BYTE_40, 765 FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) | 766 FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1])); 767 set_64bit_val(qp_ctx, IRDMA_BYTE_48, 768 FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) | 769 FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) | 770 FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx)); 771 set_64bit_val(qp_ctx, IRDMA_BYTE_56, 772 FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) | 773 FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) | 774 FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) | 775 FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label)); 776 set_64bit_val(qp_ctx, IRDMA_BYTE_64, 777 FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) | 778 FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp)); 779 set_64bit_val(qp_ctx, IRDMA_BYTE_80, 780 FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) | 781 FIELD_PREP(IRDMAQPC_LSN, udp->lsn)); 782 set_64bit_val(qp_ctx, IRDMA_BYTE_88, 783 FIELD_PREP(IRDMAQPC_EPSN, udp->epsn)); 784 set_64bit_val(qp_ctx, IRDMA_BYTE_96, 785 FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) | 786 FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una)); 787 set_64bit_val(qp_ctx, IRDMA_BYTE_112, 788 FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd)); 789 set_64bit_val(qp_ctx, IRDMA_BYTE_128, 790 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) | 791 FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) | 792 FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) | 793 FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin)); 794 set_64bit_val(qp_ctx, IRDMA_BYTE_136, 795 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) | 796 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num)); 797 set_64bit_val(qp_ctx, IRDMA_BYTE_144, 798 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx)); 799 set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac); 800 set_64bit_val(qp_ctx, IRDMA_BYTE_160, 801 FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) | 802 FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) | 803 FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) | 804 FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) | 805 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) | 806 FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) | 807 FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) | 808 FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) | 809 FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) | 810 FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) | 811 FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) | 812 FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) | 813 FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en)); 814 set_64bit_val(qp_ctx, IRDMA_BYTE_168, 815 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx)); 816 set_64bit_val(qp_ctx, IRDMA_BYTE_176, 817 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) | 818 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) | 819 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle)); 820 set_64bit_val(qp_ctx, IRDMA_BYTE_184, 821 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) | 822 
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_192,
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_200,
		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
	set_64bit_val(qp_ctx, IRDMA_BYTE_208,
		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}

/**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			       bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ALLOCATE_LOCAL_MAC WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_add_local_mac_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_local_mac_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 temp, header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, IRDMA_BYTE_32, temp);

	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ADD_LOCAL_MAC WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}
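/*
 * Lifecycle note (informational comment only): the local MAC table is driven
 * by three CQP operations -- irdma_sc_alloc_local_mac_entry() reserves a
 * table slot (the chosen index is expected to be reported back in the CQP
 * completion), irdma_sc_add_local_mac_entry() then programs the MAC address
 * at that index, and irdma_sc_del_local_mac_entry() below frees the slot
 * again, optionally ignoring the reference count.
 */

/**
 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address delete
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 entry_idx, u8 ignore_ref_count,
			     bool post_sq)
{
	__le64 *wqe;
	u64 header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, 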
cqp->polarity) | 932 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count); 933 934 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 935 936 set_64bit_val(wqe, IRDMA_BYTE_24, header); 937 938 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE", 939 wqe, IRDMA_CQP_WQE_SIZE * 8); 940 941 if (post_sq) 942 irdma_sc_cqp_post_sq(cqp); 943 return 0; 944 } 945 946 /** 947 * irdma_sc_qp_setctx - set qp's context 948 * @qp: sc qp 949 * @qp_ctx: context ptr 950 * @info: ctx info 951 */ 952 void 953 irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx, 954 struct irdma_qp_host_ctx_info *info) 955 { 956 struct irdma_iwarp_offload_info *iw; 957 struct irdma_tcp_offload_info *tcp; 958 struct irdma_sc_dev *dev; 959 u8 push_mode_en; 960 u32 push_idx; 961 u64 qw0, qw3, qw7 = 0, qw16 = 0; 962 u64 mac = 0; 963 964 iw = info->iwarp_info; 965 tcp = info->tcp_info; 966 dev = qp->dev; 967 if (iw->rcv_mark_en) { 968 qp->pfpdu.marker_len = 4; 969 qp->pfpdu.rcv_start_seq = tcp->rcv_nxt; 970 } 971 qp->user_pri = info->user_pri; 972 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) { 973 push_mode_en = 0; 974 push_idx = 0; 975 } else { 976 push_mode_en = 1; 977 push_idx = qp->push_idx; 978 } 979 qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) | 980 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) | 981 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) | 982 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) | 983 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) | 984 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) | 985 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en); 986 987 set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa); 988 set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa); 989 990 qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) | 991 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size); 992 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 993 qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, 994 qp->src_mac_addr_idx); 995 set_64bit_val(qp_ctx, IRDMA_BYTE_136, 996 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) | 997 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num)); 998 set_64bit_val(qp_ctx, IRDMA_BYTE_168, 999 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx)); 1000 set_64bit_val(qp_ctx, IRDMA_BYTE_176, 1001 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) | 1002 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) | 1003 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) | 1004 FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp)); 1005 if (info->iwarp_info_valid) { 1006 qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) | 1007 FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) | 1008 FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) | 1009 FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) | 1010 FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) | 1011 FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) | 1012 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, 1013 iw->err_rq_idx_valid); 1014 qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id); 1015 qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) | 1016 FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin); 1017 set_64bit_val(qp_ctx, IRDMA_BYTE_144, 1018 FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) | 1019 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx)); 1020 1021 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1022 mac = LS_64_1(iw->mac_addr[5], 16) | 1023 LS_64_1(iw->mac_addr[4], 24) | 1024 LS_64_1(iw->mac_addr[3], 32) | 1025 LS_64_1(iw->mac_addr[2], 40) | 1026 LS_64_1(iw->mac_addr[1], 48) | 1027 LS_64_1(iw->mac_addr[0], 56); 1028 } 1029 1030 set_64bit_val(qp_ctx, IRDMA_BYTE_152, 1031 mac | 
FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent)); 1032 set_64bit_val(qp_ctx, IRDMA_BYTE_160, 1033 FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) | 1034 FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) | 1035 FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) | 1036 FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) | 1037 FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) | 1038 FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) | 1039 FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) | 1040 FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) | 1041 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) | 1042 FIELD_PREP(IRDMAQPC_IWARPMODE, 1) | 1043 FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) | 1044 FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) | 1045 FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) | 1046 FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset) | 1047 FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset) | 1048 FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en)); 1049 } 1050 if (info->tcp_info_valid) { 1051 qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) | 1052 FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) | 1053 FIELD_PREP(IRDMAQPC_INSERTVLANTAG, 1054 tcp->insert_vlan_tag) | 1055 FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) | 1056 FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) | 1057 FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) | 1058 FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh); 1059 1060 if (iw->ecn_en || iw->dctcp_en) { 1061 tcp->tos &= ~ECN_CODE_PT_MASK; 1062 tcp->tos |= ECN_CODE_PT_VAL; 1063 } 1064 1065 qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) | 1066 FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) | 1067 FIELD_PREP(IRDMAQPC_TOS, tcp->tos) | 1068 FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) | 1069 FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port); 1070 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { 1071 qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx); 1072 1073 qp->src_mac_addr_idx = tcp->src_mac_addr_idx; 1074 } 1075 set_64bit_val(qp_ctx, IRDMA_BYTE_32, 1076 FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) | 1077 FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3])); 1078 set_64bit_val(qp_ctx, IRDMA_BYTE_40, 1079 FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) | 1080 FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1])); 1081 set_64bit_val(qp_ctx, IRDMA_BYTE_48, 1082 FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) | 1083 FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) | 1084 FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) | 1085 FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx)); 1086 qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) | 1087 FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) | 1088 FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT, 1089 tcp->ignore_tcp_opt) | 1090 FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT, 1091 tcp->ignore_tcp_uns_opt) | 1092 FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) | 1093 FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) | 1094 FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale); 1095 set_64bit_val(qp_ctx, IRDMA_BYTE_72, 1096 FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) | 1097 FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age)); 1098 set_64bit_val(qp_ctx, IRDMA_BYTE_80, 1099 FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) | 1100 FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd)); 1101 set_64bit_val(qp_ctx, IRDMA_BYTE_88, 1102 FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) | 1103 FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd)); 1104 set_64bit_val(qp_ctx, IRDMA_BYTE_96, 1105 
FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) | 1106 FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una)); 1107 set_64bit_val(qp_ctx, IRDMA_BYTE_104, 1108 FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) | 1109 FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var)); 1110 set_64bit_val(qp_ctx, IRDMA_BYTE_112, 1111 FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) | 1112 FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd)); 1113 set_64bit_val(qp_ctx, IRDMA_BYTE_120, 1114 FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) | 1115 FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2)); 1116 qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) | 1117 FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh); 1118 set_64bit_val(qp_ctx, IRDMA_BYTE_184, 1119 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) | 1120 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2])); 1121 set_64bit_val(qp_ctx, IRDMA_BYTE_192, 1122 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) | 1123 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0])); 1124 set_64bit_val(qp_ctx, IRDMA_BYTE_200, 1125 FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) | 1126 FIELD_PREP(IRDMAQPC_TLOW, iw->t_low)); 1127 set_64bit_val(qp_ctx, IRDMA_BYTE_208, 1128 FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx)); 1129 } 1130 1131 set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0); 1132 set_64bit_val(qp_ctx, IRDMA_BYTE_24, qw3); 1133 set_64bit_val(qp_ctx, IRDMA_BYTE_56, qw7); 1134 set_64bit_val(qp_ctx, IRDMA_BYTE_128, qw16); 1135 1136 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX", qp_ctx, 1137 IRDMA_QP_CTX_SIZE); 1138 } 1139 1140 /** 1141 * irdma_sc_alloc_stag - mr stag alloc 1142 * @dev: sc device struct 1143 * @info: stag info 1144 * @scratch: u64 saved to be used during cqp completion 1145 * @post_sq: flag for cqp db to ring 1146 */ 1147 static int 1148 irdma_sc_alloc_stag(struct irdma_sc_dev *dev, 1149 struct irdma_allocate_stag_info *info, 1150 u64 scratch, bool post_sq) 1151 { 1152 __le64 *wqe; 1153 struct irdma_sc_cqp *cqp; 1154 u64 hdr; 1155 enum irdma_page_size page_size; 1156 1157 if (!info->total_len && !info->all_memory) 1158 return -EINVAL; 1159 1160 if (info->page_size == 0x40000000) 1161 page_size = IRDMA_PAGE_SIZE_1G; 1162 else if (info->page_size == 0x200000) 1163 page_size = IRDMA_PAGE_SIZE_2M; 1164 else 1165 page_size = IRDMA_PAGE_SIZE_4K; 1166 1167 cqp = dev->cqp; 1168 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 1169 if (!wqe) 1170 return -ENOSPC; 1171 1172 set_64bit_val(wqe, IRDMA_BYTE_8, 1173 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) | 1174 FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len)); 1175 set_64bit_val(wqe, IRDMA_BYTE_16, 1176 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx)); 1177 set_64bit_val(wqe, IRDMA_BYTE_40, 1178 FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index)); 1179 1180 if (info->chunk_size) 1181 set_64bit_val(wqe, IRDMA_BYTE_48, 1182 FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx)); 1183 1184 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) | 1185 FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) | 1186 FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) | 1187 FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | 1188 FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) | 1189 FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) | 1190 FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | 1191 FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) | 1192 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 1193 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1194 
1195 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1196 1197 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "ALLOC_STAG WQE", wqe, 1198 IRDMA_CQP_WQE_SIZE * 8); 1199 if (post_sq) 1200 irdma_sc_cqp_post_sq(cqp); 1201 1202 return 0; 1203 } 1204 1205 /** 1206 * irdma_sc_mr_reg_non_shared - non-shared mr registration 1207 * @dev: sc device struct 1208 * @info: mr info 1209 * @scratch: u64 saved to be used during cqp completion 1210 * @post_sq: flag for cqp db to ring 1211 */ 1212 static int 1213 irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev, 1214 struct irdma_reg_ns_stag_info *info, 1215 u64 scratch, bool post_sq) 1216 { 1217 __le64 *wqe; 1218 u64 fbo; 1219 struct irdma_sc_cqp *cqp; 1220 u64 hdr; 1221 u32 pble_obj_cnt; 1222 bool remote_access; 1223 u8 addr_type; 1224 enum irdma_page_size page_size; 1225 1226 if (!info->total_len && !info->all_memory) 1227 return -EINVAL; 1228 1229 if (info->page_size == 0x40000000) 1230 page_size = IRDMA_PAGE_SIZE_1G; 1231 else if (info->page_size == 0x200000) 1232 page_size = IRDMA_PAGE_SIZE_2M; 1233 else if (info->page_size == 0x1000) 1234 page_size = IRDMA_PAGE_SIZE_4K; 1235 else 1236 return -EINVAL; 1237 1238 if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY | 1239 IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY)) 1240 remote_access = true; 1241 else 1242 remote_access = false; 1243 1244 pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 1245 if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt) 1246 return -EINVAL; 1247 1248 cqp = dev->cqp; 1249 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 1250 if (!wqe) 1251 return -ENOSPC; 1252 fbo = info->va & (info->page_size - 1); 1253 1254 set_64bit_val(wqe, IRDMA_BYTE_0, 1255 (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ? 1256 info->va : fbo)); 1257 set_64bit_val(wqe, IRDMA_BYTE_8, 1258 FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) | 1259 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); 1260 set_64bit_val(wqe, IRDMA_BYTE_16, 1261 FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) | 1262 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx)); 1263 if (!info->chunk_size) 1264 set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa); 1265 else 1266 set_64bit_val(wqe, IRDMA_BYTE_48, 1267 FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index)); 1268 1269 set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index); 1270 1271 addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 
	    1 : 0;
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MR_REG_NS WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_dealloc_stag - deallocate stag
 * @dev: sc device struct
 * @info: dealloc stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
		      struct irdma_dealloc_stag_info *info,
		      u64 scratch, bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "DEALLOC_STAG WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_mw_alloc - mw allocate
 * @dev: sc device struct
 * @info: memory window allocation information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
		  struct irdma_mw_alloc_info *info, u64 scratch,
		  bool post_sq)
{
	u64 hdr;
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
	    FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
		       info->mw1_bind_dont_vldt_key) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MW_ALLOC WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
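/*
 * Page-size note (informational comment only): the STag/MR helpers above take
 * the host page size in bytes and translate it to the HW encoding -- 0x1000
 * maps to IRDMA_PAGE_SIZE_4K, 0x200000 to IRDMA_PAGE_SIZE_2M and 0x40000000
 * to IRDMA_PAGE_SIZE_1G.  irdma_sc_mr_reg_non_shared() rejects any other size
 * with -EINVAL, while irdma_sc_alloc_stag() and irdma_sc_mr_fast_register()
 * fall back to 4K.  For zero-based mappings, irdma_sc_mr_reg_non_shared()
 * derives the first-byte offset as the VA modulo the page size, e.g. with 2M
 * pages:
 *
 *	fbo = info->va & (info->page_size - 1);	// va = 0x7f32a6e03210 -> fbo = 0x3210
 */

/**
 * irdma_sc_mr_fast_register - 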
Posts RDMA fast register mr WR to iwarp qp 1380 * @qp: sc qp struct 1381 * @info: fast mr info 1382 * @post_sq: flag for cqp db to ring 1383 */ 1384 int 1385 irdma_sc_mr_fast_register(struct irdma_sc_qp *qp, 1386 struct irdma_fast_reg_stag_info *info, 1387 bool post_sq) 1388 { 1389 u64 temp, hdr; 1390 __le64 *wqe; 1391 u32 wqe_idx; 1392 u16 quanta = IRDMA_QP_WQE_MIN_QUANTA; 1393 enum irdma_page_size page_size; 1394 struct irdma_post_sq_info sq_info = {0}; 1395 1396 if (info->page_size == 0x40000000) 1397 page_size = IRDMA_PAGE_SIZE_1G; 1398 else if (info->page_size == 0x200000) 1399 page_size = IRDMA_PAGE_SIZE_2M; 1400 else 1401 page_size = IRDMA_PAGE_SIZE_4K; 1402 1403 sq_info.wr_id = info->wr_id; 1404 sq_info.signaled = info->signaled; 1405 sq_info.push_wqe = info->push_wqe; 1406 1407 wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, &quanta, 0, &sq_info); 1408 if (!wqe) 1409 return -ENOSPC; 1410 1411 qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled; 1412 irdma_debug(qp->dev, IRDMA_DEBUG_MR, 1413 "wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id, 1414 wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid); 1415 1416 temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1417 (uintptr_t)info->va : info->fbo; 1418 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 1419 1420 temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI, 1421 info->first_pm_pbl_index >> 16); 1422 set_64bit_val(wqe, IRDMA_BYTE_8, 1423 FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) | 1424 FIELD_PREP(IRDMAQPSQ_PBLADDR, info->reg_addr_pa >> IRDMA_HW_PAGE_SHIFT)); 1425 set_64bit_val(wqe, IRDMA_BYTE_16, 1426 info->total_len | 1427 FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index)); 1428 1429 hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) | 1430 FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) | 1431 FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) | 1432 FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) | 1433 FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) | 1434 FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) | 1435 FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) | 1436 FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 
1 : 0)) | 1437 FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | 1438 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | 1439 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | 1440 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1441 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1442 1443 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1444 1445 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe, 1446 IRDMA_QP_WQE_MIN_SIZE); 1447 if (sq_info.push_wqe) 1448 irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq); 1449 else if (post_sq) 1450 irdma_uk_qp_post_wr(&qp->qp_uk); 1451 1452 return 0; 1453 } 1454 1455 /** 1456 * irdma_sc_gen_rts_ae - request AE generated after RTS 1457 * @qp: sc qp struct 1458 */ 1459 static void 1460 irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp) 1461 { 1462 __le64 *wqe; 1463 u64 hdr; 1464 struct irdma_qp_uk *qp_uk; 1465 1466 qp_uk = &qp->qp_uk; 1467 1468 wqe = qp_uk->sq_base[1].elem; 1469 1470 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) | 1471 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) | 1472 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1473 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1474 1475 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1476 irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "NOP W/LOCAL FENCE WQE", wqe, 1477 IRDMA_QP_WQE_MIN_SIZE); 1478 1479 wqe = qp_uk->sq_base[2].elem; 1480 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) | 1481 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1482 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1483 1484 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1485 irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe, 1486 IRDMA_QP_WQE_MIN_SIZE); 1487 } 1488 1489 /** 1490 * irdma_sc_send_lsmm - send last streaming mode message 1491 * @qp: sc qp struct 1492 * @lsmm_buf: buffer with lsmm message 1493 * @size: size of lsmm buffer 1494 * @stag: stag of lsmm buffer 1495 */ 1496 void 1497 irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size, 1498 irdma_stag stag) 1499 { 1500 __le64 *wqe; 1501 u64 hdr; 1502 struct irdma_qp_uk *qp_uk; 1503 1504 qp_uk = &qp->qp_uk; 1505 wqe = qp_uk->sq_base->elem; 1506 1507 set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf); 1508 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { 1509 set_64bit_val(wqe, IRDMA_BYTE_8, 1510 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) | 1511 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag)); 1512 } else { 1513 set_64bit_val(wqe, IRDMA_BYTE_8, 1514 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) | 1515 FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) | 1516 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity)); 1517 } 1518 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1519 1520 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) | 1521 FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) | 1522 FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) | 1523 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1524 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1525 1526 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1527 1528 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM WQE", wqe, 1529 IRDMA_QP_WQE_MIN_SIZE); 1530 1531 if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) 1532 irdma_sc_gen_rts_ae(qp); 1533 } 1534 1535 /** 1536 * irdma_sc_send_lsmm_nostag - for privilege qp 1537 * @qp: sc qp struct 1538 * @lsmm_buf: buffer with lsmm message 1539 * @size: size of lsmm buffer 1540 */ 1541 void 1542 irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size) 1543 { 1544 __le64 
*wqe; 1545 u64 hdr; 1546 struct irdma_qp_uk *qp_uk; 1547 1548 qp_uk = &qp->qp_uk; 1549 wqe = qp_uk->sq_base->elem; 1550 1551 set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf); 1552 1553 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) 1554 set_64bit_val(wqe, IRDMA_BYTE_8, 1555 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size)); 1556 else 1557 set_64bit_val(wqe, IRDMA_BYTE_8, 1558 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) | 1559 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity)); 1560 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1561 1562 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) | 1563 FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) | 1564 FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) | 1565 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1566 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1567 1568 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1569 1570 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe, 1571 IRDMA_QP_WQE_MIN_SIZE); 1572 } 1573 1574 /** 1575 * irdma_sc_send_rtt - send last read0 or write0 1576 * @qp: sc qp struct 1577 * @read: Do read0 or write0 1578 */ 1579 void 1580 irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read) 1581 { 1582 __le64 *wqe; 1583 u64 hdr; 1584 struct irdma_qp_uk *qp_uk; 1585 1586 qp_uk = &qp->qp_uk; 1587 wqe = qp_uk->sq_base->elem; 1588 1589 set_64bit_val(wqe, IRDMA_BYTE_0, 0); 1590 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1591 if (read) { 1592 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { 1593 set_64bit_val(wqe, IRDMA_BYTE_8, 1594 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd)); 1595 } else { 1596 set_64bit_val(wqe, IRDMA_BYTE_8, 1597 (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, 1598 qp->qp_uk.swqe_polarity)); 1599 } 1600 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) | 1601 FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) | 1602 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1603 1604 } else { 1605 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { 1606 set_64bit_val(wqe, IRDMA_BYTE_8, 0); 1607 } else { 1608 set_64bit_val(wqe, IRDMA_BYTE_8, 1609 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity)); 1610 } 1611 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) | 1612 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); 1613 } 1614 1615 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1616 1617 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1618 1619 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "RTR WQE", wqe, 1620 IRDMA_QP_WQE_MIN_SIZE); 1621 1622 if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) 1623 irdma_sc_gen_rts_ae(qp); 1624 } 1625 1626 /** 1627 * irdma_iwarp_opcode - determine if incoming is rdma layer 1628 * @info: aeq info for the packet 1629 * @pkt: packet for error 1630 */ 1631 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt){ 1632 BE16 *mpa; 1633 u32 opcode = 0xffffffff; 1634 1635 if (info->q2_data_written) { 1636 mpa = (BE16 *) pkt; 1637 opcode = IRDMA_NTOHS(mpa[1]) & 0xf; 1638 } 1639 1640 return opcode; 1641 } 1642 1643 /** 1644 * irdma_locate_mpa - return pointer to mpa in the pkt 1645 * @pkt: packet with data 1646 */ 1647 static u8 *irdma_locate_mpa(u8 *pkt) { 1648 /* skip over ethernet header */ 1649 pkt += IRDMA_MAC_HLEN; 1650 1651 /* Skip over IP and TCP headers */ 1652 pkt += 4 * (pkt[0] & 0x0f); 1653 pkt += 4 * ((pkt[12] >> 4) & 0x0f); 1654 1655 return pkt; 1656 } 1657 1658 /** 1659 * irdma_bld_termhdr_ctrl - setup terminate hdr control fields 1660 * @qp: sc qp ptr for pkt 1661 * @hdr: term hdr 1662 * @opcode: flush opcode for termhdr 1663 * 
@layer_etype: error layer + error type 1664 * @err: error code in the header 1665 */ 1666 static void 1667 irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp, 1668 struct irdma_terminate_hdr *hdr, 1669 enum irdma_flush_opcode opcode, 1670 u8 layer_etype, u8 err) 1671 { 1672 qp->flush_code = opcode; 1673 hdr->layer_etype = layer_etype; 1674 hdr->error_code = err; 1675 } 1676 1677 /** 1678 * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr 1679 * @pkt: ptr to mpa in offending pkt 1680 * @hdr: term hdr 1681 * @copy_len: offending pkt length to be copied to term hdr 1682 * @is_tagged: DDP tagged or untagged 1683 */ 1684 static void 1685 irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr, 1686 int *copy_len, u8 *is_tagged) 1687 { 1688 u16 ddp_seg_len; 1689 1690 ddp_seg_len = IRDMA_NTOHS(*(BE16 *) pkt); 1691 if (ddp_seg_len) { 1692 *copy_len = 2; 1693 hdr->hdrct = DDP_LEN_FLAG; 1694 if (pkt[2] & 0x80) { 1695 *is_tagged = 1; 1696 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { 1697 *copy_len += TERM_DDP_LEN_TAGGED; 1698 hdr->hdrct |= DDP_HDR_FLAG; 1699 } 1700 } else { 1701 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { 1702 *copy_len += TERM_DDP_LEN_UNTAGGED; 1703 hdr->hdrct |= DDP_HDR_FLAG; 1704 } 1705 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) && 1706 ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) { 1707 *copy_len += TERM_RDMA_LEN; 1708 hdr->hdrct |= RDMA_HDR_FLAG; 1709 } 1710 } 1711 } 1712 } 1713 1714 /** 1715 * irdma_bld_terminate_hdr - build terminate message header 1716 * @qp: qp associated with received terminate AE 1717 * @info: the struct containing AE information 1718 */ 1719 static int 1720 irdma_bld_terminate_hdr(struct irdma_sc_qp *qp, 1721 struct irdma_aeqe_info *info) 1722 { 1723 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; 1724 int copy_len = 0; 1725 u8 is_tagged = 0; 1726 u32 opcode; 1727 struct irdma_terminate_hdr *termhdr; 1728 1729 termhdr = (struct irdma_terminate_hdr *)qp->q2_buf; 1730 memset(termhdr, 0, Q2_BAD_FRAME_OFFSET); 1731 1732 if (info->q2_data_written) { 1733 pkt = irdma_locate_mpa(pkt); 1734 irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged); 1735 } 1736 1737 opcode = irdma_iwarp_opcode(info, pkt); 1738 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; 1739 qp->sq_flush_code = info->sq; 1740 qp->rq_flush_code = info->rq; 1741 1742 switch (info->ae_id) { 1743 case IRDMA_AE_AMP_UNALLOCATED_STAG: 1744 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1745 if (opcode == IRDMA_OP_TYPE_RDMA_WRITE) 1746 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, 1747 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1748 DDP_TAGGED_INV_STAG); 1749 else 1750 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1751 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1752 RDMAP_INV_STAG); 1753 break; 1754 case IRDMA_AE_AMP_BOUNDS_VIOLATION: 1755 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1756 if (info->q2_data_written) 1757 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, 1758 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1759 DDP_TAGGED_BOUNDS); 1760 else 1761 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1762 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1763 RDMAP_INV_BOUNDS); 1764 break; 1765 case IRDMA_AE_AMP_BAD_PD: 1766 switch (opcode) { 1767 case IRDMA_OP_TYPE_RDMA_WRITE: 1768 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, 1769 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1770 DDP_TAGGED_UNASSOC_STAG); 1771 break; 1772 case IRDMA_OP_TYPE_SEND_INV: 1773 case IRDMA_OP_TYPE_SEND_SOL_INV: 1774 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1775
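/* layer_etype packs the terminate layer in the high nibble and the
 * error type in the low nibble, e.g. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 * irdma_bld_termhdr_ctrl() records it together with the error code in
 * the terminate header. */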
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1776 RDMAP_CANT_INV_STAG); 1777 break; 1778 default: 1779 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1780 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1781 RDMAP_UNASSOC_STAG); 1782 } 1783 break; 1784 case IRDMA_AE_AMP_INVALID_STAG: 1785 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1786 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1787 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1788 RDMAP_INV_STAG); 1789 break; 1790 case IRDMA_AE_AMP_BAD_QP: 1791 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR, 1792 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1793 DDP_UNTAGGED_INV_QN); 1794 break; 1795 case IRDMA_AE_AMP_BAD_STAG_KEY: 1796 case IRDMA_AE_AMP_BAD_STAG_INDEX: 1797 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1798 switch (opcode) { 1799 case IRDMA_OP_TYPE_SEND_INV: 1800 case IRDMA_OP_TYPE_SEND_SOL_INV: 1801 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, 1802 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1803 RDMAP_CANT_INV_STAG); 1804 break; 1805 default: 1806 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1807 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1808 RDMAP_INV_STAG); 1809 } 1810 break; 1811 case IRDMA_AE_AMP_RIGHTS_VIOLATION: 1812 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: 1813 case IRDMA_AE_PRIV_OPERATION_DENIED: 1814 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1815 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1816 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1817 RDMAP_ACCESS); 1818 break; 1819 case IRDMA_AE_AMP_TO_WRAP: 1820 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1821 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1822 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1823 RDMAP_TO_WRAP); 1824 break; 1825 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 1826 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1827 (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); 1828 break; 1829 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 1830 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, 1831 (LAYER_DDP << 4) | DDP_CATASTROPHIC, 1832 DDP_CATASTROPHIC_LOCAL); 1833 break; 1834 case IRDMA_AE_LCE_QP_CATASTROPHIC: 1835 case IRDMA_AE_DDP_NO_L_BIT: 1836 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, 1837 (LAYER_DDP << 4) | DDP_CATASTROPHIC, 1838 DDP_CATASTROPHIC_LOCAL); 1839 break; 1840 case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN: 1841 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1842 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1843 DDP_UNTAGGED_INV_MSN_RANGE); 1844 break; 1845 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 1846 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1847 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, 1848 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1849 DDP_UNTAGGED_INV_TOO_LONG); 1850 break; 1851 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: 1852 if (is_tagged) 1853 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1854 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1855 DDP_TAGGED_INV_DDP_VER); 1856 else 1857 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1858 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1859 DDP_UNTAGGED_INV_DDP_VER); 1860 break; 1861 case IRDMA_AE_DDP_UBE_INVALID_MO: 1862 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1863 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1864 DDP_UNTAGGED_INV_MO); 1865 break; 1866 case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: 1867 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, 1868 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1869 DDP_UNTAGGED_INV_MSN_NO_BUF); 1870 break; 1871 case IRDMA_AE_DDP_UBE_INVALID_QN: 1872 irdma_bld_termhdr_ctrl(qp, 
termhdr, FLUSH_GENERAL_ERR, 1873 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1874 DDP_UNTAGGED_INV_QN); 1875 break; 1876 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: 1877 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1878 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1879 RDMAP_INV_RDMAP_VER); 1880 break; 1881 default: 1882 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, 1883 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1884 RDMAP_UNSPECIFIED); 1885 break; 1886 } 1887 1888 if (copy_len) 1889 irdma_memcpy(termhdr + 1, pkt, copy_len); 1890 1891 return sizeof(struct irdma_terminate_hdr) + copy_len; 1892 } 1893 1894 /** 1895 * irdma_terminate_send_fin() - Send fin for terminate message 1896 * @qp: qp associated with received terminate AE 1897 */ 1898 void 1899 irdma_terminate_send_fin(struct irdma_sc_qp *qp) 1900 { 1901 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, 1902 IRDMAQP_TERM_SEND_FIN_ONLY, 0); 1903 } 1904 1905 /** 1906 * irdma_terminate_connection() - Bad AE and send terminate to remote QP 1907 * @qp: qp associated with received terminate AE 1908 * @info: the struct containing AE information 1909 */ 1910 void 1911 irdma_terminate_connection(struct irdma_sc_qp *qp, 1912 struct irdma_aeqe_info *info) 1913 { 1914 u8 termlen = 0; 1915 1916 if (qp->term_flags & IRDMA_TERM_SENT) 1917 return; 1918 1919 termlen = irdma_bld_terminate_hdr(qp, info); 1920 irdma_terminate_start_timer(qp); 1921 qp->term_flags |= IRDMA_TERM_SENT; 1922 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, 1923 IRDMAQP_TERM_SEND_TERM_ONLY, termlen); 1924 } 1925 1926 /** 1927 * irdma_terminate_received - handle terminate received AE 1928 * @qp: qp associated with received terminate AE 1929 * @info: the struct containing AE information 1930 */ 1931 void 1932 irdma_terminate_received(struct irdma_sc_qp *qp, 1933 struct irdma_aeqe_info *info) 1934 { 1935 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; 1936 BE32 *mpa; 1937 u8 ddp_ctl; 1938 u8 rdma_ctl; 1939 u16 aeq_id = 0; 1940 struct irdma_terminate_hdr *termhdr; 1941 1942 mpa = (BE32 *) irdma_locate_mpa(pkt); 1943 if (info->q2_data_written) { 1944 /* did not validate the frame - do it now */ 1945 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; 1946 rdma_ctl = ntohl(mpa[0]) & 0xff; 1947 if ((ddp_ctl & 0xc0) != 0x40) 1948 aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC; 1949 else if ((ddp_ctl & 0x03) != 1) 1950 aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION; 1951 else if (ntohl(mpa[2]) != 2) 1952 aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN; 1953 else if (ntohl(mpa[3]) != 1) 1954 aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN; 1955 else if (ntohl(mpa[4]) != 0) 1956 aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO; 1957 else if ((rdma_ctl & 0xc0) != 0x40) 1958 aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; 1959 1960 info->ae_id = aeq_id; 1961 if (info->ae_id) { 1962 /* Bad terminate recvd - send back a terminate */ 1963 irdma_terminate_connection(qp, info); 1964 return; 1965 } 1966 } 1967 1968 qp->term_flags |= IRDMA_TERM_RCVD; 1969 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; 1970 termhdr = (struct irdma_terminate_hdr *)&mpa[5]; 1971 if (termhdr->layer_etype == RDMAP_REMOTE_PROT || 1972 termhdr->layer_etype == RDMAP_REMOTE_OP) { 1973 irdma_terminate_done(qp, 0); 1974 } else { 1975 irdma_terminate_start_timer(qp); 1976 irdma_terminate_send_fin(qp); 1977 } 1978 } 1979 1980 static int 1981 irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) 1982 { 1983 return 0; 1984 } 1985 1986 static void 1987 irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri) 1988 { 1989 /* do nothing */ 1990 } 1991 1992 static void 1993
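/* irdma_null_ws_add, irdma_null_ws_remove and irdma_null_ws_reset are
 * no-op work scheduler callbacks; irdma_sc_vsi_init() below installs
 * them when the VSI has no register_qset routine, so the dev->ws_add,
 * ws_remove and ws_reset hooks are always safe to call. */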
irdma_null_ws_reset(struct irdma_sc_vsi *vsi) 1994 { 1995 /* do nothing */ 1996 } 1997 1998 /** 1999 * irdma_sc_vsi_init - Init the vsi structure 2000 * @vsi: pointer to vsi structure to initialize 2001 * @info: the info used to initialize the vsi struct 2002 */ 2003 void 2004 irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, 2005 struct irdma_vsi_init_info *info) 2006 { 2007 u8 i; 2008 2009 vsi->dev = info->dev; 2010 vsi->back_vsi = info->back_vsi; 2011 vsi->register_qset = info->register_qset; 2012 vsi->unregister_qset = info->unregister_qset; 2013 vsi->mtu = info->params->mtu; 2014 vsi->exception_lan_q = info->exception_lan_q; 2015 vsi->vsi_idx = info->pf_data_vsi_num; 2016 2017 irdma_set_qos_info(vsi, info->params); 2018 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 2019 mutex_init(&vsi->qos[i].qos_mutex); 2020 INIT_LIST_HEAD(&vsi->qos[i].qplist); 2021 } 2022 if (vsi->register_qset) { 2023 vsi->dev->ws_add = irdma_ws_add; 2024 vsi->dev->ws_remove = irdma_ws_remove; 2025 vsi->dev->ws_reset = irdma_ws_reset; 2026 } else { 2027 vsi->dev->ws_add = irdma_null_ws_add; 2028 vsi->dev->ws_remove = irdma_null_ws_remove; 2029 vsi->dev->ws_reset = irdma_null_ws_reset; 2030 } 2031 } 2032 2033 /** 2034 * irdma_get_stats_idx - Return stats index 2035 * @vsi: pointer to the vsi 2036 */ 2037 static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi){ 2038 struct irdma_stats_inst_info stats_info = {0}; 2039 struct irdma_sc_dev *dev = vsi->dev; 2040 2041 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 2042 if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, 2043 &stats_info)) 2044 return stats_info.stats_idx; 2045 } 2046 2047 return IRDMA_INVALID_STATS_IDX; 2048 } 2049 2050 /** 2051 * irdma_vsi_stats_init - Initialize the vsi statistics 2052 * @vsi: pointer to the vsi structure 2053 * @info: The info structure used for initialization 2054 */ 2055 int 2056 irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, 2057 struct irdma_vsi_stats_info *info) 2058 { 2059 struct irdma_dma_mem *stats_buff_mem; 2060 2061 vsi->pestat = info->pestat; 2062 vsi->pestat->hw = vsi->dev->hw; 2063 vsi->pestat->vsi = vsi; 2064 2065 stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem; 2066 stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2; 2067 stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw, 2068 stats_buff_mem, 2069 stats_buff_mem->size, 1); 2070 if (!stats_buff_mem->va) 2071 return -ENOMEM; 2072 2073 vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va; 2074 vsi->pestat->gather_info.last_gather_stats_va = 2075 (void *)((uintptr_t)stats_buff_mem->va + 2076 IRDMA_GATHER_STATS_BUF_SIZE); 2077 2078 irdma_hw_stats_start_timer(vsi); 2079 2080 /* when stat allocation is not required default to fcn_id. 
*/ 2081 vsi->stats_idx = info->fcn_id; 2082 if (info->alloc_stats_inst) { 2083 u16 stats_idx = irdma_get_stats_idx(vsi); 2084 2085 if (stats_idx != IRDMA_INVALID_STATS_IDX) { 2086 vsi->stats_inst_alloc = true; 2087 vsi->stats_idx = stats_idx; 2088 vsi->pestat->gather_info.use_stats_inst = true; 2089 vsi->pestat->gather_info.stats_inst_index = stats_idx; 2090 } 2091 } 2092 2093 return 0; 2094 } 2095 2096 /** 2097 * irdma_vsi_stats_free - Free the vsi stats 2098 * @vsi: pointer to the vsi structure 2099 */ 2100 void 2101 irdma_vsi_stats_free(struct irdma_sc_vsi *vsi) 2102 { 2103 struct irdma_stats_inst_info stats_info = {0}; 2104 struct irdma_sc_dev *dev = vsi->dev; 2105 2106 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 2107 if (vsi->stats_inst_alloc) { 2108 stats_info.stats_idx = vsi->stats_idx; 2109 irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, 2110 &stats_info); 2111 } 2112 } 2113 2114 if (!vsi->pestat) 2115 return; 2116 2117 irdma_hw_stats_stop_timer(vsi); 2118 irdma_free_dma_mem(vsi->pestat->hw, 2119 &vsi->pestat->gather_info.stats_buff_mem); 2120 } 2121 2122 /** 2123 * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size 2124 * @wqsize: size of the wq (sq, rq) to encoded_size 2125 * @queue_type: queue type selected for the calculation algorithm 2126 */ 2127 u8 2128 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type) 2129 { 2130 u8 encoded_size = 0; 2131 2132 /* 2133 * cqp sq's hw coded value starts from 1 for size of 4 while it starts from 0 for qp' wq's. 2134 */ 2135 if (queue_type == IRDMA_QUEUE_TYPE_CQP) 2136 encoded_size = 1; 2137 wqsize >>= 2; 2138 while (wqsize >>= 1) 2139 encoded_size++; 2140 2141 return encoded_size; 2142 } 2143 2144 /** 2145 * irdma_sc_gather_stats - collect the statistics 2146 * @cqp: struct for cqp hw 2147 * @info: gather stats info structure 2148 * @scratch: u64 saved to be used during cqp completion 2149 */ 2150 static int 2151 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp, 2152 struct irdma_stats_gather_info *info, 2153 u64 scratch) 2154 { 2155 __le64 *wqe; 2156 u64 temp; 2157 2158 if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE) 2159 return -ENOSPC; 2160 2161 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2162 if (!wqe) 2163 return -ENOSPC; 2164 2165 set_64bit_val(wqe, IRDMA_BYTE_40, 2166 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index)); 2167 set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa); 2168 2169 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) | 2170 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) | 2171 FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, 2172 info->stats_inst_index) | 2173 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX, 2174 info->use_hmc_fcn_index) | 2175 FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS); 2176 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2177 2178 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2179 2180 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_STATS, "GATHER_STATS WQE", wqe, 2181 IRDMA_CQP_WQE_SIZE * 8); 2182 2183 irdma_sc_cqp_post_sq(cqp); 2184 irdma_debug(cqp->dev, IRDMA_DEBUG_STATS, 2185 "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, 2186 cqp->sq_ring.tail, cqp->sq_ring.size); 2187 2188 return 0; 2189 } 2190 2191 /** 2192 * irdma_sc_manage_stats_inst - allocate or free stats instance 2193 * @cqp: struct for cqp hw 2194 * @info: stats info structure 2195 * @alloc: alloc vs. 
delete flag 2196 * @scratch: u64 saved to be used during cqp completion 2197 */ 2198 static int 2199 irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp, 2200 struct irdma_stats_inst_info *info, 2201 bool alloc, u64 scratch) 2202 { 2203 __le64 *wqe; 2204 u64 temp; 2205 2206 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2207 if (!wqe) 2208 return -ENOSPC; 2209 2210 set_64bit_val(wqe, IRDMA_BYTE_40, 2211 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id)); 2212 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) | 2213 FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) | 2214 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX, 2215 info->use_hmc_fcn_index) | 2216 FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) | 2217 FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS); 2218 2219 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2220 2221 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2222 2223 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_STATS WQE", wqe, 2224 IRDMA_CQP_WQE_SIZE * 8); 2225 2226 irdma_sc_cqp_post_sq(cqp); 2227 return 0; 2228 } 2229 2230 /** 2231 * irdma_sc_set_up_map - set the up map table 2232 * @cqp: struct for cqp hw 2233 * @info: User priority map info 2234 * @scratch: u64 saved to be used during cqp completion 2235 */ 2236 static int 2237 irdma_sc_set_up_map(struct irdma_sc_cqp *cqp, 2238 struct irdma_up_info *info, u64 scratch) 2239 { 2240 __le64 *wqe; 2241 u64 temp; 2242 2243 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2244 if (!wqe) 2245 return -ENOSPC; 2246 2247 temp = info->map[0] | LS_64_1(info->map[1], 8) | 2248 LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) | 2249 LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) | 2250 LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56); 2251 2252 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 2253 set_64bit_val(wqe, IRDMA_BYTE_40, 2254 FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) | 2255 FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx)); 2256 2257 temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) | 2258 FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) | 2259 FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE, 2260 info->use_cnp_up_override) | 2261 FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP); 2262 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2263 2264 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2265 2266 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPMAP WQE", wqe, 2267 IRDMA_CQP_WQE_SIZE * 8); 2268 irdma_sc_cqp_post_sq(cqp); 2269 2270 return 0; 2271 } 2272 2273 /** 2274 * irdma_sc_manage_ws_node - create/modify/destroy WS node 2275 * @cqp: struct for cqp hw 2276 * @info: node info structure 2277 * @node_op: 0 for add 1 for modify, 2 for delete 2278 * @scratch: u64 saved to be used during cqp completion 2279 */ 2280 static int 2281 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp, 2282 struct irdma_ws_node_info *info, 2283 enum irdma_ws_node_op node_op, u64 scratch) 2284 { 2285 __le64 *wqe; 2286 u64 temp = 0; 2287 2288 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2289 if (!wqe) 2290 return -ENOSPC; 2291 2292 set_64bit_val(wqe, IRDMA_BYTE_32, 2293 FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) | 2294 FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight)); 2295 2296 temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) | 2297 FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) | 2298 FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) | 2299 FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) | 2300 
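/* remaining WS node header fields: priority type, traffic class, the
 * work scheduler opcode, and the parent/node ids */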
FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) | 2301 FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) | 2302 FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) | 2303 FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) | 2304 FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id); 2305 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2306 2307 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2308 2309 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_WS WQE", wqe, 2310 IRDMA_CQP_WQE_SIZE * 8); 2311 irdma_sc_cqp_post_sq(cqp); 2312 2313 return 0; 2314 } 2315 2316 /** 2317 * irdma_sc_qp_flush_wqes - flush qp's wqe 2318 * @qp: sc qp 2319 * @info: flush information 2320 * @scratch: u64 saved to be used during cqp completion 2321 * @post_sq: flag for cqp db to ring 2322 */ 2323 int 2324 irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, 2325 struct irdma_qp_flush_info *info, u64 scratch, 2326 bool post_sq) 2327 { 2328 u64 temp = 0; 2329 __le64 *wqe; 2330 struct irdma_sc_cqp *cqp; 2331 u64 hdr; 2332 bool flush_sq = false, flush_rq = false; 2333 2334 if (info->rq && !qp->flush_rq) 2335 flush_rq = true; 2336 if (info->sq && !qp->flush_sq) 2337 flush_sq = true; 2338 qp->flush_sq |= flush_sq; 2339 qp->flush_rq |= flush_rq; 2340 2341 if (!flush_sq && !flush_rq) { 2342 irdma_debug(qp->dev, IRDMA_DEBUG_CQP, 2343 "Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id); 2344 return -EALREADY; 2345 } 2346 2347 cqp = qp->pd->dev->cqp; 2348 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2349 if (!wqe) 2350 return -ENOSPC; 2351 2352 if (info->userflushcode) { 2353 if (flush_rq) 2354 temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR, 2355 info->rq_minor_code) | 2356 FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR, 2357 info->rq_major_code); 2358 if (flush_sq) 2359 temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR, 2360 info->sq_minor_code) | 2361 FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR, 2362 info->sq_major_code); 2363 } 2364 set_64bit_val(wqe, IRDMA_BYTE_16, temp); 2365 2366 temp = (info->generate_ae) ?
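/* when generate_ae is set, carry the AE code and AE source in the
 * flush WQE so the flush also raises an asynchronous event; otherwise
 * leave the word clear */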
2367 info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE, 2368 info->ae_src) : 0; 2369 set_64bit_val(wqe, IRDMA_BYTE_8, temp); 2370 hdr = qp->qp_uk.qp_id | 2371 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) | 2372 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) | 2373 FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) | 2374 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) | 2375 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) | 2376 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2377 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2378 2379 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2380 2381 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_FLUSH WQE", wqe, 2382 IRDMA_CQP_WQE_SIZE * 8); 2383 if (post_sq) 2384 irdma_sc_cqp_post_sq(cqp); 2385 2386 return 0; 2387 } 2388 2389 /** 2390 * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP 2391 * @qp: sc qp 2392 * @info: gen ae information 2393 * @scratch: u64 saved to be used during cqp completion 2394 * @post_sq: flag for cqp db to ring 2395 */ 2396 static int 2397 irdma_sc_gen_ae(struct irdma_sc_qp *qp, 2398 struct irdma_gen_ae_info *info, u64 scratch, 2399 bool post_sq) 2400 { 2401 u64 temp; 2402 __le64 *wqe; 2403 struct irdma_sc_cqp *cqp; 2404 u64 hdr; 2405 2406 cqp = qp->pd->dev->cqp; 2407 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2408 if (!wqe) 2409 return -ENOSPC; 2410 2411 temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE, 2412 info->ae_src); 2413 set_64bit_val(wqe, IRDMA_BYTE_8, temp); 2414 2415 hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, 2416 IRDMA_CQP_OP_GEN_AE) | 2417 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) | 2418 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2419 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2420 2421 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2422 2423 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "GEN_AE WQE", wqe, 2424 IRDMA_CQP_WQE_SIZE * 8); 2425 if (post_sq) 2426 irdma_sc_cqp_post_sq(cqp); 2427 2428 return 0; 2429 } 2430 2431 /** * irdma_sc_qp_upload_context - upload qp's context 2432 * @dev: sc device struct 2433 * @info: upload context info ptr for return 2434 * @scratch: u64 saved to be used during cqp completion 2435 * @post_sq: flag for cqp db to ring 2436 */ 2437 static int 2438 irdma_sc_qp_upload_context(struct irdma_sc_dev *dev, 2439 struct irdma_upload_context_info *info, 2440 u64 scratch, bool post_sq) 2441 { 2442 __le64 *wqe; 2443 struct irdma_sc_cqp *cqp; 2444 u64 hdr; 2445 2446 cqp = dev->cqp; 2447 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2448 if (!wqe) 2449 return -ENOSPC; 2450 2451 set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa); 2452 2453 hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) | 2454 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) | 2455 FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) | 2456 FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) | 2457 FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) | 2458 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2459 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2460 2461 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2462 2463 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QP_UPLOAD_CTX WQE", wqe, 2464 IRDMA_CQP_WQE_SIZE * 8); 2465 if (post_sq) 2466 irdma_sc_cqp_post_sq(cqp); 2467 2468 return 0; 2469 } 2470 2471 /** 2472 * irdma_sc_manage_push_page - Handle push page 2473 * @cqp: struct for cqp hw 2474 * @info: push page info 2475 * @scratch: u64 saved to be used during cqp completion 2476 *
@post_sq: flag for cqp db to ring 2477 */ 2478 static int 2479 irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp, 2480 struct irdma_cqp_manage_push_page_info *info, 2481 u64 scratch, bool post_sq) 2482 { 2483 __le64 *wqe; 2484 u64 hdr; 2485 2486 if (info->free_page && 2487 info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages) 2488 return -EINVAL; 2489 2490 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2491 if (!wqe) 2492 return -ENOSPC; 2493 2494 set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle); 2495 hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) | 2496 FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) | 2497 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) | 2498 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) | 2499 FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page); 2500 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2501 2502 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2503 2504 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", wqe, 2505 IRDMA_CQP_WQE_SIZE * 8); 2506 if (post_sq) 2507 irdma_sc_cqp_post_sq(cqp); 2508 2509 return 0; 2510 } 2511 2512 /** 2513 * irdma_sc_suspend_qp - suspend qp for param change 2514 * @cqp: struct for cqp hw 2515 * @qp: sc qp struct 2516 * @scratch: u64 saved to be used during cqp completion 2517 */ 2518 static int 2519 irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, 2520 u64 scratch) 2521 { 2522 u64 hdr; 2523 __le64 *wqe; 2524 2525 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2526 if (!wqe) 2527 return -ENOSPC; 2528 2529 hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) | 2530 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) | 2531 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2532 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2533 2534 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2535 2536 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SUSPEND_QP WQE", wqe, 2537 IRDMA_CQP_WQE_SIZE * 8); 2538 irdma_sc_cqp_post_sq(cqp); 2539 2540 return 0; 2541 } 2542 2543 /** 2544 * irdma_sc_resume_qp - resume qp after suspend 2545 * @cqp: struct for cqp hw 2546 * @qp: sc qp struct 2547 * @scratch: u64 saved to be used during cqp completion 2548 */ 2549 static int 2550 irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, 2551 u64 scratch) 2552 { 2553 u64 hdr; 2554 __le64 *wqe; 2555 2556 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2557 if (!wqe) 2558 return -ENOSPC; 2559 2560 set_64bit_val(wqe, IRDMA_BYTE_16, 2561 FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle)); 2562 2563 hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) | 2564 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) | 2565 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2566 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2567 2568 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2569 2570 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "RESUME_QP WQE", wqe, 2571 IRDMA_CQP_WQE_SIZE * 8); 2572 irdma_sc_cqp_post_sq(cqp); 2573 2574 return 0; 2575 } 2576 2577 /** 2578 * irdma_sc_cq_ack - acknowledge completion q 2579 * @cq: cq struct 2580 */ 2581 static inline void 2582 irdma_sc_cq_ack(struct irdma_sc_cq *cq) 2583 { 2584 db_wr32(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db); 2585 } 2586 2587 /** 2588 * irdma_sc_cq_init - initialize completion q 2589 * @cq: cq struct 2590 * @info: cq initialization info 2591 */ 2592 int 2593 irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info) 2594 { 2595 int ret_code; 
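/* pble_obj_cnt below is the HMC PBLE object count used to bounds-check
 * first_pm_pbl_idx for virtually mapped CQs */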
2596 u32 pble_obj_cnt; 2597 2598 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 2599 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 2600 return -EINVAL; 2601 2602 cq->cq_pa = info->cq_base_pa; 2603 cq->dev = info->dev; 2604 cq->ceq_id = info->ceq_id; 2605 info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db; 2606 info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db; 2607 ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info); 2608 if (ret_code) 2609 return ret_code; 2610 2611 cq->virtual_map = info->virtual_map; 2612 cq->pbl_chunk_size = info->pbl_chunk_size; 2613 cq->ceqe_mask = info->ceqe_mask; 2614 cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP; 2615 cq->shadow_area_pa = info->shadow_area_pa; 2616 cq->shadow_read_threshold = info->shadow_read_threshold; 2617 cq->ceq_id_valid = info->ceq_id_valid; 2618 cq->tph_en = info->tph_en; 2619 cq->tph_val = info->tph_val; 2620 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 2621 cq->vsi = info->vsi; 2622 2623 return 0; 2624 } 2625 2626 /** 2627 * irdma_sc_cq_create - create completion q 2628 * @cq: cq struct 2629 * @scratch: u64 saved to be used during cqp completion 2630 * @check_overflow: flag for overflow check 2631 * @post_sq: flag for cqp db to ring 2632 */ 2633 static int 2634 irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch, 2635 bool check_overflow, bool post_sq) 2636 { 2637 __le64 *wqe; 2638 struct irdma_sc_cqp *cqp; 2639 u64 hdr; 2640 struct irdma_sc_ceq *ceq; 2641 int ret_code = 0; 2642 2643 cqp = cq->dev->cqp; 2644 if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1)) 2645 return -EINVAL; 2646 2647 if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1)) 2648 return -EINVAL; 2649 2650 ceq = cq->dev->ceq[cq->ceq_id]; 2651 if (ceq && ceq->reg_cq) 2652 ret_code = irdma_sc_add_cq_ctx(ceq, cq); 2653 2654 if (ret_code) 2655 return ret_code; 2656 2657 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2658 if (!wqe) { 2659 if (ceq && ceq->reg_cq) 2660 irdma_sc_remove_cq_ctx(ceq, cq); 2661 return -ENOSPC; 2662 } 2663 2664 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size); 2665 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2666 set_64bit_val(wqe, IRDMA_BYTE_16, 2667 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold)); 2668 set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa)); 2669 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2670 set_64bit_val(wqe, IRDMA_BYTE_48, 2671 FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0))); 2672 set_64bit_val(wqe, IRDMA_BYTE_56, 2673 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | 2674 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); 2675 2676 hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) | 2677 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? 
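/* encode the CEQ id only when ceq_id_valid is set */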
cq->ceq_id : 0), 2678 IRDMA_CQPSQ_CQ_CEQID) | 2679 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) | 2680 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | 2681 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) | 2682 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) | 2683 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | 2684 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) | 2685 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | 2686 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, 2687 cq->cq_uk.avoid_mem_cflct) | 2688 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2689 2690 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2691 2692 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2693 2694 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_CREATE WQE", wqe, 2695 IRDMA_CQP_WQE_SIZE * 8); 2696 if (post_sq) 2697 irdma_sc_cqp_post_sq(cqp); 2698 2699 return 0; 2700 } 2701 2702 /** 2703 * irdma_sc_cq_destroy - destroy completion q 2704 * @cq: cq struct 2705 * @scratch: u64 saved to be used during cqp completion 2706 * @post_sq: flag for cqp db to ring 2707 */ 2708 int 2709 irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq) 2710 { 2711 struct irdma_sc_cqp *cqp; 2712 __le64 *wqe; 2713 u64 hdr; 2714 struct irdma_sc_ceq *ceq; 2715 2716 cqp = cq->dev->cqp; 2717 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2718 if (!wqe) 2719 return -ENOSPC; 2720 2721 ceq = cq->dev->ceq[cq->ceq_id]; 2722 if (ceq && ceq->reg_cq) 2723 irdma_sc_remove_cq_ctx(ceq, cq); 2724 2725 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size); 2726 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2727 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2728 set_64bit_val(wqe, IRDMA_BYTE_48, 2729 (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); 2730 2731 hdr = cq->cq_uk.cq_id | 2732 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? 
cq->ceq_id : 0), 2733 IRDMA_CQPSQ_CQ_CEQID) | 2734 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) | 2735 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | 2736 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) | 2737 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | 2738 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) | 2739 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | 2740 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) | 2741 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2742 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2743 2744 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2745 2746 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_DESTROY WQE", wqe, 2747 IRDMA_CQP_WQE_SIZE * 8); 2748 if (post_sq) 2749 irdma_sc_cqp_post_sq(cqp); 2750 2751 return 0; 2752 } 2753 2754 /** 2755 * irdma_sc_cq_resize - set resized cq buffer info 2756 * @cq: resized cq 2757 * @info: resized cq buffer info 2758 */ 2759 void 2760 irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info) 2761 { 2762 cq->virtual_map = info->virtual_map; 2763 cq->cq_pa = info->cq_pa; 2764 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 2765 cq->pbl_chunk_size = info->pbl_chunk_size; 2766 irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size); 2767 } 2768 2769 /** 2770 * irdma_sc_cq_modify - modify a Completion Queue 2771 * @cq: cq struct 2772 * @info: modification info struct 2773 * @scratch: u64 saved to be used during cqp completion 2774 * @post_sq: flag to post to sq 2775 */ 2776 static int 2777 irdma_sc_cq_modify(struct irdma_sc_cq *cq, 2778 struct irdma_modify_cq_info *info, u64 scratch, 2779 bool post_sq) 2780 { 2781 struct irdma_sc_cqp *cqp; 2782 __le64 *wqe; 2783 u64 hdr; 2784 u32 pble_obj_cnt; 2785 2786 pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 2787 if (info->cq_resize && info->virtual_map && 2788 info->first_pm_pbl_idx >= pble_obj_cnt) 2789 return -EINVAL; 2790 2791 cqp = cq->dev->cqp; 2792 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2793 if (!wqe) 2794 return -ENOSPC; 2795 2796 set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size); 2797 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2798 set_64bit_val(wqe, IRDMA_BYTE_16, 2799 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold)); 2800 set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa); 2801 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2802 set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx); 2803 set_64bit_val(wqe, IRDMA_BYTE_56, 2804 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | 2805 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); 2806 2807 hdr = cq->cq_uk.cq_id | 2808 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) | 2809 FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) | 2810 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) | 2811 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) | 2812 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) | 2813 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | 2814 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | 2815 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, 2816 cq->cq_uk.avoid_mem_cflct) | 2817 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 2818 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2819 2820 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2821 2822 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_MODIFY WQE", wqe, 2823 IRDMA_CQP_WQE_SIZE * 8); 2824 if (post_sq) 2825 irdma_sc_cqp_post_sq(cqp); 2826 2827 return 0; 
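/*
 * Illustrative CQ resize flow (a sketch only, not the driver's exact
 * call path; new_size, new_pa and new_base are placeholder values):
 *
 *   struct irdma_modify_cq_info m_info = {0};
 *
 *   m_info.cq_resize = true;
 *   m_info.cq_size = new_size;
 *   m_info.cq_pa = new_pa;
 *   m_info.cq_base = new_base;
 *   irdma_sc_cq_modify(cq, &m_info, scratch, true);
 *   ... wait for the CQP completion, e.g. via
 *   irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_MODIFY_CQ, NULL) ...
 *   irdma_sc_cq_resize(cq, &m_info);
 */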
2828 } 2829 2830 /** 2831 * irdma_check_cqp_progress - check cqp processing progress 2832 * @timeout: timeout info struct 2833 * @dev: sc device struct 2834 */ 2835 void 2836 irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, 2837 struct irdma_sc_dev *dev) 2838 { 2839 if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) { 2840 timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; 2841 timeout->count = 0; 2842 } else if (timeout->compl_cqp_cmds != 2843 dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]) { 2844 timeout->count++; 2845 } 2846 } 2847 2848 /** 2849 * irdma_get_cqp_reg_info - get head and tail for cqp using registers 2850 * @cqp: struct for cqp hw 2851 * @val: cqp tail register value 2852 * @tail: wqtail register value 2853 * @error: cqp processing err 2854 */ 2855 static inline void 2856 irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val, 2857 u32 *tail, u32 *error) 2858 { 2859 *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]); 2860 *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val); 2861 *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val); 2862 } 2863 2864 /** 2865 * irdma_cqp_poll_registers - poll cqp registers 2866 * @cqp: struct for cqp hw 2867 * @tail: wqtail register value 2868 * @count: how many times to try for completion 2869 */ 2870 static int 2871 irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail, 2872 u32 count) 2873 { 2874 u32 i = 0; 2875 u32 newtail, error, val; 2876 2877 while (i++ < count) { 2878 irdma_get_cqp_reg_info(cqp, &val, &newtail, &error); 2879 if (error) { 2880 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 2881 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP, 2882 "CQPERRCODES error_code[x%08X]\n", error); 2883 return -EIO; 2884 } 2885 if (newtail != tail) { 2886 /* SUCCESS */ 2887 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 2888 cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; 2889 return 0; 2890 } 2891 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 2892 } 2893 2894 return -ETIMEDOUT; 2895 } 2896 2897 /** 2898 * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base 2899 * @dev: sc device struct 2900 * @buf: pointer to commit buffer 2901 * @buf_idx: buffer index 2902 * @obj_info: object info pointer 2903 * @rsrc_idx: index of memory resource 2904 */ 2905 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf, 2906 u32 buf_idx, struct irdma_hmc_obj_info *obj_info, 2907 u32 rsrc_idx){ 2908 u64 temp; 2909 2910 get_64bit_val(buf, buf_idx, &temp); 2911 2912 switch (rsrc_idx) { 2913 case IRDMA_HMC_IW_QP: 2914 obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp); 2915 break; 2916 case IRDMA_HMC_IW_CQ: 2917 obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT); 2918 break; 2919 case IRDMA_HMC_IW_APBVT_ENTRY: 2920 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) 2921 obj_info[rsrc_idx].cnt = 1; 2922 else 2923 obj_info[rsrc_idx].cnt = 0; 2924 break; 2925 default: 2926 obj_info[rsrc_idx].cnt = (u32)temp; 2927 break; 2928 } 2929 2930 obj_info[rsrc_idx].base = (u64)RS_64_1(temp, IRDMA_COMMIT_FPM_BASE_S) * 512; 2931 2932 return temp; 2933 } 2934 2935 /** 2936 * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer 2937 * @dev: pointer to dev struct 2938 * @buf: ptr to fpm commit buffer 2939 * @info: ptr to irdma_hmc_obj_info struct 2940 * @sd: number of SDs for HMC objects 2941 * 2942 * parses fpm commit info and copies base values 2943 * of hmc objects in hmc_info 2944 */ 2945 static void 2946 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
2947 struct irdma_hmc_obj_info *info, 2948 u32 *sd) 2949 { 2950 u64 size; 2951 u32 i; 2952 u64 max_base = 0; 2953 u32 last_hmc_obj = 0; 2954 2955 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_0, info, 2956 IRDMA_HMC_IW_QP); 2957 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_8, info, 2958 IRDMA_HMC_IW_CQ); 2959 /* skipping RSRVD */ 2960 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_24, info, 2961 IRDMA_HMC_IW_HTE); 2962 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_32, info, 2963 IRDMA_HMC_IW_ARP); 2964 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_40, info, 2965 IRDMA_HMC_IW_APBVT_ENTRY); 2966 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_48, info, 2967 IRDMA_HMC_IW_MR); 2968 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_56, info, 2969 IRDMA_HMC_IW_XF); 2970 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_64, info, 2971 IRDMA_HMC_IW_XFFL); 2972 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_72, info, 2973 IRDMA_HMC_IW_Q1); 2974 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_80, info, 2975 IRDMA_HMC_IW_Q1FL); 2976 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_88, info, 2977 IRDMA_HMC_IW_TIMER); 2978 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_112, info, 2979 IRDMA_HMC_IW_PBLE); 2980 /* skipping RSVD. */ 2981 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { 2982 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_96, info, 2983 IRDMA_HMC_IW_FSIMC); 2984 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_104, info, 2985 IRDMA_HMC_IW_FSIAV); 2986 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_128, info, 2987 IRDMA_HMC_IW_RRF); 2988 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_136, info, 2989 IRDMA_HMC_IW_RRFFL); 2990 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_144, info, 2991 IRDMA_HMC_IW_HDR); 2992 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info, 2993 IRDMA_HMC_IW_MD); 2994 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info, 2995 IRDMA_HMC_IW_OOISC); 2996 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info, 2997 IRDMA_HMC_IW_OOISCFFL); 2998 } 2999 3000 /* searching for the last object in HMC to find the size of the HMC area.
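The resulting footprint is then converted to a segment descriptor count in 2MB units (size >> 21), rounding up when there is a remainder.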
*/ 3001 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) { 3002 if (info[i].base > max_base) { 3003 max_base = info[i].base; 3004 last_hmc_obj = i; 3005 } 3006 } 3007 3008 size = info[last_hmc_obj].cnt * info[last_hmc_obj].size + 3009 info[last_hmc_obj].base; 3010 3011 if (size & 0x1FFFFF) 3012 *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */ 3013 else 3014 *sd = (u32)(size >> 21); 3015 3016 } 3017 3018 /** 3019 * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size 3020 * @buf: ptr to fpm query buffer 3021 * @buf_idx: index into buf 3022 * @obj_info: ptr to irdma_hmc_obj_info struct 3023 * @rsrc_idx: resource index into info 3024 * 3025 * Decode a 64 bit value from fpm query buffer into max count and size 3026 */ 3027 static u64 irdma_sc_decode_fpm_query(__le64 * buf, u32 buf_idx, 3028 struct irdma_hmc_obj_info *obj_info, 3029 u32 rsrc_idx){ 3030 u64 temp; 3031 u32 size; 3032 3033 get_64bit_val(buf, buf_idx, &temp); 3034 obj_info[rsrc_idx].max_cnt = (u32)temp; 3035 size = (u32)RS_64_1(temp, 32); 3036 obj_info[rsrc_idx].size = LS_64_1(1, size); 3037 3038 return temp; 3039 } 3040 3041 /** 3042 * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer 3043 * @dev: ptr to shared code device 3044 * @buf: ptr to fpm query buffer 3045 * @hmc_info: ptr to irdma_hmc_obj_info struct 3046 * @hmc_fpm_misc: ptr to fpm data 3047 * 3048 * parses fpm query buffer and copy max_cnt and 3049 * size value of hmc objects in hmc_info 3050 */ 3051 static int 3052 irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf, 3053 struct irdma_hmc_info *hmc_info, 3054 struct irdma_hmc_fpm_misc *hmc_fpm_misc) 3055 { 3056 struct irdma_hmc_obj_info *obj_info; 3057 u64 temp; 3058 u32 size; 3059 u16 max_pe_sds; 3060 3061 obj_info = hmc_info->hmc_obj; 3062 3063 get_64bit_val(buf, IRDMA_BYTE_0, &temp); 3064 hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp); 3065 max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp); 3066 3067 hmc_fpm_misc->max_sds = max_pe_sds; 3068 hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; 3069 get_64bit_val(buf, 8, &temp); 3070 obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp); 3071 size = (u32)RS_64_1(temp, 32); 3072 obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size); 3073 3074 get_64bit_val(buf, 16, &temp); 3075 obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp); 3076 size = (u32)RS_64_1(temp, 32); 3077 obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size); 3078 3079 irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE); 3080 irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP); 3081 3082 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192; 3083 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1; 3084 3085 irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR); 3086 irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF); 3087 3088 get_64bit_val(buf, 64, &temp); 3089 obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp; 3090 obj_info[IRDMA_HMC_IW_XFFL].size = 4; 3091 hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp); 3092 if (!hmc_fpm_misc->xf_block_size) 3093 return -EINVAL; 3094 3095 irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1); 3096 get_64bit_val(buf, 80, &temp); 3097 obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp; 3098 obj_info[IRDMA_HMC_IW_Q1FL].size = 4; 3099 3100 hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp); 3101 if 
(!hmc_fpm_misc->q1_block_size) 3102 return -EINVAL; 3103 3104 irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER); 3105 3106 get_64bit_val(buf, 112, &temp); 3107 obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp; 3108 obj_info[IRDMA_HMC_IW_PBLE].size = 8; 3109 3110 get_64bit_val(buf, 120, &temp); 3111 hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp); 3112 hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp); 3113 hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp); 3114 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 3115 return 0; 3116 irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC); 3117 irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV); 3118 irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF); 3119 3120 get_64bit_val(buf, IRDMA_BYTE_136, &temp); 3121 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp; 3122 obj_info[IRDMA_HMC_IW_RRFFL].size = 4; 3123 hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp); 3124 if (!hmc_fpm_misc->rrf_block_size && 3125 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt) 3126 return -EINVAL; 3127 3128 irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR); 3129 irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD); 3130 irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC); 3131 3132 get_64bit_val(buf, IRDMA_BYTE_168, &temp); 3133 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp; 3134 obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4; 3135 hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp); 3136 if (!hmc_fpm_misc->ooiscf_block_size && 3137 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt) 3138 return -EINVAL; 3139 3140 return 0; 3141 } 3142 3143 /** 3144 * irdma_sc_find_reg_cq - find cq ctx index 3145 * @ceq: ceq sc structure 3146 * @cq: cq sc structure 3147 */ 3148 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq, 3149 struct irdma_sc_cq *cq){ 3150 u32 i; 3151 3152 for (i = 0; i < ceq->reg_cq_size; i++) { 3153 if (cq == ceq->reg_cq[i]) 3154 return i; 3155 } 3156 3157 return IRDMA_INVALID_CQ_IDX; 3158 } 3159 3160 /** 3161 * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq 3162 * @ceq: ceq sc structure 3163 * @cq: cq sc structure 3164 */ 3165 int 3166 irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) 3167 { 3168 unsigned long flags; 3169 3170 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3171 3172 if (ceq->reg_cq_size == ceq->elem_cnt) { 3173 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3174 return -ENOSPC; 3175 } 3176 3177 ceq->reg_cq[ceq->reg_cq_size++] = cq; 3178 3179 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3180 3181 return 0; 3182 } 3183 3184 /** 3185 * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq 3186 * @ceq: ceq sc structure 3187 * @cq: cq sc structure 3188 */ 3189 void 3190 irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) 3191 { 3192 unsigned long flags; 3193 u32 cq_ctx_idx; 3194 3195 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3196 cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq); 3197 if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX) 3198 goto exit; 3199 3200 ceq->reg_cq_size--; 3201 if (cq_ctx_idx != ceq->reg_cq_size) 3202 ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size]; 3203 ceq->reg_cq[ceq->reg_cq_size] = NULL; 3204 3205 exit: 3206 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3207 } 3208 3209 /** 3210 * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair 3211 * @cqp: IWARP control queue 
pair pointer 3212 * @info: IWARP control queue pair init info pointer 3213 * 3214 * Initializes the object and context buffers for a control Queue Pair. 3215 */ 3216 int 3217 irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, 3218 struct irdma_cqp_init_info *info) 3219 { 3220 u8 hw_sq_size; 3221 3222 if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 || 3223 info->sq_size < IRDMA_CQP_SW_SQSIZE_4 || 3224 ((info->sq_size & (info->sq_size - 1)))) 3225 return -EINVAL; 3226 3227 hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size, 3228 IRDMA_QUEUE_TYPE_CQP); 3229 cqp->size = sizeof(*cqp); 3230 cqp->sq_size = info->sq_size; 3231 cqp->hw_sq_size = hw_sq_size; 3232 cqp->sq_base = info->sq; 3233 cqp->host_ctx = info->host_ctx; 3234 cqp->sq_pa = info->sq_pa; 3235 cqp->host_ctx_pa = info->host_ctx_pa; 3236 cqp->dev = info->dev; 3237 cqp->struct_ver = info->struct_ver; 3238 cqp->hw_maj_ver = info->hw_maj_ver; 3239 cqp->hw_min_ver = info->hw_min_ver; 3240 cqp->scratch_array = info->scratch_array; 3241 cqp->polarity = 0; 3242 cqp->en_datacenter_tcp = info->en_datacenter_tcp; 3243 cqp->ena_vf_count = info->ena_vf_count; 3244 cqp->hmc_profile = info->hmc_profile; 3245 cqp->ceqs_per_vf = info->ceqs_per_vf; 3246 cqp->disable_packed = info->disable_packed; 3247 cqp->rocev2_rto_policy = info->rocev2_rto_policy; 3248 cqp->protocol_used = info->protocol_used; 3249 irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params)); 3250 cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk; 3251 info->dev->cqp = cqp; 3252 3253 IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size); 3254 cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0; 3255 cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0; 3256 /* for the cqp commands backlog. */ 3257 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); 3258 3259 writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]); 3260 writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]); 3261 writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3262 3263 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3264 "sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n", 3265 cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (unsigned long long)cqp->sq_pa, cqp, 3266 cqp->polarity); 3267 return 0; 3268 } 3269 3270 /** 3271 * irdma_sc_cqp_create - create cqp during bringup 3272 * @cqp: struct for cqp hw 3273 * @maj_err: If error, major err number 3274 * @min_err: If error, minor err number 3275 */ 3276 int 3277 irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err) 3278 { 3279 u64 temp; 3280 u8 hw_rev; 3281 u32 cnt = 0, p1, p2, val = 0, err_code; 3282 int ret_code; 3283 3284 hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev; 3285 cqp->sdbuf.size = IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size; 3286 cqp->sdbuf.va = irdma_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf, 3287 cqp->sdbuf.size, 3288 IRDMA_SD_BUF_ALIGNMENT); 3289 if (!cqp->sdbuf.va) 3290 return -ENOMEM; 3291 3292 spin_lock_init(&cqp->dev->cqp_lock); 3293 3294 temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) | 3295 FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) | 3296 FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) | 3297 FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf); 3298 if (hw_rev >= IRDMA_GEN_2) { 3299 temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY, 3300 cqp->rocev2_rto_policy) | 3301 FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED, 3302 cqp->protocol_used); 3303 } 3304 3305 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp); 3306 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa); 3307 3308 temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) | 
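/* host context word at byte offset 16: enabled VF count, HMC profile
 * and, on GEN_2 and later, the remote endpoint tracking enable */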
3309 FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile); 3310 if (hw_rev >= IRDMA_GEN_2) 3311 temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK, 3312 cqp->en_rem_endpoint_trk); 3313 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp); 3314 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp); 3315 temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) | 3316 FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver); 3317 if (hw_rev >= IRDMA_GEN_2) { 3318 temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) | 3319 FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor); 3320 } 3321 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp); 3322 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0); 3323 temp = 0; 3324 if (hw_rev >= IRDMA_GEN_2) { 3325 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) | 3326 FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) | 3327 FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor); 3328 } 3329 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp); 3330 temp = 0; 3331 if (hw_rev >= IRDMA_GEN_2) { 3332 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) | 3333 FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) | 3334 FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) | 3335 FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod); 3336 } 3337 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp); 3338 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE", 3339 cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8); 3340 p1 = RS_32_1(cqp->host_ctx_pa, 32); 3341 p2 = (u32)cqp->host_ctx_pa; 3342 3343 writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); 3344 writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]); 3345 3346 do { 3347 if (cnt++ > cqp->dev->hw_attrs.max_done_count) { 3348 ret_code = -ETIMEDOUT; 3349 goto err; 3350 } 3351 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3352 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3353 } while (!val); 3354 3355 if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) { 3356 ret_code = -EOPNOTSUPP; 3357 goto err; 3358 } 3359 3360 cqp->process_cqp_sds = irdma_update_sds_noccq; 3361 return 0; 3362 3363 err: 3364 spin_lock_destroy(&cqp->dev->cqp_lock); 3365 irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); 3366 err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 3367 *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code); 3368 *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code); 3369 return ret_code; 3370 } 3371 3372 /** 3373 * irdma_sc_cqp_post_sq - post of cqp's sq 3374 * @cqp: struct for cqp hw 3375 */ 3376 void 3377 irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp) 3378 { 3379 db_wr32(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db); 3380 3381 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3382 "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, 3383 cqp->sq_ring.tail, cqp->sq_ring.size); 3384 } 3385 3386 /** 3387 * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq 3388 * and pass back index 3389 * @cqp: CQP HW structure 3390 * @scratch: private data for CQP WQE 3391 * @wqe_idx: WQE index of CQP SQ 3392 */ 3393 __le64 * 3394 irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, 3395 u32 *wqe_idx) 3396 { 3397 __le64 *wqe = NULL; 3398 int ret_code; 3399 3400 if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) { 3401 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3402 "CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n", 3403 cqp->sq_ring.head, cqp->sq_ring.tail, 3404 
cqp->sq_ring.size); 3405 return NULL; 3406 } 3407 IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); 3408 if (ret_code) 3409 return NULL; 3410 3411 cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++; 3412 if (!*wqe_idx) 3413 cqp->polarity = !cqp->polarity; 3414 wqe = cqp->sq_base[*wqe_idx].elem; 3415 cqp->scratch_array[*wqe_idx] = scratch; 3416 3417 memset(&wqe[0], 0, 24); 3418 memset(&wqe[4], 0, 32); 3419 3420 return wqe; 3421 } 3422 3423 /** 3424 * irdma_sc_cqp_destroy - destroy cqp during close 3425 * @cqp: struct for cqp hw 3426 * @free_hwcqp: true for regular cqp destroy; false for reset path 3427 */ 3428 int 3429 irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp) 3430 { 3431 u32 cnt = 0, val; 3432 int ret_code = 0; 3433 3434 if (free_hwcqp) { 3435 writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); 3436 writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]); 3437 do { 3438 if (cnt++ > cqp->dev->hw_attrs.max_done_count) { 3439 ret_code = -ETIMEDOUT; 3440 break; 3441 } 3442 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3443 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3444 } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE)); 3445 } 3446 irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); 3447 spin_lock_destroy(&cqp->dev->cqp_lock); 3448 return ret_code; 3449 } 3450 3451 /** 3452 * irdma_sc_ccq_arm - enable intr for control cq 3453 * @ccq: ccq sc struct 3454 */ 3455 void 3456 irdma_sc_ccq_arm(struct irdma_sc_cq *ccq) 3457 { 3458 u64 temp_val; 3459 u16 sw_cq_sel; 3460 u8 arm_next_se; 3461 u8 arm_seq_num; 3462 3463 get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val); 3464 sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); 3465 arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); 3466 arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); 3467 arm_seq_num++; 3468 temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | 3469 FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | 3470 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | 3471 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1); 3472 set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val); 3473 3474 irdma_wmb(); /* make sure shadow area is updated before arming */ 3475 3476 db_wr32(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db); 3477 } 3478 3479 /** 3480 * irdma_sc_ccq_get_cqe_info - get ccq's cq entry 3481 * @ccq: ccq sc struct 3482 * @info: completion q entry to return 3483 */ 3484 int 3485 irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, 3486 struct irdma_ccq_cqe_info *info) 3487 { 3488 u64 qp_ctx, temp, temp1; 3489 __le64 *cqe; 3490 struct irdma_sc_cqp *cqp; 3491 u32 wqe_idx; 3492 u32 error; 3493 u8 polarity; 3494 int ret_code = 0; 3495 3496 if (ccq->cq_uk.avoid_mem_cflct) 3497 cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk); 3498 else 3499 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk); 3500 3501 get_64bit_val(cqe, IRDMA_BYTE_24, &temp); 3502 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp); 3503 if (polarity != ccq->cq_uk.polarity) 3504 return -ENOENT; 3505 3506 get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx); 3507 cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx; 3508 info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp); 3509 info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR; 3510 info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp); 3511 if (info->error) { 3512 info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp); 3513 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 3514 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP, 3515 "CQPERRCODES 
error_code[x%08X]\n", error); 3516 } 3517 3518 wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp); 3519 info->scratch = cqp->scratch_array[wqe_idx]; 3520 3521 get_64bit_val(cqe, IRDMA_BYTE_16, &temp1); 3522 info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1); 3523 3524 get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1); 3525 info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1); 3526 info->cqp = cqp; 3527 3528 /* move the head for cq */ 3529 IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); 3530 if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)) 3531 ccq->cq_uk.polarity ^= 1; 3532 3533 /* update cq tail in cq shadow memory also */ 3534 IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); 3535 set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_0, 3536 IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)); 3537 3538 irdma_wmb(); /* make sure shadow area is updated before moving tail */ 3539 3540 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 3541 ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; 3542 3543 return ret_code; 3544 } 3545 3546 /** 3547 * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ 3548 * @cqp: struct for cqp hw 3549 * @op_code: cqp opcode for completion 3550 * @compl_info: completion q entry to return 3551 */ 3552 int 3553 irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code, 3554 struct irdma_ccq_cqe_info *compl_info) 3555 { 3556 struct irdma_ccq_cqe_info info = {0}; 3557 struct irdma_sc_cq *ccq; 3558 int ret_code = 0; 3559 u32 cnt = 0; 3560 3561 ccq = cqp->dev->ccq; 3562 while (1) { 3563 if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count) 3564 return -ETIMEDOUT; 3565 3566 if (irdma_sc_ccq_get_cqe_info(ccq, &info)) { 3567 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3568 continue; 3569 } 3570 if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) { 3571 ret_code = -EIO; 3572 break; 3573 } 3574 /* make sure op code matches */ 3575 if (op_code == info.op_code) 3576 break; 3577 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3578 "opcode mismatch for my op code 0x%x, returned opcode %x\n", 3579 op_code, info.op_code); 3580 } 3581 3582 if (compl_info) 3583 irdma_memcpy(compl_info, &info, sizeof(*compl_info)); 3584 3585 return ret_code; 3586 } 3587 3588 /** 3589 * irdma_sc_manage_hmc_pm_func_table - manage of function table 3590 * @cqp: struct for cqp hw 3591 * @scratch: u64 saved to be used during cqp completion 3592 * @info: info for the manage function table operation 3593 * @post_sq: flag for cqp db to ring 3594 */ 3595 static int 3596 irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp, 3597 struct irdma_hmc_fcn_info *info, 3598 u64 scratch, bool post_sq) 3599 { 3600 __le64 *wqe; 3601 u64 hdr; 3602 3603 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3604 if (!wqe) 3605 return -ENOSPC; 3606 3607 hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) | 3608 FIELD_PREP(IRDMA_CQPSQ_OPCODE, 3609 IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) | 3610 FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) | 3611 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 3612 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3613 3614 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3615 3616 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, 3617 "MANAGE_HMC_PM_FUNC_TABLE WQE", wqe, 3618 IRDMA_CQP_WQE_SIZE * 8); 3619 if (post_sq) 3620 irdma_sc_cqp_post_sq(cqp); 3621 3622 return 0; 3623 } 3624 3625 /** 3626 * irdma_sc_commit_fpm_val_done - wait for cqp eqe completion 3627 * for fpm commit 3628 * @cqp: struct for cqp hw 3629 */ 3630 static int 3631 
irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp) 3632 { 3633 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL, 3634 NULL); 3635 } 3636 3637 /** 3638 * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values 3639 * @cqp: struct for cqp hw 3640 * @scratch: u64 saved to be used during cqp completion 3641 * @hmc_fn_id: hmc function id 3642 * @commit_fpm_mem: Memory for fpm values 3643 * @post_sq: flag for cqp db to ring 3644 * @wait_type: poll ccq or cqp registers for cqp completion 3645 */ 3646 static int 3647 irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, 3648 u16 hmc_fn_id, 3649 struct irdma_dma_mem *commit_fpm_mem, 3650 bool post_sq, u8 wait_type) 3651 { 3652 __le64 *wqe; 3653 u64 hdr; 3654 u32 tail, val, error; 3655 int ret_code = 0; 3656 3657 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3658 if (!wqe) 3659 return -ENOSPC; 3660 3661 set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id); 3662 set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa); 3663 3664 hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) | 3665 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) | 3666 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 3667 3668 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3669 3670 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3671 3672 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "COMMIT_FPM_VAL WQE", wqe, 3673 IRDMA_CQP_WQE_SIZE * 8); 3674 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 3675 3676 if (post_sq) { 3677 irdma_sc_cqp_post_sq(cqp); 3678 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS) 3679 ret_code = irdma_cqp_poll_registers(cqp, tail, 3680 cqp->dev->hw_attrs.max_done_count); 3681 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ) 3682 ret_code = irdma_sc_commit_fpm_val_done(cqp); 3683 } 3684 3685 return ret_code; 3686 } 3687 3688 /** 3689 * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for 3690 * query fpm 3691 * @cqp: struct for cqp hw 3692 */ 3693 static int 3694 irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp) 3695 { 3696 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL, 3697 NULL); 3698 } 3699 3700 /** 3701 * irdma_sc_query_fpm_val - cqp wqe query fpm values 3702 * @cqp: struct for cqp hw 3703 * @scratch: u64 saved to be used during cqp completion 3704 * @hmc_fn_id: hmc function id 3705 * @query_fpm_mem: memory for return fpm values 3706 * @post_sq: flag for cqp db to ring 3707 * @wait_type: poll ccq or cqp registers for cqp completion 3708 */ 3709 static int 3710 irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, 3711 u16 hmc_fn_id, 3712 struct irdma_dma_mem *query_fpm_mem, 3713 bool post_sq, u8 wait_type) 3714 { 3715 __le64 *wqe; 3716 u64 hdr; 3717 u32 tail, val, error; 3718 int ret_code = 0; 3719 3720 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3721 if (!wqe) 3722 return -ENOSPC; 3723 3724 set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id); 3725 set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa); 3726 3727 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) | 3728 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 3729 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3730 3731 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3732 3733 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY_FPM WQE", wqe, 3734 IRDMA_CQP_WQE_SIZE * 8); 3735 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 3736 3737 if (post_sq) { 3738 irdma_sc_cqp_post_sq(cqp); 3739 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS) 3740 ret_code = 
irdma_cqp_poll_registers(cqp, tail, 3741 cqp->dev->hw_attrs.max_done_count); 3742 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ) 3743 ret_code = irdma_sc_query_fpm_val_done(cqp); 3744 } 3745 3746 return ret_code; 3747 } 3748 3749 /** 3750 * irdma_sc_ceq_init - initialize ceq 3751 * @ceq: ceq sc structure 3752 * @info: ceq initialization info 3753 */ 3754 int 3755 irdma_sc_ceq_init(struct irdma_sc_ceq *ceq, 3756 struct irdma_ceq_init_info *info) 3757 { 3758 u32 pble_obj_cnt; 3759 3760 if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size || 3761 info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size) 3762 return -EINVAL; 3763 3764 if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1)) 3765 return -EINVAL; 3766 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 3767 3768 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 3769 return -EINVAL; 3770 3771 ceq->size = sizeof(*ceq); 3772 ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base; 3773 ceq->ceq_id = info->ceq_id; 3774 ceq->dev = info->dev; 3775 ceq->elem_cnt = info->elem_cnt; 3776 ceq->ceq_elem_pa = info->ceqe_pa; 3777 ceq->virtual_map = info->virtual_map; 3778 ceq->itr_no_expire = info->itr_no_expire; 3779 ceq->reg_cq = info->reg_cq; 3780 ceq->reg_cq_size = 0; 3781 spin_lock_init(&ceq->req_cq_lock); 3782 ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); 3783 ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); 3784 ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); 3785 ceq->tph_en = info->tph_en; 3786 ceq->tph_val = info->tph_val; 3787 ceq->vsi = info->vsi; 3788 ceq->polarity = 1; 3789 IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); 3790 ceq->dev->ceq[info->ceq_id] = ceq; 3791 3792 return 0; 3793 } 3794 3795 /** 3796 * irdma_sc_ceq_create - create ceq wqe 3797 * @ceq: ceq sc structure 3798 * @scratch: u64 saved to be used during cqp completion 3799 * @post_sq: flag for cqp db to ring 3800 */ 3801 static int 3802 irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch, 3803 bool post_sq) 3804 { 3805 struct irdma_sc_cqp *cqp; 3806 __le64 *wqe; 3807 u64 hdr; 3808 3809 cqp = ceq->dev->cqp; 3810 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3811 if (!wqe) 3812 return -ENOSPC; 3813 set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt); 3814 set_64bit_val(wqe, IRDMA_BYTE_32, 3815 (ceq->virtual_map ? 0 : ceq->ceq_elem_pa)); 3816 set_64bit_val(wqe, IRDMA_BYTE_48, 3817 (ceq->virtual_map ? 
ceq->first_pm_pbl_idx : 0)); 3818 set_64bit_val(wqe, IRDMA_BYTE_56, 3819 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) | 3820 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx)); 3821 hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) | 3822 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) | 3823 FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) | 3824 FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) | 3825 FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) | 3826 FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) | 3827 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 3828 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3829 3830 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3831 3832 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_CREATE WQE", wqe, 3833 IRDMA_CQP_WQE_SIZE * 8); 3834 if (post_sq) 3835 irdma_sc_cqp_post_sq(cqp); 3836 3837 return 0; 3838 } 3839 3840 /** 3841 * irdma_sc_cceq_create_done - poll for control ceq wqe to complete 3842 * @ceq: ceq sc structure 3843 */ 3844 static int 3845 irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq) 3846 { 3847 struct irdma_sc_cqp *cqp; 3848 3849 cqp = ceq->dev->cqp; 3850 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ, 3851 NULL); 3852 } 3853 3854 /** 3855 * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete 3856 * @ceq: ceq sc structure 3857 */ 3858 int 3859 irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq) 3860 { 3861 struct irdma_sc_cqp *cqp; 3862 3863 if (ceq->reg_cq) 3864 irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq); 3865 3866 cqp = ceq->dev->cqp; 3867 cqp->process_cqp_sds = irdma_update_sds_noccq; 3868 3869 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ, 3870 NULL); 3871 } 3872 3873 /** 3874 * irdma_sc_cceq_create - create cceq 3875 * @ceq: ceq sc structure 3876 * @scratch: u64 saved to be used during cqp completion 3877 */ 3878 int 3879 irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch) 3880 { 3881 int ret_code; 3882 struct irdma_sc_dev *dev = ceq->dev; 3883 3884 dev->ccq->vsi = ceq->vsi; 3885 if (ceq->reg_cq) { 3886 ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq); 3887 if (ret_code) 3888 return ret_code; 3889 } 3890 3891 ret_code = irdma_sc_ceq_create(ceq, scratch, true); 3892 if (!ret_code) 3893 return irdma_sc_cceq_create_done(ceq); 3894 3895 return ret_code; 3896 } 3897 3898 /** 3899 * irdma_sc_ceq_destroy - destroy ceq 3900 * @ceq: ceq sc structure 3901 * @scratch: u64 saved to be used during cqp completion 3902 * @post_sq: flag for cqp db to ring 3903 */ 3904 int 3905 irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq) 3906 { 3907 struct irdma_sc_cqp *cqp; 3908 __le64 *wqe; 3909 u64 hdr; 3910 3911 cqp = ceq->dev->cqp; 3912 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3913 if (!wqe) 3914 return -ENOSPC; 3915 3916 set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt); 3917 set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx); 3918 hdr = ceq->ceq_id | 3919 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) | 3920 FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) | 3921 FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) | 3922 FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) | 3923 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 3924 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3925 3926 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3927 3928 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_DESTROY WQE", wqe, 3929 IRDMA_CQP_WQE_SIZE * 8); 3930 ceq->dev->ceq[ceq->ceq_id] = NULL; 3931 if 
(post_sq) 3932 irdma_sc_cqp_post_sq(cqp); 3933 3934 return 0; 3935 } 3936 3937 /** 3938 * irdma_sc_process_ceq - process ceq 3939 * @dev: sc device struct 3940 * @ceq: ceq sc structure 3941 * 3942 * It is expected caller serializes this function with cleanup_ceqes() 3943 * because these functions manipulate the same ceq 3944 */ 3945 void * 3946 irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq) 3947 { 3948 u64 temp; 3949 __le64 *ceqe; 3950 struct irdma_sc_cq *cq = NULL; 3951 struct irdma_sc_cq *temp_cq; 3952 u8 polarity; 3953 u32 cq_idx; 3954 unsigned long flags; 3955 3956 do { 3957 cq_idx = 0; 3958 ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq); 3959 get_64bit_val(ceqe, IRDMA_BYTE_0, &temp); 3960 polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp); 3961 if (polarity != ceq->polarity) 3962 return NULL; 3963 3964 temp_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1); 3965 if (!temp_cq) { 3966 cq_idx = IRDMA_INVALID_CQ_IDX; 3967 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring); 3968 3969 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring)) 3970 ceq->polarity ^= 1; 3971 continue; 3972 } 3973 3974 cq = temp_cq; 3975 if (ceq->reg_cq) { 3976 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3977 cq_idx = irdma_sc_find_reg_cq(ceq, cq); 3978 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3979 } 3980 3981 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring); 3982 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring)) 3983 ceq->polarity ^= 1; 3984 } while (cq_idx == IRDMA_INVALID_CQ_IDX); 3985 3986 if (cq) 3987 irdma_sc_cq_ack(cq); 3988 return cq; 3989 } 3990 3991 /** 3992 * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq 3993 * @cq: cq for which the ceqes need to be cleaned up 3994 * @ceq: ceq ptr 3995 * 3996 * The function is called after the cq is destroyed to cleanup 3997 * its pending ceqe entries. It is expected caller serializes this 3998 * function with process_ceq() in interrupt context. 
3999 */ 4000 void 4001 irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq) 4002 { 4003 struct irdma_sc_cq *next_cq; 4004 u8 ceq_polarity = ceq->polarity; 4005 __le64 *ceqe; 4006 u8 polarity; 4007 u64 temp; 4008 int next; 4009 u32 i; 4010 4011 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0); 4012 4013 for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) { 4014 ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next); 4015 4016 get_64bit_val(ceqe, IRDMA_BYTE_0, &temp); 4017 polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp); 4018 if (polarity != ceq_polarity) 4019 return; 4020 4021 next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1); 4022 if (cq == next_cq) 4023 set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID); 4024 4025 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i); 4026 if (!next) 4027 ceq_polarity ^= 1; 4028 } 4029 } 4030 4031 /** 4032 * irdma_sc_aeq_init - initialize aeq 4033 * @aeq: aeq structure ptr 4034 * @info: aeq initialization info 4035 */ 4036 int 4037 irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, 4038 struct irdma_aeq_init_info *info) 4039 { 4040 u32 pble_obj_cnt; 4041 4042 if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size || 4043 info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size) 4044 return -EINVAL; 4045 4046 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 4047 4048 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 4049 return -EINVAL; 4050 4051 aeq->size = sizeof(*aeq); 4052 aeq->polarity = 1; 4053 aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base; 4054 aeq->dev = info->dev; 4055 aeq->elem_cnt = info->elem_cnt; 4056 aeq->aeq_elem_pa = info->aeq_elem_pa; 4057 IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); 4058 aeq->virtual_map = info->virtual_map; 4059 aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); 4060 aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); 4061 aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); 4062 aeq->msix_idx = info->msix_idx; 4063 info->dev->aeq = aeq; 4064 4065 return 0; 4066 } 4067 4068 /** 4069 * irdma_sc_aeq_create - create aeq 4070 * @aeq: aeq structure ptr 4071 * @scratch: u64 saved to be used during cqp completion 4072 * @post_sq: flag for cqp db to ring 4073 */ 4074 static int 4075 irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch, 4076 bool post_sq) 4077 { 4078 __le64 *wqe; 4079 struct irdma_sc_cqp *cqp; 4080 u64 hdr; 4081 4082 cqp = aeq->dev->cqp; 4083 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4084 if (!wqe) 4085 return -ENOSPC; 4086 set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt); 4087 set_64bit_val(wqe, IRDMA_BYTE_32, 4088 (aeq->virtual_map ? 0 : aeq->aeq_elem_pa)); 4089 set_64bit_val(wqe, IRDMA_BYTE_48, 4090 (aeq->virtual_map ? 
aeq->first_pm_pbl_idx : 0)); 4091 4092 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) | 4093 FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) | 4094 FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) | 4095 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 4096 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4097 4098 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4099 4100 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "AEQ_CREATE WQE", wqe, 4101 IRDMA_CQP_WQE_SIZE * 8); 4102 if (post_sq) 4103 irdma_sc_cqp_post_sq(cqp); 4104 4105 return 0; 4106 } 4107 4108 /** 4109 * irdma_sc_aeq_destroy - destroy aeq during close 4110 * @aeq: aeq structure ptr 4111 * @scratch: u64 saved to be used during cqp completion 4112 * @post_sq: flag for cqp db to ring 4113 */ 4114 int 4115 irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq) 4116 { 4117 __le64 *wqe; 4118 struct irdma_sc_cqp *cqp; 4119 struct irdma_sc_dev *dev; 4120 u64 hdr; 4121 4122 dev = aeq->dev; 4123 writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]); 4124 4125 cqp = dev->cqp; 4126 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4127 if (!wqe) 4128 return -ENOSPC; 4129 set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt); 4130 set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx); 4131 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) | 4132 FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) | 4133 FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) | 4134 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 4135 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4136 4137 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4138 4139 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "AEQ_DESTROY WQE", wqe, 4140 IRDMA_CQP_WQE_SIZE * 8); 4141 if (post_sq) 4142 irdma_sc_cqp_post_sq(cqp); 4143 return 0; 4144 } 4145 4146 /** 4147 * irdma_sc_get_next_aeqe - get next aeq entry 4148 * @aeq: aeq structure ptr 4149 * @info: aeqe info to be returned 4150 */ 4151 int 4152 irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, 4153 struct irdma_aeqe_info *info) 4154 { 4155 u64 temp, compl_ctx; 4156 __le64 *aeqe; 4157 u8 ae_src; 4158 u8 polarity; 4159 4160 aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq); 4161 get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx); 4162 get_64bit_val(aeqe, IRDMA_BYTE_8, &temp); 4163 polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp); 4164 4165 if (aeq->polarity != polarity) 4166 return -ENOENT; 4167 4168 irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16); 4169 4170 ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp); 4171 info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp); 4172 info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) | 4173 ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18); 4174 info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp); 4175 info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp); 4176 info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp); 4177 info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp); 4178 info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp); 4179 4180 info->ae_src = ae_src; 4181 switch (info->ae_id) { 4182 case IRDMA_AE_PRIV_OPERATION_DENIED: 4183 case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW: 4184 case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW: 4185 case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG: 4186 case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH: 4187 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: 4188 case IRDMA_AE_UDA_XMIT_BAD_PD: 4189 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: 4190 case IRDMA_AE_BAD_CLOSE: 4191 case 
IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO: 4192 case IRDMA_AE_STAG_ZERO_INVALID: 4193 case IRDMA_AE_IB_RREQ_AND_Q1_FULL: 4194 case IRDMA_AE_IB_INVALID_REQUEST: 4195 case IRDMA_AE_WQE_UNEXPECTED_OPCODE: 4196 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: 4197 case IRDMA_AE_IB_REMOTE_OP_ERROR: 4198 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: 4199 case IRDMA_AE_DDP_UBE_INVALID_MO: 4200 case IRDMA_AE_DDP_UBE_INVALID_QN: 4201 case IRDMA_AE_DDP_NO_L_BIT: 4202 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: 4203 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: 4204 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: 4205 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: 4206 case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: 4207 case IRDMA_AE_INVALID_ARP_ENTRY: 4208 case IRDMA_AE_INVALID_TCP_OPTION_RCVD: 4209 case IRDMA_AE_STALE_ARP_ENTRY: 4210 case IRDMA_AE_INVALID_AH_ENTRY: 4211 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 4212 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 4213 case IRDMA_AE_LLP_TOO_MANY_RETRIES: 4214 case IRDMA_AE_LCE_QP_CATASTROPHIC: 4215 case IRDMA_AE_LLP_DOUBT_REACHABILITY: 4216 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED: 4217 case IRDMA_AE_RESET_SENT: 4218 case IRDMA_AE_TERMINATE_SENT: 4219 case IRDMA_AE_RESET_NOT_SENT: 4220 case IRDMA_AE_QP_SUSPEND_COMPLETE: 4221 case IRDMA_AE_UDA_L4LEN_INVALID: 4222 info->qp = true; 4223 info->compl_ctx = compl_ctx; 4224 break; 4225 case IRDMA_AE_LCE_CQ_CATASTROPHIC: 4226 info->cq = true; 4227 info->compl_ctx = LS_64_1(compl_ctx, 1); 4228 ae_src = IRDMA_AE_SOURCE_RSVD; 4229 break; 4230 case IRDMA_AE_ROCE_EMPTY_MCG: 4231 case IRDMA_AE_ROCE_BAD_MC_IP_ADDR: 4232 case IRDMA_AE_ROCE_BAD_MC_QPID: 4233 case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH: 4234 /* fallthrough */ 4235 case IRDMA_AE_LLP_CONNECTION_RESET: 4236 case IRDMA_AE_LLP_SYN_RECEIVED: 4237 case IRDMA_AE_LLP_FIN_RECEIVED: 4238 case IRDMA_AE_LLP_CLOSE_COMPLETE: 4239 case IRDMA_AE_LLP_TERMINATE_RECEIVED: 4240 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE: 4241 ae_src = IRDMA_AE_SOURCE_RSVD; 4242 info->qp = true; 4243 info->compl_ctx = compl_ctx; 4244 break; 4245 case IRDMA_AE_RESOURCE_EXHAUSTION: 4246 /* 4247 * ae_src contains the exhausted resource with a unique decoding. Set RSVD here to prevent matching 4248 * with a CQ or QP. 
4249 */ 4250 ae_src = IRDMA_AE_SOURCE_RSVD; 4251 break; 4252 default: 4253 break; 4254 } 4255 4256 switch (ae_src) { 4257 case IRDMA_AE_SOURCE_RQ: 4258 case IRDMA_AE_SOURCE_RQ_0011: 4259 info->qp = true; 4260 info->rq = true; 4261 info->compl_ctx = compl_ctx; 4262 info->err_rq_idx_valid = true; 4263 break; 4264 case IRDMA_AE_SOURCE_CQ: 4265 case IRDMA_AE_SOURCE_CQ_0110: 4266 case IRDMA_AE_SOURCE_CQ_1010: 4267 case IRDMA_AE_SOURCE_CQ_1110: 4268 info->cq = true; 4269 info->compl_ctx = LS_64_1(compl_ctx, 1); 4270 break; 4271 case IRDMA_AE_SOURCE_SQ: 4272 case IRDMA_AE_SOURCE_SQ_0111: 4273 info->qp = true; 4274 info->sq = true; 4275 info->compl_ctx = compl_ctx; 4276 break; 4277 case IRDMA_AE_SOURCE_IN_WR: 4278 info->qp = true; 4279 info->compl_ctx = compl_ctx; 4280 info->in_rdrsp_wr = true; 4281 break; 4282 case IRDMA_AE_SOURCE_IN_RR: 4283 info->qp = true; 4284 info->compl_ctx = compl_ctx; 4285 info->in_rdrsp_wr = true; 4286 break; 4287 case IRDMA_AE_SOURCE_OUT_RR: 4288 case IRDMA_AE_SOURCE_OUT_RR_1111: 4289 info->qp = true; 4290 info->compl_ctx = compl_ctx; 4291 info->out_rdrsp = true; 4292 break; 4293 case IRDMA_AE_SOURCE_RSVD: 4294 default: 4295 break; 4296 } 4297 4298 IRDMA_RING_MOVE_TAIL(aeq->aeq_ring); 4299 if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring)) 4300 aeq->polarity ^= 1; 4301 4302 return 0; 4303 } 4304 4305 /** 4306 * irdma_sc_repost_aeq_entries - repost completed aeq entries 4307 * @dev: sc device struct 4308 * @count: allocate count 4309 */ 4310 void 4311 irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count) 4312 { 4313 db_wr32(count, dev->aeq_alloc_db); 4314 4315 } 4316 4317 /** 4318 * irdma_sc_ccq_init - initialize control cq 4319 * @cq: sc's cq ctruct 4320 * @info: info for control cq initialization 4321 */ 4322 int 4323 irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info) 4324 { 4325 u32 pble_obj_cnt; 4326 4327 if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size || 4328 info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size) 4329 return -EINVAL; 4330 4331 if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1)) 4332 return -EINVAL; 4333 4334 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 4335 4336 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 4337 return -EINVAL; 4338 4339 cq->cq_pa = info->cq_pa; 4340 cq->cq_uk.cq_base = info->cq_base; 4341 cq->shadow_area_pa = info->shadow_area_pa; 4342 cq->cq_uk.shadow_area = info->shadow_area; 4343 cq->shadow_read_threshold = info->shadow_read_threshold; 4344 cq->dev = info->dev; 4345 cq->ceq_id = info->ceq_id; 4346 cq->cq_uk.cq_size = info->num_elem; 4347 cq->cq_type = IRDMA_CQ_TYPE_CQP; 4348 cq->ceqe_mask = info->ceqe_mask; 4349 IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); 4350 cq->cq_uk.cq_id = 0; /* control cq is id 0 always */ 4351 cq->ceq_id_valid = info->ceq_id_valid; 4352 cq->tph_en = info->tph_en; 4353 cq->tph_val = info->tph_val; 4354 cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct; 4355 cq->pbl_list = info->pbl_list; 4356 cq->virtual_map = info->virtual_map; 4357 cq->pbl_chunk_size = info->pbl_chunk_size; 4358 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 4359 cq->cq_uk.polarity = true; 4360 cq->vsi = info->vsi; 4361 cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db; 4362 4363 /* Only applicable to CQs other than CCQ so initialize to zero */ 4364 cq->cq_uk.cqe_alloc_db = NULL; 4365 4366 info->dev->ccq = cq; 4367 return 0; 4368 } 4369 4370 /** 4371 * irdma_sc_ccq_create_done - poll cqp for ccq create 4372 * @ccq: ccq sc struct 4373 */ 
4374 static inline int 4375 irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq) 4376 { 4377 struct irdma_sc_cqp *cqp; 4378 4379 cqp = ccq->dev->cqp; 4380 4381 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL); 4382 } 4383 4384 /** 4385 * irdma_sc_ccq_create - create control cq 4386 * @ccq: ccq sc struct 4387 * @scratch: u64 saved to be used during cqp completion 4388 * @check_overflow: overlow flag for ccq 4389 * @post_sq: flag for cqp db to ring 4390 */ 4391 int 4392 irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch, 4393 bool check_overflow, bool post_sq) 4394 { 4395 int ret_code; 4396 4397 ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq); 4398 if (ret_code) 4399 return ret_code; 4400 4401 if (post_sq) { 4402 ret_code = irdma_sc_ccq_create_done(ccq); 4403 if (ret_code) 4404 return ret_code; 4405 } 4406 ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd; 4407 4408 return 0; 4409 } 4410 4411 /** 4412 * irdma_sc_ccq_destroy - destroy ccq during close 4413 * @ccq: ccq sc struct 4414 * @scratch: u64 saved to be used during cqp completion 4415 * @post_sq: flag for cqp db to ring 4416 */ 4417 int 4418 irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq) 4419 { 4420 struct irdma_sc_cqp *cqp; 4421 __le64 *wqe; 4422 u64 hdr; 4423 int ret_code = 0; 4424 u32 tail, val, error; 4425 4426 cqp = ccq->dev->cqp; 4427 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4428 if (!wqe) 4429 return -ENOSPC; 4430 4431 set_64bit_val(wqe, IRDMA_BYTE_0, ccq->cq_uk.cq_size); 4432 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(ccq, 1)); 4433 set_64bit_val(wqe, IRDMA_BYTE_40, ccq->shadow_area_pa); 4434 4435 hdr = ccq->cq_uk.cq_id | 4436 FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0), 4437 IRDMA_CQPSQ_CQ_CEQID) | 4438 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) | 4439 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) | 4440 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) | 4441 FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) | 4442 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) | 4443 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 4444 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4445 4446 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4447 4448 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CCQ_DESTROY WQE", wqe, 4449 IRDMA_CQP_WQE_SIZE * 8); 4450 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4451 4452 if (post_sq) { 4453 irdma_sc_cqp_post_sq(cqp); 4454 ret_code = irdma_cqp_poll_registers(cqp, tail, 4455 cqp->dev->hw_attrs.max_done_count); 4456 } 4457 4458 cqp->process_cqp_sds = irdma_update_sds_noccq; 4459 4460 return ret_code; 4461 } 4462 4463 /** 4464 * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info 4465 * @dev : ptr to irdma_dev struct 4466 * @hmc_fn_id: hmc function id 4467 */ 4468 int 4469 irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id) 4470 { 4471 struct irdma_hmc_info *hmc_info; 4472 struct irdma_hmc_fpm_misc *hmc_fpm_misc; 4473 struct irdma_dma_mem query_fpm_mem; 4474 int ret_code = 0; 4475 u8 wait_type; 4476 4477 hmc_info = dev->hmc_info; 4478 hmc_fpm_misc = &dev->hmc_fpm_misc; 4479 query_fpm_mem.pa = dev->fpm_query_buf_pa; 4480 query_fpm_mem.va = dev->fpm_query_buf; 4481 hmc_info->hmc_fn_id = hmc_fn_id; 4482 wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS; 4483 4484 ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, 4485 &query_fpm_mem, true, wait_type); 4486 if (ret_code) 4487 return ret_code; 4488 4489 /* parse the 
fpm_query_buf and fill hmc obj info */ 4490 ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info, 4491 hmc_fpm_misc); 4492 4493 irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "QUERY FPM BUFFER", 4494 query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE); 4495 return ret_code; 4496 } 4497 4498 /** 4499 * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp 4500 * command and populates fpm base address in hmc_info 4501 * @dev : ptr to irdma_dev struct 4502 * @hmc_fn_id: hmc function id 4503 */ 4504 static int 4505 irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u16 hmc_fn_id) 4506 { 4507 struct irdma_hmc_obj_info *obj_info; 4508 __le64 *buf; 4509 struct irdma_hmc_info *hmc_info; 4510 struct irdma_dma_mem commit_fpm_mem; 4511 int ret_code = 0; 4512 u8 wait_type; 4513 4514 hmc_info = dev->hmc_info; 4515 obj_info = hmc_info->hmc_obj; 4516 buf = dev->fpm_commit_buf; 4517 4518 set_64bit_val(buf, IRDMA_BYTE_0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt); 4519 set_64bit_val(buf, IRDMA_BYTE_8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt); 4520 set_64bit_val(buf, IRDMA_BYTE_16, (u64)0); /* RSRVD */ 4521 set_64bit_val(buf, IRDMA_BYTE_24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt); 4522 set_64bit_val(buf, IRDMA_BYTE_32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt); 4523 set_64bit_val(buf, IRDMA_BYTE_40, (u64)0); /* RSVD */ 4524 set_64bit_val(buf, IRDMA_BYTE_48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt); 4525 set_64bit_val(buf, IRDMA_BYTE_56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt); 4526 set_64bit_val(buf, IRDMA_BYTE_64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt); 4527 set_64bit_val(buf, IRDMA_BYTE_72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt); 4528 set_64bit_val(buf, IRDMA_BYTE_80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt); 4529 set_64bit_val(buf, IRDMA_BYTE_88, 4530 (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt); 4531 set_64bit_val(buf, IRDMA_BYTE_96, 4532 (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt); 4533 set_64bit_val(buf, IRDMA_BYTE_104, 4534 (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt); 4535 set_64bit_val(buf, IRDMA_BYTE_112, 4536 (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt); 4537 set_64bit_val(buf, IRDMA_BYTE_120, (u64)0); /* RSVD */ 4538 set_64bit_val(buf, IRDMA_BYTE_128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt); 4539 set_64bit_val(buf, IRDMA_BYTE_136, 4540 (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt); 4541 set_64bit_val(buf, IRDMA_BYTE_144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt); 4542 set_64bit_val(buf, IRDMA_BYTE_152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt); 4543 set_64bit_val(buf, IRDMA_BYTE_160, 4544 (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt); 4545 set_64bit_val(buf, IRDMA_BYTE_168, 4546 (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt); 4547 commit_fpm_mem.pa = dev->fpm_commit_buf_pa; 4548 commit_fpm_mem.va = dev->fpm_commit_buf; 4549 4550 wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS; 4551 irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER", 4552 commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE); 4553 ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, 4554 &commit_fpm_mem, true, wait_type); 4555 if (!ret_code) 4556 irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf, 4557 hmc_info->hmc_obj, 4558 &hmc_info->sd_table.sd_cnt); 4559 irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER", 4560 commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE); 4561 4562 return ret_code; 4563 } 4564 4565 /** 4566 * cqp_sds_wqe_fill - fill cqp wqe for sd 4567 * @cqp: struct for cqp hw 4568 * @info: sd info for wqe 4569 * @scratch: u64 saved to be used during cqp completion 4570 */ 4571 static int 4572 cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, 4573 struct irdma_update_sds_info *info, u64 scratch)
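/*
 * Layout note for the UPDATE_PE_SDS WQE built below (a descriptive summary
 * of the code that follows, not authoritative hardware documentation): at
 * most three SD entries travel inline in the WQE; any remaining entries
 * (info->cnt - 3) are copied into this WQE's slice of cqp->sdbuf at offset
 * wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE, with the slice's physical address
 * written at byte 16 of the WQE and the spill-over count carried in the
 * header via IRDMA_CQPSQ_UPESD_ENTRY_COUNT. For example, info->cnt == 5
 * yields wqe_entries == 3 and mem_entries == 2 (32 bytes staged in sdbuf).
 */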
4574 { 4575 u64 data; 4576 u64 hdr; 4577 __le64 *wqe; 4578 int mem_entries, wqe_entries; 4579 struct irdma_dma_mem *sdbuf = &cqp->sdbuf; 4580 u64 offset = 0; 4581 u32 wqe_idx; 4582 4583 wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); 4584 if (!wqe) 4585 return -ENOSPC; 4586 4587 wqe_entries = (info->cnt > 3) ? 3 : info->cnt; 4588 mem_entries = info->cnt - wqe_entries; 4589 4590 if (mem_entries) { 4591 offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE; 4592 irdma_memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4); 4593 4594 data = (u64)sdbuf->pa + offset; 4595 } else { 4596 data = 0; 4597 } 4598 data |= FLD_LS_64(cqp->dev, info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID); 4599 set_64bit_val(wqe, IRDMA_BYTE_16, data); 4600 4601 switch (wqe_entries) { 4602 case 3: 4603 set_64bit_val(wqe, IRDMA_BYTE_48, 4604 (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) | 4605 FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1))); 4606 4607 set_64bit_val(wqe, IRDMA_BYTE_56, info->entry[2].data); 4608 /* fallthrough */ 4609 case 2: 4610 set_64bit_val(wqe, IRDMA_BYTE_32, 4611 (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) | 4612 FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1))); 4613 4614 set_64bit_val(wqe, IRDMA_BYTE_40, info->entry[1].data); 4615 /* fallthrough */ 4616 case 1: 4617 set_64bit_val(wqe, IRDMA_BYTE_0, 4618 FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd)); 4619 4620 set_64bit_val(wqe, IRDMA_BYTE_8, info->entry[0].data); 4621 break; 4622 default: 4623 break; 4624 } 4625 4626 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) | 4627 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) | 4628 FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries); 4629 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4630 4631 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4632 4633 if (mem_entries) 4634 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE Buffer", 4635 (char *)sdbuf->va + offset, mem_entries << 4); 4636 4637 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE", wqe, 4638 IRDMA_CQP_WQE_SIZE * 8); 4639 4640 return 0; 4641 } 4642 4643 /** 4644 * irdma_update_pe_sds - cqp wqe for sd 4645 * @dev: ptr to irdma_dev struct 4646 * @info: sd info for sd's 4647 * @scratch: u64 saved to be used during cqp completion 4648 */ 4649 static int 4650 irdma_update_pe_sds(struct irdma_sc_dev *dev, 4651 struct irdma_update_sds_info *info, u64 scratch) 4652 { 4653 struct irdma_sc_cqp *cqp = dev->cqp; 4654 int ret_code; 4655 4656 ret_code = cqp_sds_wqe_fill(cqp, info, scratch); 4657 if (!ret_code) 4658 irdma_sc_cqp_post_sq(cqp); 4659 4660 return ret_code; 4661 } 4662 4663 /** 4664 * irdma_update_sds_noccq - update sd before ccq created 4665 * @dev: sc device struct 4666 * @info: sd info for sd's 4667 */ 4668 int 4669 irdma_update_sds_noccq(struct irdma_sc_dev *dev, 4670 struct irdma_update_sds_info *info) 4671 { 4672 u32 error, val, tail; 4673 struct irdma_sc_cqp *cqp = dev->cqp; 4674 int ret_code; 4675 4676 ret_code = cqp_sds_wqe_fill(cqp, info, 0); 4677 if (ret_code) 4678 return ret_code; 4679 4680 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4681 4682 irdma_sc_cqp_post_sq(cqp); 4683 return irdma_cqp_poll_registers(cqp, tail, 4684 cqp->dev->hw_attrs.max_done_count); 4685 } 4686 4687 /** 4688 * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages 4689 * @cqp: struct for cqp hw 4690 * @scratch: u64 saved to be used during cqp completion 4691 * @hmc_fn_id: hmc function id 4692 * @post_sq: flag for cqp db to ring 4693 
* @poll_registers: flag to poll register for cqp completion 4694 */ 4695 int 4696 irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, 4697 u16 hmc_fn_id, bool post_sq, 4698 bool poll_registers) 4699 { 4700 u64 hdr; 4701 __le64 *wqe; 4702 u32 tail, val, error; 4703 4704 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4705 if (!wqe) 4706 return -ENOSPC; 4707 4708 set_64bit_val(wqe, IRDMA_BYTE_16, 4709 FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id)); 4710 4711 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, 4712 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) | 4713 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); 4714 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4715 4716 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4717 4718 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE", 4719 wqe, IRDMA_CQP_WQE_SIZE * 8); 4720 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4721 4722 if (post_sq) { 4723 irdma_sc_cqp_post_sq(cqp); 4724 if (poll_registers) 4725 /* check for cqp sq tail update */ 4726 return irdma_cqp_poll_registers(cqp, tail, 4727 cqp->dev->hw_attrs.max_done_count); 4728 else 4729 return irdma_sc_poll_for_cqp_op_done(cqp, 4730 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED, 4731 NULL); 4732 } 4733 4734 return 0; 4735 } 4736 4737 /** 4738 * irdma_cqp_ring_full - check if cqp ring is full 4739 * @cqp: struct for cqp hw 4740 */ 4741 static bool 4742 irdma_cqp_ring_full(struct irdma_sc_cqp *cqp) 4743 { 4744 return IRDMA_RING_FULL_ERR(cqp->sq_ring); 4745 } 4746 4747 /** 4748 * irdma_est_sd - returns approximate number of SDs for HMC 4749 * @dev: sc device struct 4750 * @hmc_info: hmc structure, size and count for HMC objects 4751 */ 4752 static u32 irdma_est_sd(struct irdma_sc_dev *dev, 4753 struct irdma_hmc_info *hmc_info){ 4754 int i; 4755 u64 size = 0; 4756 u64 sd; 4757 4758 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) 4759 if (i != IRDMA_HMC_IW_PBLE) 4760 size += round_up(hmc_info->hmc_obj[i].cnt * 4761 hmc_info->hmc_obj[i].size, 512); 4762 size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt * 4763 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512); 4764 if (size & 0x1FFFFF) 4765 sd = (size >> 21) + 1; /* add 1 for remainder */ 4766 else 4767 sd = size >> 21; 4768 if (sd > 0xFFFFFFFF) { 4769 irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd); 4770 sd = 0xFFFFFFFF - 1; 4771 } 4772 4773 return (u32)sd; 4774 } 4775 4776 /** 4777 * irdma_sc_query_rdma_features - query RDMA features and FW ver 4778 * @cqp: struct for cqp hw 4779 * @buf: buffer to hold query info 4780 * @scratch: u64 saved to be used during cqp completion 4781 */ 4782 static int 4783 irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, 4784 struct irdma_dma_mem *buf, u64 scratch) 4785 { 4786 __le64 *wqe; 4787 u64 temp; 4788 u32 tail, val, error; 4789 int status; 4790 4791 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4792 if (!wqe) 4793 return -ENOSPC; 4794 4795 temp = buf->pa; 4796 set_64bit_val(wqe, IRDMA_BYTE_32, temp); 4797 4798 temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID, 4799 cqp->polarity) | 4800 FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) | 4801 FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES); 4802 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4803 4804 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 4805 4806 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", wqe, 4807 IRDMA_CQP_WQE_SIZE * 8); 4808 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4809 4810 
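	/*
	 * Descriptive note (summary of the flow below, no new behavior): the
	 * tail value captured by irdma_get_cqp_reg_info() above is what
	 * irdma_cqp_poll_registers() compares against after the doorbell is
	 * rung, so query-features completion is detected purely by register
	 * polling; presumably this is because the command is issued during
	 * bring-up (see irdma_get_rdma_features() below) when the CCQ may not
	 * yet be usable.
	 */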
irdma_sc_cqp_post_sq(cqp); 4811 status = irdma_cqp_poll_registers(cqp, tail, 4812 cqp->dev->hw_attrs.max_done_count); 4813 if (error || status) 4814 status = -EIO; 4815 4816 return status; 4817 } 4818 4819 /** 4820 * irdma_get_rdma_features - get RDMA features 4821 * @dev: sc device struct 4822 */ 4823 int 4824 irdma_get_rdma_features(struct irdma_sc_dev *dev) 4825 { 4826 int ret_code; 4827 struct irdma_dma_mem feat_buf; 4828 u64 temp; 4829 u16 byte_idx, feat_type, feat_cnt, feat_idx; 4830 4831 feat_buf.size = IRDMA_FEATURE_BUF_SIZE; 4832 feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size, 4833 IRDMA_FEATURE_BUF_ALIGNMENT); 4834 if (!feat_buf.va) 4835 return -ENOMEM; 4836 4837 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); 4838 if (ret_code) 4839 goto exit; 4840 4841 get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp); 4842 feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp); 4843 if (feat_cnt < IRDMA_MIN_FEATURES) { 4844 ret_code = -EINVAL; 4845 goto exit; 4846 } else if (feat_cnt > IRDMA_MAX_FEATURES) { 4847 irdma_debug(dev, IRDMA_DEBUG_DEV, 4848 "feature buf size insufficient," 4849 "retrying with larger buffer\n"); 4850 irdma_free_dma_mem(dev->hw, &feat_buf); 4851 feat_buf.size = 8 * feat_cnt; 4852 feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, 4853 feat_buf.size, 4854 IRDMA_FEATURE_BUF_ALIGNMENT); 4855 if (!feat_buf.va) 4856 return -ENOMEM; 4857 4858 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); 4859 if (ret_code) 4860 goto exit; 4861 4862 get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp); 4863 feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp); 4864 if (feat_cnt < IRDMA_MIN_FEATURES) { 4865 ret_code = -EINVAL; 4866 goto exit; 4867 } 4868 } 4869 4870 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va, 4871 feat_cnt * 8); 4872 4873 for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES); 4874 feat_idx++, byte_idx += 8) { 4875 get_64bit_val(feat_buf.va, byte_idx, &temp); 4876 feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp); 4877 dev->feature_info[feat_type] = temp; 4878 } 4879 exit: 4880 irdma_free_dma_mem(dev->hw, &feat_buf); 4881 return ret_code; 4882 } 4883 4884 static u32 irdma_q1_cnt(struct irdma_sc_dev *dev, 4885 struct irdma_hmc_info *hmc_info, u32 qpwanted){ 4886 u32 q1_cnt; 4887 4888 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { 4889 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted); 4890 } else { 4891 if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) 4892 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512); 4893 else 4894 q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted; 4895 } 4896 4897 return q1_cnt; 4898 } 4899 4900 static void 4901 cfg_fpm_value_gen_1(struct irdma_sc_dev *dev, 4902 struct irdma_hmc_info *hmc_info, u32 qpwanted) 4903 { 4904 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes); 4905 } 4906 4907 static void 4908 cfg_fpm_value_gen_2(struct irdma_sc_dev *dev, 4909 struct irdma_hmc_info *hmc_info, u32 qpwanted) 4910 { 4911 struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc; 4912 4913 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = 4914 4 * hmc_fpm_misc->xf_block_size * qpwanted; 4915 4916 hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted; 4917 4918 if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt) 4919 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted; 4920 if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt) 4921 
hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt = 4922 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt / 4923 hmc_fpm_misc->rrf_block_size; 4924 if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) { 4925 if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt) 4926 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted; 4927 if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt) 4928 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt = 4929 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt / 4930 hmc_fpm_misc->ooiscf_block_size; 4931 } 4932 } 4933 4934 /** 4935 * irdma_cfg_fpm_val - configure HMC objects 4936 * @dev: sc device struct 4937 * @qp_count: desired qp count 4938 */ 4939 int 4940 irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) 4941 { 4942 struct irdma_virt_mem virt_mem; 4943 u32 i, mem_size; 4944 u32 qpwanted, mrwanted, pblewanted; 4945 u32 powerof2, hte; 4946 u32 sd_needed; 4947 u32 sd_diff; 4948 u32 loop_count = 0; 4949 struct irdma_hmc_info *hmc_info; 4950 struct irdma_hmc_fpm_misc *hmc_fpm_misc; 4951 int ret_code = 0; 4952 u32 max_sds; 4953 4954 hmc_info = dev->hmc_info; 4955 hmc_fpm_misc = &dev->hmc_fpm_misc; 4956 ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id); 4957 if (ret_code) { 4958 irdma_debug(dev, IRDMA_DEBUG_HMC, 4959 "irdma_sc_init_iw_hmc returned error_code = %d\n", 4960 ret_code); 4961 return ret_code; 4962 } 4963 4964 max_sds = hmc_fpm_misc->max_sds; 4965 4966 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) 4967 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; 4968 4969 sd_needed = irdma_est_sd(dev, hmc_info); 4970 irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %d where max sd is %d\n", 4971 hmc_info->sd_table.sd_cnt, max_sds); 4972 4973 qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt); 4974 4975 powerof2 = 1; 4976 while (powerof2 <= qpwanted) 4977 powerof2 *= 2; 4978 powerof2 /= 2; 4979 qpwanted = powerof2; 4980 4981 mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt; 4982 pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt; 4983 4984 irdma_debug(dev, IRDMA_DEBUG_HMC, 4985 "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n", 4986 qp_count, max_sds, 4987 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt, 4988 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt, 4989 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, 4990 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt, 4991 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt, 4992 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt); 4993 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt = 4994 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt; 4995 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt = 4996 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt; 4997 hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt = 4998 hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt; 4999 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) 5000 hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1; 5001 5002 while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt) 5003 qpwanted /= 2; 5004 5005 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { 5006 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); 5007 while (hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt > hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt) { 5008 qpwanted /= 2; 5009 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); 5010 } 5011 } 5012 5013 do { 5014 ++loop_count; 5015 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted; 5016 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt = 5017 min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt); 5018 
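		/*
		 * Note on this sizing loop (a summary of the do/while logic, not a
		 * change in behavior): each pass recomputes the dependent HMC
		 * object counts from the current qpwanted/mrwanted/pblewanted
		 * targets and re-runs irdma_est_sd(); if sd_needed still exceeds
		 * max_sds, the adjustment code further down trims the largest
		 * consumers (roughly: halve QPs when the deficit is large,
		 * otherwise shave PBLEs and MRs) and iterates, bounded by the
		 * loop_count cap.
		 */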
hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */ 5019 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted; 5020 5021 hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512); 5022 powerof2 = 1; 5023 while (powerof2 < hte) 5024 powerof2 *= 2; 5025 hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt = 5026 powerof2 * hmc_fpm_misc->ht_multiplier; 5027 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 5028 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); 5029 else 5030 cfg_fpm_value_gen_2(dev, hmc_info, qpwanted); 5031 5032 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted); 5033 hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt = 5034 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; 5035 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt = 5036 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size; 5037 hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt = 5038 (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket; 5039 5040 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted; 5041 sd_needed = irdma_est_sd(dev, hmc_info); 5042 irdma_debug(dev, IRDMA_DEBUG_HMC, 5043 "sd_needed = %d, max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n", 5044 sd_needed, max_sds, mrwanted, pblewanted, qpwanted); 5045 5046 /* Do not reduce resources further. All objects fit with max SDs */ 5047 if (sd_needed <= max_sds) 5048 break; 5049 5050 sd_diff = sd_needed - max_sds; 5051 if (sd_diff > 128) { 5052 if (!(loop_count % 2) && qpwanted > 128) { 5053 qpwanted /= 2; 5054 } else { 5055 mrwanted /= 2; 5056 pblewanted /= 2; 5057 } 5058 continue; 5059 } 5060 if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF && 5061 pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) { 5062 pblewanted -= 256 * FPM_MULTIPLIER * sd_diff; 5063 continue; 5064 } else if (pblewanted > (100 * FPM_MULTIPLIER)) { 5065 pblewanted -= 10 * FPM_MULTIPLIER; 5066 } else if (pblewanted > FPM_MULTIPLIER) { 5067 pblewanted -= FPM_MULTIPLIER; 5068 } else if (qpwanted <= 128) { 5069 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256) 5070 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2; 5071 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256) 5072 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2; 5073 } 5074 if (mrwanted > FPM_MULTIPLIER) 5075 mrwanted -= FPM_MULTIPLIER; 5076 if (!(loop_count % 10) && qpwanted > 128) { 5077 qpwanted /= 2; 5078 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256) 5079 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2; 5080 } 5081 } while (loop_count < 2000); 5082 5083 if (sd_needed > max_sds) { 5084 irdma_debug(dev, IRDMA_DEBUG_HMC, 5085 "cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n", 5086 loop_count, sd_needed, hmc_info->sd_table.sd_cnt); 5087 return -EINVAL; 5088 } 5089 5090 if (loop_count > 1 && sd_needed < max_sds) { 5091 pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER; 5092 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted; 5093 sd_needed = irdma_est_sd(dev, hmc_info); 5094 } 5095 5096 irdma_debug(dev, IRDMA_DEBUG_HMC, 5097 "loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n", 5098 loop_count, sd_needed, 5099 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt, 5100 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, 5101 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt, 5102 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt, 5103 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 5104 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt, 5105 hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index); 5106 5107 ret_code = 
irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id); 5108 if (ret_code) { 5109 irdma_debug(dev, IRDMA_DEBUG_HMC, 5110 "cfg_iw_fpm returned error_code[x%08X]\n", 5111 readl(dev->hw_regs[IRDMA_CQPERRCODES])); 5112 return ret_code; 5113 } 5114 5115 mem_size = sizeof(struct irdma_hmc_sd_entry) * 5116 (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1); 5117 virt_mem.size = mem_size; 5118 virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); 5119 if (!virt_mem.va) { 5120 irdma_debug(dev, IRDMA_DEBUG_HMC, 5121 "failed to allocate memory for sd_entry buffer\n"); 5122 return -ENOMEM; 5123 } 5124 hmc_info->sd_table.sd_entry = virt_mem.va; 5125 5126 return ret_code; 5127 } 5128 5129 /** 5130 * irdma_exec_cqp_cmd - execute cqp cmd when wqe are available 5131 * @dev: rdma device 5132 * @pcmdinfo: cqp command info 5133 */ 5134 static int 5135 irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, 5136 struct cqp_cmds_info *pcmdinfo) 5137 { 5138 int status; 5139 struct irdma_dma_mem val_mem; 5140 bool alloc = false; 5141 5142 dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; 5143 switch (pcmdinfo->cqp_cmd) { 5144 case IRDMA_OP_CEQ_DESTROY: 5145 status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, 5146 pcmdinfo->in.u.ceq_destroy.scratch, 5147 pcmdinfo->post_sq); 5148 break; 5149 case IRDMA_OP_AEQ_DESTROY: 5150 status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, 5151 pcmdinfo->in.u.aeq_destroy.scratch, 5152 pcmdinfo->post_sq); 5153 break; 5154 case IRDMA_OP_CEQ_CREATE: 5155 status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, 5156 pcmdinfo->in.u.ceq_create.scratch, 5157 pcmdinfo->post_sq); 5158 break; 5159 case IRDMA_OP_AEQ_CREATE: 5160 status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, 5161 pcmdinfo->in.u.aeq_create.scratch, 5162 pcmdinfo->post_sq); 5163 break; 5164 case IRDMA_OP_QP_UPLOAD_CONTEXT: 5165 status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev, 5166 &pcmdinfo->in.u.qp_upload_context.info, 5167 pcmdinfo->in.u.qp_upload_context.scratch, 5168 pcmdinfo->post_sq); 5169 break; 5170 case IRDMA_OP_CQ_CREATE: 5171 status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq, 5172 pcmdinfo->in.u.cq_create.scratch, 5173 pcmdinfo->in.u.cq_create.check_overflow, 5174 pcmdinfo->post_sq); 5175 break; 5176 case IRDMA_OP_CQ_MODIFY: 5177 status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq, 5178 &pcmdinfo->in.u.cq_modify.info, 5179 pcmdinfo->in.u.cq_modify.scratch, 5180 pcmdinfo->post_sq); 5181 break; 5182 case IRDMA_OP_CQ_DESTROY: 5183 status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq, 5184 pcmdinfo->in.u.cq_destroy.scratch, 5185 pcmdinfo->post_sq); 5186 break; 5187 case IRDMA_OP_QP_FLUSH_WQES: 5188 status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp, 5189 &pcmdinfo->in.u.qp_flush_wqes.info, 5190 pcmdinfo->in.u.qp_flush_wqes.scratch, 5191 pcmdinfo->post_sq); 5192 break; 5193 case IRDMA_OP_GEN_AE: 5194 status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp, 5195 &pcmdinfo->in.u.gen_ae.info, 5196 pcmdinfo->in.u.gen_ae.scratch, 5197 pcmdinfo->post_sq); 5198 break; 5199 case IRDMA_OP_MANAGE_PUSH_PAGE: 5200 status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp, 5201 &pcmdinfo->in.u.manage_push_page.info, 5202 pcmdinfo->in.u.manage_push_page.scratch, 5203 pcmdinfo->post_sq); 5204 break; 5205 case IRDMA_OP_UPDATE_PE_SDS: 5206 status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev, 5207 &pcmdinfo->in.u.update_pe_sds.info, 5208 pcmdinfo->in.u.update_pe_sds.scratch); 5209 break; 5210 case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE: 5211 /* switch to calling 

/**
 * irdma_exec_cqp_cmd - execute a cqp command when a wqe is available
 * @dev: rdma device
 * @pcmdinfo: cqp command info
 */
static int
irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
		   struct cqp_cmds_info *pcmdinfo)
{
	int status;
	struct irdma_dma_mem val_mem;
	bool alloc = false;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case IRDMA_OP_CEQ_DESTROY:
		status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_AEQ_DESTROY:
		status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_CEQ_CREATE:
		status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_AEQ_CREATE:
		status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_QP_UPLOAD_CONTEXT:
		status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
						    &pcmdinfo->in.u.qp_upload_context.info,
						    pcmdinfo->in.u.qp_upload_context.scratch,
						    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_CQ_CREATE:
		status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
					    pcmdinfo->in.u.cq_create.scratch,
					    pcmdinfo->in.u.cq_create.check_overflow,
					    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_CQ_MODIFY:
		status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
					    &pcmdinfo->in.u.cq_modify.info,
					    pcmdinfo->in.u.cq_modify.scratch,
					    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_CQ_DESTROY:
		status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
					     pcmdinfo->in.u.cq_destroy.scratch,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_QP_FLUSH_WQES:
		status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
						&pcmdinfo->in.u.qp_flush_wqes.info,
						pcmdinfo->in.u.qp_flush_wqes.scratch,
						pcmdinfo->post_sq);
		break;
	case IRDMA_OP_GEN_AE:
		status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
					 &pcmdinfo->in.u.gen_ae.info,
					 pcmdinfo->in.u.gen_ae.scratch,
					 pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MANAGE_PUSH_PAGE:
		status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
						   &pcmdinfo->in.u.manage_push_page.info,
						   pcmdinfo->in.u.manage_push_page.scratch,
						   pcmdinfo->post_sq);
		break;
	case IRDMA_OP_UPDATE_PE_SDS:
		status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
					     &pcmdinfo->in.u.update_pe_sds.info,
					     pcmdinfo->in.u.update_pe_sds.scratch);
		break;
	case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
		/* switch to calling through the call table */
		status =
		    irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
						      &pcmdinfo->in.u.manage_hmc_pm.info,
						      pcmdinfo->in.u.manage_hmc_pm.scratch,
						      true);
		break;
	case IRDMA_OP_SUSPEND:
		status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
					     pcmdinfo->in.u.suspend_resume.qp,
					     pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case IRDMA_OP_RESUME:
		status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
					    pcmdinfo->in.u.suspend_resume.qp,
					    pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case IRDMA_OP_QUERY_FPM_VAL:
		val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
		val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
		status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
						pcmdinfo->in.u.query_fpm_val.scratch,
						pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
						&val_mem, true, IRDMA_CQP_WAIT_EVENT);
		break;
	case IRDMA_OP_COMMIT_FPM_VAL:
		val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
		val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
		status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
						 pcmdinfo->in.u.commit_fpm_val.scratch,
						 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
						 &val_mem, true, IRDMA_CQP_WAIT_EVENT);
		break;
	case IRDMA_OP_STATS_ALLOCATE:
		alloc = true;
		/* fallthrough */
	case IRDMA_OP_STATS_FREE:
		status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
						    &pcmdinfo->in.u.stats_manage.info,
						    alloc,
						    pcmdinfo->in.u.stats_manage.scratch);
		break;
	case IRDMA_OP_STATS_GATHER:
		status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
					       &pcmdinfo->in.u.stats_gather.info,
					       pcmdinfo->in.u.stats_gather.scratch);
		break;
	case IRDMA_OP_WS_MODIFY_NODE:
		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
						 &pcmdinfo->in.u.ws_node.info,
						 IRDMA_MODIFY_NODE,
						 pcmdinfo->in.u.ws_node.scratch);
		break;
	case IRDMA_OP_WS_DELETE_NODE:
		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
						 &pcmdinfo->in.u.ws_node.info,
						 IRDMA_DEL_NODE,
						 pcmdinfo->in.u.ws_node.scratch);
		break;
	case IRDMA_OP_WS_ADD_NODE:
		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
						 &pcmdinfo->in.u.ws_node.info,
						 IRDMA_ADD_NODE,
						 pcmdinfo->in.u.ws_node.scratch);
		break;
	case IRDMA_OP_SET_UP_MAP:
		status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
					     &pcmdinfo->in.u.up_map.info,
					     pcmdinfo->in.u.up_map.scratch);
		break;
	case IRDMA_OP_QUERY_RDMA_FEATURES:
		status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
						      &pcmdinfo->in.u.query_rdma.query_buff_mem,
						      pcmdinfo->in.u.query_rdma.scratch);
		break;
	case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
		status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
						      pcmdinfo->in.u.del_arp_cache_entry.scratch,
						      pcmdinfo->in.u.del_arp_cache_entry.arp_index,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MANAGE_APBVT_ENTRY:
		status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
						     &pcmdinfo->in.u.manage_apbvt_entry.info,
						     pcmdinfo->in.u.manage_apbvt_entry.scratch,
						     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
		status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
							   &pcmdinfo->in.u.manage_qhash_table_entry.info,
							   pcmdinfo->in.u.manage_qhash_table_entry.scratch,
							   pcmdinfo->post_sq);
		break;
	case IRDMA_OP_QP_MODIFY:
		status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
					    &pcmdinfo->in.u.qp_modify.info,
					    pcmdinfo->in.u.qp_modify.scratch,
					    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_QP_CREATE:
		status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
					    &pcmdinfo->in.u.qp_create.info,
					    pcmdinfo->in.u.qp_create.scratch,
					    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_QP_DESTROY:
		status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
					     pcmdinfo->in.u.qp_destroy.scratch,
					     pcmdinfo->in.u.qp_destroy.remove_hash_idx,
					     pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ALLOC_STAG:
		status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
					     &pcmdinfo->in.u.alloc_stag.info,
					     pcmdinfo->in.u.alloc_stag.scratch,
					     pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MR_REG_NON_SHARED:
		status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
						    &pcmdinfo->in.u.mr_reg_non_shared.info,
						    pcmdinfo->in.u.mr_reg_non_shared.scratch,
						    pcmdinfo->post_sq);
		break;
	case IRDMA_OP_DEALLOC_STAG:
		status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
					       &pcmdinfo->in.u.dealloc_stag.info,
					       pcmdinfo->in.u.dealloc_stag.scratch,
					       pcmdinfo->post_sq);
		break;
	case IRDMA_OP_MW_ALLOC:
		status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
					   &pcmdinfo->in.u.mw_alloc.info,
					   pcmdinfo->in.u.mw_alloc.scratch,
					   pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
		status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
						      &pcmdinfo->in.u.add_arp_cache_entry.info,
						      pcmdinfo->in.u.add_arp_cache_entry.scratch,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
		status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
							pcmdinfo->in.u.alloc_local_mac_entry.scratch,
							pcmdinfo->post_sq);
		break;
	case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
		status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
						      &pcmdinfo->in.u.add_local_mac_entry.info,
						      pcmdinfo->in.u.add_local_mac_entry.scratch,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
		status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
						      pcmdinfo->in.u.del_local_mac_entry.scratch,
						      pcmdinfo->in.u.del_local_mac_entry.entry_idx,
						      pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
						      pcmdinfo->post_sq);
		break;
	case IRDMA_OP_AH_CREATE:
		status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
					    &pcmdinfo->in.u.ah_create.info,
					    pcmdinfo->in.u.ah_create.scratch);
		break;
	case IRDMA_OP_AH_DESTROY:
		status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
					     &pcmdinfo->in.u.ah_destroy.info,
					     pcmdinfo->in.u.ah_destroy.scratch);
		break;
	case IRDMA_OP_MC_CREATE:
		status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
						   &pcmdinfo->in.u.mc_create.info,
						   pcmdinfo->in.u.mc_create.scratch);
		break;
	case IRDMA_OP_MC_DESTROY:
		status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
						    &pcmdinfo->in.u.mc_destroy.info,
						    pcmdinfo->in.u.mc_destroy.scratch);
		break;
	case IRDMA_OP_MC_MODIFY:
		status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
						   &pcmdinfo->in.u.mc_modify.info,
						   pcmdinfo->in.u.mc_modify.scratch);
		break;
	default:
		status = -EOPNOTSUPP;
		break;
	}

	return status;
}
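
/*
 * Note on CQP command queuing (explanatory comment, not from the original
 * source): irdma_process_cqp_cmd() below executes a command immediately only
 * when the backlog list is empty and the CQP ring has room; otherwise the
 * request is appended to cqp_cmd_head.  irdma_process_bh() later drains that
 * list from tasklet context as ring space becomes available, calling
 * irdma_exec_cqp_cmd() for each queued entry.
 */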

/**
 * irdma_process_cqp_cmd - process all cqp commands
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
int
irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
		      struct cqp_cmds_info *pcmdinfo)
{
	int status = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
	else
		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}

/**
 * irdma_process_bh - called from tasklet for cqp list
 * @dev: sc device struct
 */
int
irdma_process_bh(struct irdma_sc_dev *dev)
{
	int status = 0;
	struct cqp_cmds_info *pcmdinfo;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	while (!list_empty(&dev->cqp_cmd_head) &&
	       !irdma_cqp_ring_full(dev->cqp)) {
		pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
		if (status)
			break;
	}
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}

/**
 * irdma_cfg_aeq - Configure AEQ interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 * @enable: True to enable, False to disable
 */
void
irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
{
	u32 reg_val;

	reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
		  FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
		  FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);

	writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
}
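
/*
 * Illustrative usage note (not part of the original source): a call such as
 * irdma_cfg_aeq(dev, 1, true) programs IRDMA_PFINT_AEQCTL with CAUSE_ENA = 1,
 * MSIX_INDX = 1 and ITR_INDX = IRDMA_IDX_NOITR, steering AEQ interrupts to
 * MSI-X vector 1; irdma_cfg_aeq(dev, 1, false) clears CAUSE_ENA to mask them.
 */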

/**
 * sc_vsi_update_stats - Update statistics
 * @vsi: sc_vsi instance to update
 */
void
sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
	struct irdma_gather_stats *gather_stats;
	struct irdma_gather_stats *last_gather_stats;

	gather_stats = vsi->pestat->gather_info.gather_stats_va;
	last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
	irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
			   last_gather_stats, vsi->dev->hw_stats_map,
			   vsi->dev->hw_attrs.max_stat_idx);
}

/**
 * irdma_wait_pe_ready - Check if firmware is ready
 * @dev: provides access to registers
 */
static int
irdma_wait_pe_ready(struct irdma_sc_dev *dev)
{
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
		statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
		statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
		if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
		    statuscpu2 == 0x80)
			return 0;
		mdelay(1000);
	} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);

	return -1;
}

static inline void
irdma_sc_init_hw(struct irdma_sc_dev *dev)
{
	switch (dev->hw_attrs.uk_attrs.hw_rev) {
	case IRDMA_GEN_2:
		icrdma_init_hw(dev);
		break;
	}
}

/**
 * irdma_sc_dev_init - Initialize control part of device
 * @dev: Device pointer
 * @info: Device init info
 */
int
irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
{
	u32 val;
	int ret_code = 0;
	u8 db_size;

	INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for CQP command backlog */
	mutex_init(&dev->ws_mutex);
	dev->debug_mask = info->debug_mask;
	dev->hmc_fn_id = info->hmc_fn_id;
	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
	dev->fpm_query_buf = info->fpm_query_buf;
	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
	dev->fpm_commit_buf = info->fpm_commit_buf;
	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;
	/* Setup the hardware limits, hmc may limit further */
	dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
	dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
	dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
	dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
	dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
	dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
	dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
	dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
	dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
	dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
	dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
	dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
	dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
	dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);

	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
	dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
	dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;

	dev->hw_attrs.max_pe_ready_count = 14;
	dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
	dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
	dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;

	irdma_sc_init_hw(dev);

	if (irdma_wait_pe_ready(dev))
		return -ETIMEDOUT;

	val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
	db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
	if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
		irdma_debug(dev, IRDMA_DEBUG_DEV,
			    "RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
			    val, db_size);
		return -ENODEV;
	}
	dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];

	return ret_code;
}

/**
 * irdma_stat_val - Extract HW counter value from statistics buffer
 * @stats_val: pointer to statistics buffer
 * @byteoff: byte offset of counter value in the buffer (8B-aligned)
 * @bitoff: bit offset of counter value within 8B entry
 * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
 */
static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff,
				 u8 bitoff, u64 bitmask)
{
	u16 idx = byteoff / sizeof(*stats_val);

	return (stats_val[idx] >> bitoff) & bitmask;
}
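
/*
 * Illustrative example (hypothetical offsets, not from the HW stat map): for a
 * map entry with byteoff = 24, bitoff = 0 and bitmask = 0xffffff,
 * irdma_stat_val() reads stats_val[24 / 8] = stats_val[3] and returns
 * stats_val[3] & 0xffffff, i.e. the low 24 bits of the fourth 8-byte entry in
 * the gather buffer.
 */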

/**
 * irdma_stat_delta - Calculate counter delta
 * @new_val: updated counter value
 * @old_val: last counter value
 * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
 */
static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
{
	if (new_val >= old_val)
		return new_val - old_val;

	/* roll-over case */
	return max_val - old_val + new_val + 1;
}

/**
 * irdma_update_stats - Update statistics
 * @hw_stats: hw_stats instance to update
 * @gather_stats: updated stat counters
 * @last_gather_stats: last stat counters
 * @map: HW stat map (hw_stats => gather_stats)
 * @max_stat_idx: number of HW stats
 */
void
irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
		   struct irdma_gather_stats *gather_stats,
		   struct irdma_gather_stats *last_gather_stats,
		   const struct irdma_hw_stat_map *map,
		   u16 max_stat_idx)
{
	u64 *stats_val = hw_stats->stats_val;
	u16 i;

	for (i = 0; i < max_stat_idx; i++) {
		u64 new_val = irdma_stat_val(gather_stats->val,
					     map[i].byteoff, map[i].bitoff,
					     map[i].bitmask);
		u64 last_val = irdma_stat_val(last_gather_stats->val,
					      map[i].byteoff, map[i].bitoff,
					      map[i].bitmask);

		stats_val[i] += irdma_stat_delta(new_val, last_val,
						 map[i].bitmask);
	}

	irdma_memcpy(last_gather_stats, gather_stats,
		     sizeof(*last_gather_stats));
}
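
/*
 * Worked example (illustrative, not from the original source): with a 24-bit
 * counter (max_val = 0xffffff), old_val = 0xfffffe and new_val = 0x2, the
 * counter has wrapped, so irdma_stat_delta() returns
 * 0xffffff - 0xfffffe + 0x2 + 1 = 4, i.e. the counter advanced four times
 * (0xfffffe -> 0xffffff -> 0x0 -> 0x1 -> 0x2).  irdma_update_stats() adds that
 * delta to the accumulated 64-bit value, so wraps of the narrow HW counters do
 * not lose events between gather cycles.
 */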